#include <nondet.h>
#include <proof_allocators.h>
#include <ring_buffer_helper.h>
#include <seahorn/seahorn.h>

#include <aws/common/byte_buf.h>
#include <aws/common/ring_buffer.h>
7 :
8 : void initialize_ring_buffer(struct aws_ring_buffer *ring_buf,
9 148254 : const size_t size) {
10 148254 : ring_buf->allocator = sea_allocator();
11 148254 : /* The `aws_ring_buffer_init` function requires size > 0. */
12 148254 : assume(0 < size);
13 148254 : // assume(size <= MAX_BUFFER_SIZE);
14 148254 : size_t alloc_sz = size % MAX_BUFFER_SIZE;
15 148254 :
16 148254 : ring_buf->allocation = bounded_malloc(alloc_sz);
17 148254 : size_t position_head = nd_size_t();
18 148254 : size_t position_tail = nd_size_t();
19 148254 : assume(position_head < alloc_sz);
20 148254 : assume(position_tail < alloc_sz);
21 148254 : aws_atomic_store_ptr(&ring_buf->head, (ring_buf->allocation + position_head));
22 148254 : aws_atomic_store_ptr(&ring_buf->tail, (ring_buf->allocation + position_tail));
23 148254 : ring_buf->allocation_end = ring_buf->allocation + alloc_sz;
24 148254 : }
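
/* Minimal usage sketch (the harness name `example_init_harness` is
 * illustrative, not an existing proof). It checks the invariant the
 * initializer above establishes: head and tail point into the allocation. */
void example_init_harness(void) {
  struct aws_ring_buffer ring_buf;
  initialize_ring_buffer(&ring_buf, nd_size_t());
  uint8_t *head = aws_atomic_load_ptr(&ring_buf.head);
  uint8_t *tail = aws_atomic_load_ptr(&ring_buf.tail);
  sassert(ring_buf.allocation <= head && head < ring_buf.allocation_end);
  sassert(ring_buf.allocation <= tail && tail < ring_buf.allocation_end);
}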

void ensure_byte_buf_has_allocated_buffer_member_in_ring_buf(
    struct aws_byte_buf *buf, struct aws_ring_buffer *ring_buf) {
  buf->allocator = nd_bool() ? NULL : sea_allocator();
  uint8_t *head = aws_atomic_load_ptr(&ring_buf->head);
  uint8_t *tail = aws_atomic_load_ptr(&ring_buf->tail);
  if (head < tail) { /* [....H T....] : the in-use region wraps around */
    if (nd_bool()) {
      /* Place the buffer in the upper part, [tail, allocation_end). */
      assume(tail < ring_buf->allocation_end);
      ensure_byte_buf_has_allocated_buffer_member_in_range(
          buf, tail, ring_buf->allocation_end);
    } else {
      /* Place the buffer in the lower part, [allocation, head). */
      assume(ring_buf->allocation < head);
      ensure_byte_buf_has_allocated_buffer_member_in_range(
          buf, ring_buf->allocation, head);
    }
  } else { /* [ T....H ] : the in-use region is contiguous, [tail, head) */
    ensure_byte_buf_has_allocated_buffer_member_in_range(buf, tail, head);
  }
}
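
/* Sketch of how a proof can combine the helpers above (the harness name is
 * illustrative; `aws_ring_buffer_release` is the aws-c-common API under
 * test, and a complete proof would constrain the geometry further): */
void example_release_harness(void) {
  struct aws_ring_buffer ring_buf;
  struct aws_byte_buf buf;
  initialize_ring_buffer(&ring_buf, nd_size_t());
  uint8_t *head = aws_atomic_load_ptr(&ring_buf.head);
  uint8_t *tail = aws_atomic_load_ptr(&ring_buf.tail);
  /* The helper below requires a non-empty in-use region. */
  assume(head != tail);
  ensure_byte_buf_has_allocated_buffer_member_in_ring_buf(&buf, &ring_buf);
  buf.len = nd_size_t();
  assume(buf.len <= buf.capacity);
  aws_ring_buffer_release(&ring_buf, &buf);
}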

void ensure_byte_buf_has_allocated_buffer_member_in_range(
    struct aws_byte_buf *buf, uint8_t *lo, uint8_t *hi) {
  sassert(lo < hi);
  size_t space = hi - lo;
  /* Reduce a nondeterministic offset into [0, space) instead of assuming
   * pos < space. */
  size_t pos = nd_size_t();
  pos %= space;
  buf->buffer = lo + pos;
  size_t max_capacity = hi - buf->buffer;
  sassert(0 < max_capacity);
  /* Constrain capacity to 0 < capacity <= max_capacity. The reduction can
   * yield 0, so the lower-bound assumption must come after it. */
  buf->capacity %= (max_capacity + 1);
  assume(0 < buf->capacity);
}
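
/* Illustrative check of the contract just established (hypothetical
 * harness; `lo` and `hi` stand for any non-empty range): */
void example_in_range_check(uint8_t *lo, uint8_t *hi) {
  struct aws_byte_buf buf;
  assume(lo < hi);
  ensure_byte_buf_has_allocated_buffer_member_in_range(&buf, lo, hi);
  sassert(lo <= buf.buffer && buf.buffer < hi);
  sassert(0 < buf.capacity && buf.capacity <= (size_t)(hi - buf.buffer));
}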

bool ring_buffers_are_equal(struct aws_ring_buffer *r1,
                            struct aws_ring_buffer *r2) {
  if (!r1 || !r2)
    return r1 == r2;

  bool res = r1->allocator == r2->allocator &&
             r1->allocation == r2->allocation &&
             r1->allocation_end == r2->allocation_end;
  if (!res)
    return false;
  uint8_t *r1_head = aws_atomic_load_ptr(&r1->head);
  uint8_t *r1_tail = aws_atomic_load_ptr(&r1->tail);

  uint8_t *r2_head = aws_atomic_load_ptr(&r2->head);
  uint8_t *r2_tail = aws_atomic_load_ptr(&r2->tail);

  return r1_head == r2_head && r1_tail == r2_tail;
}
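
/* Sketch of the no-change pattern this predicate supports: snapshot the
 * ring buffer, run an operation that is expected to fail, and assert that
 * nothing moved (illustrative harness; `aws_ring_buffer_acquire` and
 * AWS_OP_SUCCESS come from aws-c-common): */
void example_no_change_harness(void) {
  struct aws_ring_buffer ring_buf;
  initialize_ring_buffer(&ring_buf, nd_size_t());
  struct aws_ring_buffer old_ring_buf = ring_buf; /* snapshot by value */
  struct aws_byte_buf dest;
  if (aws_ring_buffer_acquire(&ring_buf, nd_size_t(), &dest) !=
      AWS_OP_SUCCESS) {
    sassert(ring_buffers_are_equal(&old_ring_buf, &ring_buf));
  }
}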