bionic
Revision | 673bb02eec857ef6b004e562ab41d6b3a0e21b8a (tree) |
---|---|
Time | 2020-11-05 09:02:09 |
Author | android-build-team Robot <android-build-team-robot@goog...> |
Committer | android-build-team Robot |
Snap for 6952926 from 6aaff2a54df115dd12b5034d03a2c58c39e4881b to rvc-d2-release
Change-Id: I43ec97d5123405a4f786f1f09bff1a2414538f93
@@ -73,50 +73,42 @@ class AtexitArray { | ||
73 | 73 | // restart concurrent __cxa_finalize passes. |
74 | 74 | uint64_t total_appends_; |
75 | 75 | |
76 | - static size_t round_up_to_page_bytes(size_t capacity) { | |
77 | - return PAGE_END(capacity * sizeof(AtexitEntry)); | |
78 | - } | |
79 | - | |
80 | - static size_t next_capacity(size_t capacity) { | |
81 | - // Double the capacity each time. | |
82 | - size_t result = round_up_to_page_bytes(MAX(1, capacity * 2)) / sizeof(AtexitEntry); | |
83 | - CHECK(result > capacity); | |
84 | - return result; | |
85 | - } | |
76 | + static size_t page_start_of_index(size_t idx) { return PAGE_START(idx * sizeof(AtexitEntry)); } | |
77 | + static size_t page_end_of_index(size_t idx) { return PAGE_END(idx * sizeof(AtexitEntry)); } | |
86 | 78 | |
87 | 79 | // Recompact the array if it will save at least one page of memory at the end. |
88 | - bool needs_recompaction() { | |
89 | - return round_up_to_page_bytes(size_ - extracted_count_) < round_up_to_page_bytes(size_); | |
80 | + bool needs_recompaction() const { | |
81 | + return page_end_of_index(size_ - extracted_count_) < page_end_of_index(size_); | |
90 | 82 | } |
91 | 83 | |
92 | - void set_writable(bool writable); | |
84 | + void set_writable(bool writable, size_t start_idx, size_t num_entries); | |
85 | + static bool next_capacity(size_t capacity, size_t* result); | |
93 | 86 | bool expand_capacity(); |
94 | 87 | }; |
95 | 88 | |
96 | 89 | } // anonymous namespace |
97 | 90 | |
98 | 91 | bool AtexitArray::append_entry(const AtexitEntry& entry) { |
99 | - bool result = false; | |
92 | + if (size_ >= capacity_ && !expand_capacity()) return false; | |
100 | 93 | |
101 | - set_writable(true); | |
102 | - if (size_ < capacity_ || expand_capacity()) { | |
103 | - array_[size_++] = entry; | |
104 | - ++total_appends_; | |
105 | - result = true; | |
106 | - } | |
107 | - set_writable(false); | |
94 | + size_t idx = size_++; | |
108 | 95 | |
109 | - return result; | |
96 | + set_writable(true, idx, 1); | |
97 | + array_[idx] = entry; | |
98 | + ++total_appends_; | |
99 | + set_writable(false, idx, 1); | |
100 | + | |
101 | + return true; | |
110 | 102 | } |
111 | 103 | |
112 | 104 | // Extract an entry and return it. |
113 | 105 | AtexitEntry AtexitArray::extract_entry(size_t idx) { |
114 | 106 | AtexitEntry result = array_[idx]; |
115 | 107 | |
116 | - set_writable(true); | |
108 | + set_writable(true, idx, 1); | |
117 | 109 | array_[idx] = {}; |
118 | 110 | ++extracted_count_; |
119 | - set_writable(false); | |
111 | + set_writable(false, idx, 1); | |
120 | 112 | |
121 | 113 | return result; |
122 | 114 | } |
@@ -124,7 +116,7 @@ AtexitEntry AtexitArray::extract_entry(size_t idx) { | ||
124 | 116 | void AtexitArray::recompact() { |
125 | 117 | if (!needs_recompaction()) return; |
126 | 118 | |
127 | - set_writable(true); | |
119 | + set_writable(true, 0, size_); | |
128 | 120 | |
129 | 121 | // Optimization: quickly skip over the initial non-null entries. |
130 | 122 | size_t src = 0, dst = 0; |
@@ -143,51 +135,79 @@ void AtexitArray::recompact() { | ||
143 | 135 | } |
144 | 136 | |
145 | 137 | // If the table uses fewer pages, clean the pages at the end. |
146 | - size_t old_bytes = round_up_to_page_bytes(size_); | |
147 | - size_t new_bytes = round_up_to_page_bytes(dst); | |
138 | + size_t old_bytes = page_end_of_index(size_); | |
139 | + size_t new_bytes = page_end_of_index(dst); | |
148 | 140 | if (new_bytes < old_bytes) { |
149 | 141 | madvise(reinterpret_cast<char*>(array_) + new_bytes, old_bytes - new_bytes, MADV_DONTNEED); |
150 | 142 | } |
151 | 143 | |
144 | + set_writable(false, 0, size_); | |
145 | + | |
152 | 146 | size_ = dst; |
153 | 147 | extracted_count_ = 0; |
154 | - | |
155 | - set_writable(false); | |
156 | 148 | } |
157 | 149 | |
158 | 150 | // Use mprotect to make the array writable or read-only. Aborts on failure. Making the array |
159 | 151 | // read-only could protect against either unintentional or malicious corruption of the array. |
160 | -void AtexitArray::set_writable(bool writable) { | |
152 | +void AtexitArray::set_writable(bool writable, size_t start_idx, size_t num_entries) { | |
161 | 153 | if (array_ == nullptr) return; |
154 | + | |
155 | + const size_t start_byte = page_start_of_index(start_idx); | |
156 | + const size_t stop_byte = page_end_of_index(start_idx + num_entries); | |
157 | + const size_t byte_len = stop_byte - start_byte; | |
158 | + | |
162 | 159 | const int prot = PROT_READ | (writable ? PROT_WRITE : 0); |
163 | - if (mprotect(array_, round_up_to_page_bytes(capacity_), prot) != 0) { | |
160 | + if (mprotect(reinterpret_cast<char*>(array_) + start_byte, byte_len, prot) != 0) { | |
164 | 161 | async_safe_fatal("mprotect failed on atexit array: %s", strerror(errno)); |
165 | 162 | } |
166 | 163 | } |
167 | 164 | |
165 | +// Approximately double the capacity. Returns true if successful (no overflow). AtexitEntry is | |
166 | +// smaller than a page, but this function should still be correct even if AtexitEntry were larger | |
167 | +// than one. | |
168 | +bool AtexitArray::next_capacity(size_t capacity, size_t* result) { | |
169 | + if (capacity == 0) { | |
170 | + *result = PAGE_END(sizeof(AtexitEntry)) / sizeof(AtexitEntry); | |
171 | + return true; | |
172 | + } | |
173 | + size_t num_bytes; | |
174 | + if (__builtin_mul_overflow(page_end_of_index(capacity), 2, &num_bytes)) { | |
175 | + async_safe_format_log(ANDROID_LOG_WARN, "libc", "__cxa_atexit: capacity calculation overflow"); | |
176 | + return false; | |
177 | + } | |
178 | + *result = num_bytes / sizeof(AtexitEntry); | |
179 | + return true; | |
180 | +} | |
181 | + | |
168 | 182 | bool AtexitArray::expand_capacity() { |
169 | - const size_t new_capacity = next_capacity(capacity_); | |
170 | - const size_t new_capacity_bytes = round_up_to_page_bytes(new_capacity); | |
183 | + size_t new_capacity; | |
184 | + if (!next_capacity(capacity_, &new_capacity)) return false; | |
185 | + const size_t new_capacity_bytes = page_end_of_index(new_capacity); | |
186 | + | |
187 | + set_writable(true, 0, capacity_); | |
171 | 188 | |
189 | + bool result = false; | |
172 | 190 | void* new_pages; |
173 | 191 | if (array_ == nullptr) { |
174 | 192 | new_pages = mmap(nullptr, new_capacity_bytes, PROT_READ | PROT_WRITE, |
175 | 193 | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); |
176 | 194 | } else { |
177 | - new_pages = | |
178 | - mremap(array_, round_up_to_page_bytes(capacity_), new_capacity_bytes, MREMAP_MAYMOVE); | |
195 | + // mremap fails if the source buffer crosses a boundary between two VMAs. When a single array | |
196 | + // element is modified, the kernel should split then rejoin the buffer's VMA. | |
197 | + new_pages = mremap(array_, page_end_of_index(capacity_), new_capacity_bytes, MREMAP_MAYMOVE); | |
179 | 198 | } |
180 | 199 | if (new_pages == MAP_FAILED) { |
181 | 200 | async_safe_format_log(ANDROID_LOG_WARN, "libc", |
182 | 201 | "__cxa_atexit: mmap/mremap failed to allocate %zu bytes: %s", |
183 | 202 | new_capacity_bytes, strerror(errno)); |
184 | - return false; | |
203 | + } else { | |
204 | + result = true; | |
205 | + prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, new_pages, new_capacity_bytes, "atexit handlers"); | |
206 | + array_ = static_cast<AtexitEntry*>(new_pages); | |
207 | + capacity_ = new_capacity; | |
185 | 208 | } |
186 | - | |
187 | - prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, new_pages, new_capacity_bytes, "atexit handlers"); | |
188 | - array_ = static_cast<AtexitEntry*>(new_pages); | |
189 | - capacity_ = new_capacity; | |
190 | - return true; | |
209 | + set_writable(false, 0, capacity_); | |
210 | + return result; | |
191 | 211 | } |
192 | 212 | |
193 | 213 | static AtexitArray g_array; |