Android-x86
Fork
Donation

  • R/O
  • HTTP
  • SSH
  • HTTPS

bionic: Commit

bionic


Commit MetaInfo

Revision: 673bb02eec857ef6b004e562ab41d6b3a0e21b8a (tree)
Time: 2020-11-05 09:02:09
Author: android-build-team Robot <android-build-team-robot@goog...>
Committer: android-build-team Robot

Log Message

Snap for 6952926 from 6aaff2a54df115dd12b5034d03a2c58c39e4881b to rvc-d2-release

Change-Id: I43ec97d5123405a4f786f1f09bff1a2414538f93

Change Summary

Incremental Difference

--- a/libc/bionic/atexit.cpp
+++ b/libc/bionic/atexit.cpp
@@ -73,50 +73,42 @@ class AtexitArray {
7373 // restart concurrent __cxa_finalize passes.
7474 uint64_t total_appends_;
7575
76- static size_t round_up_to_page_bytes(size_t capacity) {
77- return PAGE_END(capacity * sizeof(AtexitEntry));
78- }
79-
80- static size_t next_capacity(size_t capacity) {
81- // Double the capacity each time.
82- size_t result = round_up_to_page_bytes(MAX(1, capacity * 2)) / sizeof(AtexitEntry);
83- CHECK(result > capacity);
84- return result;
85- }
76+ static size_t page_start_of_index(size_t idx) { return PAGE_START(idx * sizeof(AtexitEntry)); }
77+ static size_t page_end_of_index(size_t idx) { return PAGE_END(idx * sizeof(AtexitEntry)); }
8678
8779 // Recompact the array if it will save at least one page of memory at the end.
88- bool needs_recompaction() {
89- return round_up_to_page_bytes(size_ - extracted_count_) < round_up_to_page_bytes(size_);
80+ bool needs_recompaction() const {
81+ return page_end_of_index(size_ - extracted_count_) < page_end_of_index(size_);
9082 }
9183
92- void set_writable(bool writable);
84+ void set_writable(bool writable, size_t start_idx, size_t num_entries);
85+ static bool next_capacity(size_t capacity, size_t* result);
9386 bool expand_capacity();
9487 };
9588
9689 } // anonymous namespace
9790
9891 bool AtexitArray::append_entry(const AtexitEntry& entry) {
99- bool result = false;
92+ if (size_ >= capacity_ && !expand_capacity()) return false;
10093
101- set_writable(true);
102- if (size_ < capacity_ || expand_capacity()) {
103- array_[size_++] = entry;
104- ++total_appends_;
105- result = true;
106- }
107- set_writable(false);
94+ size_t idx = size_++;
10895
109- return result;
96+ set_writable(true, idx, 1);
97+ array_[idx] = entry;
98+ ++total_appends_;
99+ set_writable(false, idx, 1);
100+
101+ return true;
110102 }
111103
112104 // Extract an entry and return it.
113105 AtexitEntry AtexitArray::extract_entry(size_t idx) {
114106 AtexitEntry result = array_[idx];
115107
116- set_writable(true);
108+ set_writable(true, idx, 1);
117109 array_[idx] = {};
118110 ++extracted_count_;
119- set_writable(false);
111+ set_writable(false, idx, 1);
120112
121113 return result;
122114 }
@@ -124,7 +116,7 @@ AtexitEntry AtexitArray::extract_entry(size_t idx) {
124116 void AtexitArray::recompact() {
125117 if (!needs_recompaction()) return;
126118
127- set_writable(true);
119+ set_writable(true, 0, size_);
128120
129121 // Optimization: quickly skip over the initial non-null entries.
130122 size_t src = 0, dst = 0;
@@ -143,51 +135,79 @@ void AtexitArray::recompact() {
143135 }
144136
145137 // If the table uses fewer pages, clean the pages at the end.
146- size_t old_bytes = round_up_to_page_bytes(size_);
147- size_t new_bytes = round_up_to_page_bytes(dst);
138+ size_t old_bytes = page_end_of_index(size_);
139+ size_t new_bytes = page_end_of_index(dst);
148140 if (new_bytes < old_bytes) {
149141 madvise(reinterpret_cast<char*>(array_) + new_bytes, old_bytes - new_bytes, MADV_DONTNEED);
150142 }
151143
144+ set_writable(false, 0, size_);
145+
152146 size_ = dst;
153147 extracted_count_ = 0;
154-
155- set_writable(false);
156148 }
157149
158150 // Use mprotect to make the array writable or read-only. Returns true on success. Making the array
159151 // read-only could protect against either unintentional or malicious corruption of the array.
160-void AtexitArray::set_writable(bool writable) {
152+void AtexitArray::set_writable(bool writable, size_t start_idx, size_t num_entries) {
161153 if (array_ == nullptr) return;
154+
155+ const size_t start_byte = page_start_of_index(start_idx);
156+ const size_t stop_byte = page_end_of_index(start_idx + num_entries);
157+ const size_t byte_len = stop_byte - start_byte;
158+
162159 const int prot = PROT_READ | (writable ? PROT_WRITE : 0);
163- if (mprotect(array_, round_up_to_page_bytes(capacity_), prot) != 0) {
160+ if (mprotect(reinterpret_cast<char*>(array_) + start_byte, byte_len, prot) != 0) {
164161 async_safe_fatal("mprotect failed on atexit array: %s", strerror(errno));
165162 }
166163 }
167164
165+// Approximately double the capacity. Returns true if successful (no overflow). AtexitEntry is
166+// smaller than a page, but this function should still be correct even if AtexitEntry were larger
167+// than one.
168+bool AtexitArray::next_capacity(size_t capacity, size_t* result) {
169+ if (capacity == 0) {
170+ *result = PAGE_END(sizeof(AtexitEntry)) / sizeof(AtexitEntry);
171+ return true;
172+ }
173+ size_t num_bytes;
174+ if (__builtin_mul_overflow(page_end_of_index(capacity), 2, &num_bytes)) {
175+ async_safe_format_log(ANDROID_LOG_WARN, "libc", "__cxa_atexit: capacity calculation overflow");
176+ return false;
177+ }
178+ *result = num_bytes / sizeof(AtexitEntry);
179+ return true;
180+}
181+
168182 bool AtexitArray::expand_capacity() {
169- const size_t new_capacity = next_capacity(capacity_);
170- const size_t new_capacity_bytes = round_up_to_page_bytes(new_capacity);
183+ size_t new_capacity;
184+ if (!next_capacity(capacity_, &new_capacity)) return false;
185+ const size_t new_capacity_bytes = page_end_of_index(new_capacity);
186+
187+ set_writable(true, 0, capacity_);
171188
189+ bool result = false;
172190 void* new_pages;
173191 if (array_ == nullptr) {
174192 new_pages = mmap(nullptr, new_capacity_bytes, PROT_READ | PROT_WRITE,
175193 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
176194 } else {
177- new_pages =
178- mremap(array_, round_up_to_page_bytes(capacity_), new_capacity_bytes, MREMAP_MAYMOVE);
195+ // mremap fails if the source buffer crosses a boundary between two VMAs. When a single array
196+ // element is modified, the kernel should split then rejoin the buffer's VMA.
197+ new_pages = mremap(array_, page_end_of_index(capacity_), new_capacity_bytes, MREMAP_MAYMOVE);
179198 }
180199 if (new_pages == MAP_FAILED) {
181200 async_safe_format_log(ANDROID_LOG_WARN, "libc",
182201 "__cxa_atexit: mmap/mremap failed to allocate %zu bytes: %s",
183202 new_capacity_bytes, strerror(errno));
184- return false;
203+ } else {
204+ result = true;
205+ prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, new_pages, new_capacity_bytes, "atexit handlers");
206+ array_ = static_cast<AtexitEntry*>(new_pages);
207+ capacity_ = new_capacity;
185208 }
186-
187- prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, new_pages, new_capacity_bytes, "atexit handlers");
188- array_ = static_cast<AtexitEntry*>(new_pages);
189- capacity_ = new_capacity;
190- return true;
209+ set_writable(false, 0, capacity_);
210+ return result;
191211 }
192212
193213 static AtexitArray g_array;
Show on old repository browser