author    Nikhil Punathil <nikhilpe@gmail.com>  2019-06-17 21:14:34 -0700
committer Arian <arian.kulmer@web.de>           2019-10-25 22:16:13 +0200
commit    fd201d3c1df49d4af95d2c48709b5a98b6e6d8e9 (patch)
tree      b4f84e360112bf9b36f05a35f8a305f08d8fbfdc /libshims/utils
parent    e37adcfc67e23451386efab1ca7c2a7774829cb7 (diff)
shinano-common: bring in libshim_camera from msm8974-common
We need updated sources that break rhine compatibility for a working camera.

Change-Id: I274c91efc797b0304a2074baf4b908766d321356
Signed-off-by: Nikhil Punathil <nikhilpe@gmail.com>
Diffstat (limited to 'libshims/utils')
-rw-r--r--  libshims/utils/Looper.cpp      573
-rw-r--r--  libshims/utils/VectorImpl.cpp  691
2 files changed, 1264 insertions, 0 deletions
diff --git a/libshims/utils/Looper.cpp b/libshims/utils/Looper.cpp
new file mode 100644
index 0000000..4bab6d5
--- /dev/null
+++ b/libshims/utils/Looper.cpp
@@ -0,0 +1,573 @@
+//
+// Copyright 2010 The Android Open Source Project
+//
+// A looper implementation based on epoll().
+//
+#define LOG_TAG "Looper"
+
+//#define LOG_NDEBUG 0
+
+// Debugs poll and wake interactions.
+#define DEBUG_POLL_AND_WAKE 0
+
+// Debugs callback registration and invocation.
+#define DEBUG_CALLBACKS 0
+
+#include <cutils/log.h>
+#include <utils/Looper.h>
+#include <utils/Timers.h>
+
+#include <unistd.h>
+#include <fcntl.h>
+#include <limits.h>
+
+
+namespace android {
+
+// --- WeakMessageHandler ---
+
+WeakMessageHandler::WeakMessageHandler(const wp<MessageHandler>& handler) :
+ mHandler(handler) {
+}
+
+WeakMessageHandler::~WeakMessageHandler() {
+}
+
+void WeakMessageHandler::handleMessage(const Message& message) {
+ sp<MessageHandler> handler = mHandler.promote();
+ if (handler != NULL) {
+ handler->handleMessage(message);
+ }
+}
+
+
+// --- SimpleLooperCallback ---
+
+SimpleLooperCallback::SimpleLooperCallback(Looper_callbackFunc callback) :
+ mCallback(callback) {
+}
+
+SimpleLooperCallback::~SimpleLooperCallback() {
+}
+
+int SimpleLooperCallback::handleEvent(int fd, int events, void* data) {
+ return mCallback(fd, events, data);
+}
+
+
+// --- Looper ---
+
+// Hint for number of file descriptors to be associated with the epoll instance.
+static const int EPOLL_SIZE_HINT = 8;
+
+// Maximum number of file descriptors for which to retrieve poll events each iteration.
+static const int EPOLL_MAX_EVENTS = 16;
+
+static pthread_once_t gTLSOnce = PTHREAD_ONCE_INIT;
+static pthread_key_t gTLSKey = 0;
+
+Looper::Looper(bool allowNonCallbacks) :
+ mAllowNonCallbacks(allowNonCallbacks), mSendingMessage(false),
+ mResponseIndex(0), mNextMessageUptime(LLONG_MAX) {
+ int wakeFds[2];
+ int result = pipe(wakeFds);
+ LOG_ALWAYS_FATAL_IF(result != 0, "Could not create wake pipe. errno=%d", errno);
+
+ mWakeReadPipeFd = wakeFds[0];
+ mWakeWritePipeFd = wakeFds[1];
+
+ result = fcntl(mWakeReadPipeFd, F_SETFL, O_NONBLOCK);
+ LOG_ALWAYS_FATAL_IF(result != 0, "Could not make wake read pipe non-blocking. errno=%d",
+ errno);
+
+ result = fcntl(mWakeWritePipeFd, F_SETFL, O_NONBLOCK);
+ LOG_ALWAYS_FATAL_IF(result != 0, "Could not make wake write pipe non-blocking. errno=%d",
+ errno);
+
+ mPolling = false;
+
+ // Allocate the epoll instance and register the wake pipe.
+ mEpollFd = epoll_create(EPOLL_SIZE_HINT);
+ LOG_ALWAYS_FATAL_IF(mEpollFd < 0, "Could not create epoll instance. errno=%d", errno);
+
+ struct epoll_event eventItem;
+ memset(& eventItem, 0, sizeof(epoll_event)); // zero out unused members of data field union
+ eventItem.events = EPOLLIN;
+ eventItem.data.fd = mWakeReadPipeFd;
+ result = epoll_ctl(mEpollFd, EPOLL_CTL_ADD, mWakeReadPipeFd, & eventItem);
+ LOG_ALWAYS_FATAL_IF(result != 0, "Could not add wake read pipe to epoll instance. errno=%d",
+ errno);
+}
+
+Looper::~Looper() {
+ close(mWakeReadPipeFd);
+ close(mWakeWritePipeFd);
+ close(mEpollFd);
+}
+
+void Looper::initTLSKey() {
+ int result = pthread_key_create(& gTLSKey, threadDestructor);
+ LOG_ALWAYS_FATAL_IF(result != 0, "Could not allocate TLS key.");
+}
+
+void Looper::threadDestructor(void *st) {
+ Looper* const self = static_cast<Looper*>(st);
+ if (self != NULL) {
+ self->decStrong((void*)threadDestructor);
+ }
+}
+
+void Looper::setForThread(const sp<Looper>& looper) {
+ sp<Looper> old = getForThread(); // also has side-effect of initializing TLS
+
+ if (looper != NULL) {
+ looper->incStrong((void*)threadDestructor);
+ }
+
+ pthread_setspecific(gTLSKey, looper.get());
+
+ if (old != NULL) {
+ old->decStrong((void*)threadDestructor);
+ }
+}
+
+sp<Looper> Looper::getForThread() {
+ int result = pthread_once(& gTLSOnce, initTLSKey);
+ LOG_ALWAYS_FATAL_IF(result != 0, "pthread_once failed");
+
+ return (Looper*)pthread_getspecific(gTLSKey);
+}
+
+sp<Looper> Looper::prepare(int opts) {
+ bool allowNonCallbacks = opts & PREPARE_ALLOW_NON_CALLBACKS;
+ sp<Looper> looper = Looper::getForThread();
+ if (looper == NULL) {
+ looper = new Looper(allowNonCallbacks);
+ Looper::setForThread(looper);
+ }
+ if (looper->getAllowNonCallbacks() != allowNonCallbacks) {
+ ALOGW("Looper already prepared for this thread with a different value for the "
+ "LOOPER_PREPARE_ALLOW_NON_CALLBACKS option.");
+ }
+ return looper;
+}
+
+bool Looper::getAllowNonCallbacks() const {
+ return mAllowNonCallbacks;
+}
+
+int Looper::pollOnce(int timeoutMillis, int* outFd, int* outEvents, void** outData) {
+ int result = 0;
+ for (;;) {
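+        // First hand out any queued responses that carry a caller-visible
+        // identifier (ident >= 0); callback-type responses were already
+        // serviced inside pollInner().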
+ while (mResponseIndex < mResponses.size()) {
+ const Response& response = mResponses.itemAt(mResponseIndex++);
+ int ident = response.request.ident;
+ if (ident >= 0) {
+ int fd = response.request.fd;
+ int events = response.events;
+ void* data = response.request.data;
+#if DEBUG_POLL_AND_WAKE
+ ALOGD("%p ~ pollOnce - returning signalled identifier %d: "
+ "fd=%d, events=0x%x, data=%p",
+ this, ident, fd, events, data);
+#endif
+ if (outFd != NULL) *outFd = fd;
+ if (outEvents != NULL) *outEvents = events;
+ if (outData != NULL) *outData = data;
+ return ident;
+ }
+ }
+
+ if (result != 0) {
+#if DEBUG_POLL_AND_WAKE
+ ALOGD("%p ~ pollOnce - returning result %d", this, result);
+#endif
+ if (outFd != NULL) *outFd = 0;
+ if (outEvents != NULL) *outEvents = 0;
+ if (outData != NULL) *outData = NULL;
+ return result;
+ }
+
+ result = pollInner(timeoutMillis);
+ }
+}
+
+int Looper::pollInner(int timeoutMillis) {
+#if DEBUG_POLL_AND_WAKE
+ ALOGD("%p ~ pollOnce - waiting: timeoutMillis=%d", this, timeoutMillis);
+#endif
+
+ // Adjust the timeout based on when the next message is due.
+ if (timeoutMillis != 0 && mNextMessageUptime != LLONG_MAX) {
+ nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
+ int messageTimeoutMillis = toMillisecondTimeoutDelay(now, mNextMessageUptime);
+ if (messageTimeoutMillis >= 0
+ && (timeoutMillis < 0 || messageTimeoutMillis < timeoutMillis)) {
+ timeoutMillis = messageTimeoutMillis;
+ }
+#if DEBUG_POLL_AND_WAKE
+ ALOGD("%p ~ pollOnce - next message in %lldns, adjusted timeout: timeoutMillis=%d",
+ this, mNextMessageUptime - now, timeoutMillis);
+#endif
+ }
+
+ // Poll.
+ int result = POLL_WAKE;
+ mResponses.clear();
+ mResponseIndex = 0;
+
+ // We are about to idle.
+ mPolling = true;
+
+ struct epoll_event eventItems[EPOLL_MAX_EVENTS];
+ int eventCount = epoll_wait(mEpollFd, eventItems, EPOLL_MAX_EVENTS, timeoutMillis);
+
+ // No longer idling.
+ mPolling = false;
+
+ // Acquire lock.
+ mLock.lock();
+
+ // Check for poll error.
+ if (eventCount < 0) {
+ if (errno == EINTR) {
+ goto Done;
+ }
+ ALOGW("Poll failed with an unexpected error, errno=%d", errno);
+ result = POLL_ERROR;
+ goto Done;
+ }
+
+ // Check for poll timeout.
+ if (eventCount == 0) {
+#if DEBUG_POLL_AND_WAKE
+ ALOGD("%p ~ pollOnce - timeout", this);
+#endif
+ result = POLL_TIMEOUT;
+ goto Done;
+ }
+
+ // Handle all events.
+#if DEBUG_POLL_AND_WAKE
+ ALOGD("%p ~ pollOnce - handling events from %d fds", this, eventCount);
+#endif
+
+ for (int i = 0; i < eventCount; i++) {
+ int fd = eventItems[i].data.fd;
+ uint32_t epollEvents = eventItems[i].events;
+ if (fd == mWakeReadPipeFd) {
+ if (epollEvents & EPOLLIN) {
+ awoken();
+ } else {
+ ALOGW("Ignoring unexpected epoll events 0x%x on wake read pipe.", epollEvents);
+ }
+ } else {
+ ssize_t requestIndex = mRequests.indexOfKey(fd);
+ if (requestIndex >= 0) {
+ int events = 0;
+ if (epollEvents & EPOLLIN) events |= EVENT_INPUT;
+ if (epollEvents & EPOLLOUT) events |= EVENT_OUTPUT;
+ if (epollEvents & EPOLLERR) events |= EVENT_ERROR;
+ if (epollEvents & EPOLLHUP) events |= EVENT_HANGUP;
+ pushResponse(events, mRequests.valueAt(requestIndex));
+ } else {
+ ALOGW("Ignoring unexpected epoll events 0x%x on fd %d that is "
+ "no longer registered.", epollEvents, fd);
+ }
+ }
+ }
+Done: ;
+
+ // Invoke pending message callbacks.
+ mNextMessageUptime = LLONG_MAX;
+ while (mMessageEnvelopes.size() != 0) {
+ nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
+ const MessageEnvelope& messageEnvelope = mMessageEnvelopes.itemAt(0);
+ if (messageEnvelope.uptime <= now) {
+ // Remove the envelope from the list.
+ // We keep a strong reference to the handler until the call to handleMessage
+ // finishes. Then we drop it so that the handler can be deleted *before*
+ // we reacquire our lock.
+ { // obtain handler
+ sp<MessageHandler> handler = messageEnvelope.handler;
+ Message message = messageEnvelope.message;
+ mMessageEnvelopes.removeAt(0);
+ mSendingMessage = true;
+ mLock.unlock();
+
+#if DEBUG_POLL_AND_WAKE || DEBUG_CALLBACKS
+ ALOGD("%p ~ pollOnce - sending message: handler=%p, what=%d",
+ this, handler.get(), message.what);
+#endif
+ handler->handleMessage(message);
+ } // release handler
+
+ mLock.lock();
+ mSendingMessage = false;
+ result = POLL_CALLBACK;
+ } else {
+ // The last message left at the head of the queue determines the next wakeup time.
+ mNextMessageUptime = messageEnvelope.uptime;
+ break;
+ }
+ }
+
+ // Release lock.
+ mLock.unlock();
+
+ // Invoke all response callbacks.
+ for (size_t i = 0; i < mResponses.size(); i++) {
+ Response& response = mResponses.editItemAt(i);
+ if (response.request.ident == POLL_CALLBACK) {
+ int fd = response.request.fd;
+ int events = response.events;
+ void* data = response.request.data;
+#if DEBUG_POLL_AND_WAKE || DEBUG_CALLBACKS
+ ALOGD("%p ~ pollOnce - invoking fd event callback %p: fd=%d, events=0x%x, data=%p",
+ this, response.request.callback.get(), fd, events, data);
+#endif
+ int callbackResult = response.request.callback->handleEvent(fd, events, data);
+ if (callbackResult == 0) {
+ removeFd(fd);
+ }
+ // Clear the callback reference in the response structure promptly because we
+ // will not clear the response vector itself until the next poll.
+ response.request.callback.clear();
+ result = POLL_CALLBACK;
+ }
+ }
+ return result;
+}
+
+int Looper::pollAll(int timeoutMillis, int* outFd, int* outEvents, void** outData) {
+ if (timeoutMillis <= 0) {
+ int result;
+ do {
+ result = pollOnce(timeoutMillis, outFd, outEvents, outData);
+ } while (result == POLL_CALLBACK);
+ return result;
+ } else {
+ nsecs_t endTime = systemTime(SYSTEM_TIME_MONOTONIC)
+ + milliseconds_to_nanoseconds(timeoutMillis);
+
+ for (;;) {
+ int result = pollOnce(timeoutMillis, outFd, outEvents, outData);
+ if (result != POLL_CALLBACK) {
+ return result;
+ }
+
+ nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
+ timeoutMillis = toMillisecondTimeoutDelay(now, endTime);
+ if (timeoutMillis == 0) {
+ return POLL_TIMEOUT;
+ }
+ }
+ }
+}
+
+void Looper::wake() {
+#if DEBUG_POLL_AND_WAKE
+ ALOGD("%p ~ wake", this);
+#endif
+
+ ssize_t nWrite;
+ do {
+ nWrite = write(mWakeWritePipeFd, "W", 1);
+ } while (nWrite == -1 && errno == EINTR);
+
+ if (nWrite != 1) {
+ if (errno != EAGAIN) {
+ ALOGW("Could not write wake signal, errno=%d", errno);
+ }
+ }
+}
+
+void Looper::awoken() {
+#if DEBUG_POLL_AND_WAKE
+ ALOGD("%p ~ awoken", this);
+#endif
+
+ char buffer[16];
+ ssize_t nRead;
+ do {
+ nRead = read(mWakeReadPipeFd, buffer, sizeof(buffer));
+ } while ((nRead == -1 && errno == EINTR) || nRead == sizeof(buffer));
+}
+
+void Looper::pushResponse(int events, const Request& request) {
+ Response response;
+ response.events = events;
+ response.request = request;
+ mResponses.push(response);
+}
+
+int Looper::addFd(int fd, int ident, int events, Looper_callbackFunc callback, void* data) {
+ return addFd(fd, ident, events, callback ? new SimpleLooperCallback(callback) : NULL, data);
+}
+
+int Looper::addFd(int fd, int ident, int events, const sp<LooperCallback>& callback, void* data) {
+#if DEBUG_CALLBACKS
+ ALOGD("%p ~ addFd - fd=%d, ident=%d, events=0x%x, callback=%p, data=%p", this, fd, ident,
+ events, callback.get(), data);
+#endif
+
+ if (!callback.get()) {
+ if (! mAllowNonCallbacks) {
+ ALOGE("Invalid attempt to set NULL callback but not allowed for this looper.");
+ return -1;
+ }
+
+ if (ident < 0) {
+ ALOGE("Invalid attempt to set NULL callback with ident < 0.");
+ return -1;
+ }
+ } else {
+ ident = POLL_CALLBACK;
+ }
+
+ int epollEvents = 0;
+ if (events & EVENT_INPUT) epollEvents |= EPOLLIN;
+ if (events & EVENT_OUTPUT) epollEvents |= EPOLLOUT;
+
+ { // acquire lock
+ AutoMutex _l(mLock);
+
+ Request request;
+ request.fd = fd;
+ request.ident = ident;
+ request.callback = callback;
+ request.data = data;
+
+ struct epoll_event eventItem;
+ memset(& eventItem, 0, sizeof(epoll_event)); // zero out unused members of data field union
+ eventItem.events = epollEvents;
+ eventItem.data.fd = fd;
+
+ ssize_t requestIndex = mRequests.indexOfKey(fd);
+ if (requestIndex < 0) {
+ int epollResult = epoll_ctl(mEpollFd, EPOLL_CTL_ADD, fd, & eventItem);
+ if (epollResult < 0) {
+ ALOGE("Error adding epoll events for fd %d, errno=%d", fd, errno);
+ return -1;
+ }
+ mRequests.add(fd, request);
+ } else {
+ int epollResult = epoll_ctl(mEpollFd, EPOLL_CTL_MOD, fd, & eventItem);
+ if (epollResult < 0) {
+ ALOGE("Error modifying epoll events for fd %d, errno=%d", fd, errno);
+ return -1;
+ }
+ mRequests.replaceValueAt(requestIndex, request);
+ }
+ } // release lock
+ return 1;
+}
+
+int Looper::removeFd(int fd) {
+#if DEBUG_CALLBACKS
+ ALOGD("%p ~ removeFd - fd=%d", this, fd);
+#endif
+
+ { // acquire lock
+ AutoMutex _l(mLock);
+ ssize_t requestIndex = mRequests.indexOfKey(fd);
+ if (requestIndex < 0) {
+ return 0;
+ }
+
+ int epollResult = epoll_ctl(mEpollFd, EPOLL_CTL_DEL, fd, NULL);
+ if (epollResult < 0) {
+ ALOGE("Error removing epoll events for fd %d, errno=%d", fd, errno);
+ return -1;
+ }
+
+ mRequests.removeItemsAt(requestIndex);
+ } // release lock
+ return 1;
+}
+
+void Looper::sendMessage(const sp<MessageHandler>& handler, const Message& message) {
+ nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
+ sendMessageAtTime(now, handler, message);
+}
+
+void Looper::sendMessageDelayed(nsecs_t uptimeDelay, const sp<MessageHandler>& handler,
+ const Message& message) {
+ nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
+ sendMessageAtTime(now + uptimeDelay, handler, message);
+}
+
+void Looper::sendMessageAtTime(nsecs_t uptime, const sp<MessageHandler>& handler,
+ const Message& message) {
+#if DEBUG_CALLBACKS
+ ALOGD("%p ~ sendMessageAtTime - uptime=%lld, handler=%p, what=%d",
+ this, uptime, handler.get(), message.what);
+#endif
+
+ size_t i = 0;
+ { // acquire lock
+ AutoMutex _l(mLock);
+
+ size_t messageCount = mMessageEnvelopes.size();
+ while (i < messageCount && uptime >= mMessageEnvelopes.itemAt(i).uptime) {
+ i += 1;
+ }
+
+ MessageEnvelope messageEnvelope(uptime, handler, message);
+ mMessageEnvelopes.insertAt(messageEnvelope, i, 1);
+
+ // Optimization: If the Looper is currently sending a message, then we can skip
+ // the call to wake() because the next thing the Looper will do after processing
+ // messages is to decide when the next wakeup time should be. In fact, it does
+ // not even matter whether this code is running on the Looper thread.
+ if (mSendingMessage) {
+ return;
+ }
+ } // release lock
+
+ // Wake the poll loop only when we enqueue a new message at the head.
+ if (i == 0) {
+ wake();
+ }
+}
+
+void Looper::removeMessages(const sp<MessageHandler>& handler) {
+#if DEBUG_CALLBACKS
+ ALOGD("%p ~ removeMessages - handler=%p", this, handler.get());
+#endif
+
+ { // acquire lock
+ AutoMutex _l(mLock);
+
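+        // Walk backwards so removeAt() does not shift entries not yet visited.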
+ for (size_t i = mMessageEnvelopes.size(); i != 0; ) {
+ const MessageEnvelope& messageEnvelope = mMessageEnvelopes.itemAt(--i);
+ if (messageEnvelope.handler == handler) {
+ mMessageEnvelopes.removeAt(i);
+ }
+ }
+ } // release lock
+}
+
+void Looper::removeMessages(const sp<MessageHandler>& handler, int what) {
+#if DEBUG_CALLBACKS
+ ALOGD("%p ~ removeMessages - handler=%p, what=%d", this, handler.get(), what);
+#endif
+
+ { // acquire lock
+ AutoMutex _l(mLock);
+
+ for (size_t i = mMessageEnvelopes.size(); i != 0; ) {
+ const MessageEnvelope& messageEnvelope = mMessageEnvelopes.itemAt(--i);
+ if (messageEnvelope.handler == handler
+ && messageEnvelope.message.what == what) {
+ mMessageEnvelopes.removeAt(i);
+ }
+ }
+ } // release lock
+}
+
+bool Looper::isPolling() const {
+ return mPolling;
+}
+
+} // namespace android
\ No newline at end of file
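For orientation, here is a minimal, hypothetical consumer of the Looper API this shim restores. It is not part of the commit; it assumes the standard AOSP <utils/Looper.h> interface that the code above implements (Looper::prepare, addFd, pollOnce).

    // Hypothetical usage sketch only; not part of this commit.
    #include <unistd.h>
    #include <utils/Looper.h>

    using android::Looper;
    using android::sp;

    static int onPipeReadable(int fd, int /*events*/, void* /*data*/) {
        char buf[64];
        read(fd, buf, sizeof(buf));  // consume the data that woke us
        return 1;                    // keep the fd registered (0 would remove it)
    }

    int main() {
        // One Looper per thread; prepare() creates it and caches it in TLS.
        sp<Looper> looper = Looper::prepare(0 /*opts*/);

        int fds[2];
        pipe(fds);
        looper->addFd(fds[0], 0 /*ident*/, Looper::EVENT_INPUT,
                      onPipeReadable, nullptr);

        write(fds[1], "x", 1);  // make the fd readable
        // pollOnce() blocks in epoll_wait(), invokes the callback, and
        // returns POLL_CALLBACK.
        looper->pollOnce(-1 /*block indefinitely*/);
        return 0;
    }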
diff --git a/libshims/utils/VectorImpl.cpp b/libshims/utils/VectorImpl.cpp
new file mode 100644
index 0000000..936cd37
--- /dev/null
+++ b/libshims/utils/VectorImpl.cpp
@@ -0,0 +1,691 @@
+/*
+ * Copyright (C) 2005 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "Vector"
+
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include <log/log.h>
+#include <safe_iop.h>
+
+#include <SharedBuffer.h>
+#include <utils/Errors.h>
+#include <utils/VectorImpl.h>
+
+/*****************************************************************************/
+
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+const size_t kMinVectorCapacity = 4;
+
+static inline size_t max(size_t a, size_t b) {
+ return a>b ? a : b;
+}
+
+// ----------------------------------------------------------------------------
+
+VectorImpl::VectorImpl(size_t itemSize, uint32_t flags)
+ : mStorage(0), mCount(0), mFlags(flags), mItemSize(itemSize)
+{
+}
+
+VectorImpl::VectorImpl(const VectorImpl& rhs)
+ : mStorage(rhs.mStorage), mCount(rhs.mCount),
+ mFlags(rhs.mFlags), mItemSize(rhs.mItemSize)
+{
+ if (mStorage) {
+ SharedBuffer::bufferFromData(mStorage)->acquire();
+ }
+}
+
+VectorImpl::~VectorImpl()
+{
+ ALOGW_IF(mCount,
+ "[%p] subclasses of VectorImpl must call finish_vector()"
+ " in their destructor. Leaking %d bytes.",
+ this, (int)(mCount*mItemSize));
+ // We can't call _do_destroy() here because the vtable is already gone.
+}
+
+VectorImpl& VectorImpl::operator = (const VectorImpl& rhs)
+{
+ LOG_ALWAYS_FATAL_IF(mItemSize != rhs.mItemSize,
+ "Vector<> have different types (this=%p, rhs=%p)", this, &rhs);
+ if (this != &rhs) {
+ release_storage();
+ if (rhs.mCount) {
+ mStorage = rhs.mStorage;
+ mCount = rhs.mCount;
+ SharedBuffer::bufferFromData(mStorage)->acquire();
+ } else {
+ mStorage = 0;
+ mCount = 0;
+ }
+ }
+ return *this;
+}
+
+void* VectorImpl::editArrayImpl()
+{
+ if (mStorage) {
+ const SharedBuffer* sb = SharedBuffer::bufferFromData(mStorage);
+ SharedBuffer* editable = sb->attemptEdit();
+ if (editable == 0) {
+ // If we're here, we're not the only owner of the buffer.
+ // We must make a copy of it.
+ editable = SharedBuffer::alloc(sb->size());
+ // Fail instead of returning a pointer to storage that's not
+ // editable. Otherwise we'd be editing the contents of a buffer
+ // for which we're not the only owner, which is undefined behaviour.
+ LOG_ALWAYS_FATAL_IF(editable == NULL);
+ _do_copy(editable->data(), mStorage, mCount);
+ release_storage();
+ mStorage = editable->data();
+ }
+ }
+ return mStorage;
+}
+
+size_t VectorImpl::capacity() const
+{
+ if (mStorage) {
+ return SharedBuffer::bufferFromData(mStorage)->size() / mItemSize;
+ }
+ return 0;
+}
+
+ssize_t VectorImpl::insertVectorAt(const VectorImpl& vector, size_t index)
+{
+ return insertArrayAt(vector.arrayImpl(), index, vector.size());
+}
+
+ssize_t VectorImpl::appendVector(const VectorImpl& vector)
+{
+ return insertVectorAt(vector, size());
+}
+
+ssize_t VectorImpl::insertArrayAt(const void* array, size_t index, size_t length)
+{
+ if (index > size())
+ return BAD_INDEX;
+ void* where = _grow(index, length);
+ if (where) {
+ _do_copy(where, array, length);
+ }
+ return where ? index : (ssize_t)NO_MEMORY;
+}
+
+ssize_t VectorImpl::appendArray(const void* array, size_t length)
+{
+ return insertArrayAt(array, size(), length);
+}
+
+ssize_t VectorImpl::insertAt(size_t index, size_t numItems)
+{
+ return insertAt(0, index, numItems);
+}
+
+ssize_t VectorImpl::insertAt(const void* item, size_t index, size_t numItems)
+{
+ if (index > size())
+ return BAD_INDEX;
+ void* where = _grow(index, numItems);
+ if (where) {
+ if (item) {
+ _do_splat(where, item, numItems);
+ } else {
+ _do_construct(where, numItems);
+ }
+ }
+ return where ? index : (ssize_t)NO_MEMORY;
+}
+
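+// Adapts a plain compar_t to the re-entrant compar_r_t signature used by
+// sort(); the user's comparator travels through the opaque 'func' pointer.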
+static int sortProxy(const void* lhs, const void* rhs, void* func)
+{
+ return (*(VectorImpl::compar_t)func)(lhs, rhs);
+}
+
+status_t VectorImpl::sort(VectorImpl::compar_t cmp)
+{
+ return sort(sortProxy, (void*)cmp);
+}
+
+status_t VectorImpl::sort(VectorImpl::compar_r_t cmp, void* state)
+{
+ // the sort must be stable. we're using insertion sort which
+ // is well suited for small and already sorted arrays
+ // for big arrays, it could be better to use mergesort
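+    // The array is only made editable (copy-on-write) and 'temp' allocated
+    // the first time an out-of-order pair is actually found.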
+ const ssize_t count = size();
+ if (count > 1) {
+ void* array = const_cast<void*>(arrayImpl());
+ void* temp = 0;
+ ssize_t i = 1;
+ while (i < count) {
+ void* item = reinterpret_cast<char*>(array) + mItemSize*(i);
+ void* curr = reinterpret_cast<char*>(array) + mItemSize*(i-1);
+ if (cmp(curr, item, state) > 0) {
+
+ if (!temp) {
+ // we're going to have to modify the array...
+ array = editArrayImpl();
+ if (!array) return NO_MEMORY;
+ temp = malloc(mItemSize);
+ if (!temp) return NO_MEMORY;
+ item = reinterpret_cast<char*>(array) + mItemSize*(i);
+ curr = reinterpret_cast<char*>(array) + mItemSize*(i-1);
+ } else {
+ _do_destroy(temp, 1);
+ }
+
+ _do_copy(temp, item, 1);
+
+ ssize_t j = i-1;
+ void* next = reinterpret_cast<char*>(array) + mItemSize*(i);
+ do {
+ _do_destroy(next, 1);
+ _do_copy(next, curr, 1);
+ next = curr;
+                    --j;
+                    // Avoid forming a pointer before the array when j == -1.
+                    curr = NULL;
+                    if (j >= 0) {
+                        curr = reinterpret_cast<char*>(array) + mItemSize*(j);
+                    }
+                } while (j >= 0 && (cmp(curr, temp, state) > 0));
+
+ _do_destroy(next, 1);
+ _do_copy(next, temp, 1);
+ }
+ i++;
+ }
+
+ if (temp) {
+ _do_destroy(temp, 1);
+ free(temp);
+ }
+ }
+ return NO_ERROR;
+}
+
+void VectorImpl::pop()
+{
+ if (size())
+ removeItemsAt(size()-1, 1);
+}
+
+void VectorImpl::push()
+{
+ push(0);
+}
+
+void VectorImpl::push(const void* item)
+{
+ insertAt(item, size());
+}
+
+ssize_t VectorImpl::add()
+{
+ return add(0);
+}
+
+ssize_t VectorImpl::add(const void* item)
+{
+ return insertAt(item, size());
+}
+
+ssize_t VectorImpl::replaceAt(size_t index)
+{
+ return replaceAt(0, index);
+}
+
+ssize_t VectorImpl::replaceAt(const void* prototype, size_t index)
+{
+ ALOG_ASSERT(index<size(),
+ "[%p] replace: index=%d, size=%d", this, (int)index, (int)size());
+
+ if (index >= size()) {
+ return BAD_INDEX;
+ }
+
+ void* item = editItemLocation(index);
+ if (item != prototype) {
+ if (item == 0)
+ return NO_MEMORY;
+ _do_destroy(item, 1);
+ if (prototype == 0) {
+ _do_construct(item, 1);
+ } else {
+ _do_copy(item, prototype, 1);
+ }
+ }
+ return ssize_t(index);
+}
+
+ssize_t VectorImpl::removeItemsAt(size_t index, size_t count)
+{
+ ALOG_ASSERT((index+count)<=size(),
+ "[%p] remove: index=%d, count=%d, size=%d",
+ this, (int)index, (int)count, (int)size());
+
+ if ((index+count) > size())
+ return BAD_VALUE;
+ _shrink(index, count);
+ return index;
+}
+
+void VectorImpl::finish_vector()
+{
+ release_storage();
+ mStorage = 0;
+ mCount = 0;
+}
+
+void VectorImpl::clear()
+{
+ _shrink(0, mCount);
+}
+
+void* VectorImpl::editItemLocation(size_t index)
+{
+ ALOG_ASSERT(index<capacity(),
+ "[%p] editItemLocation: index=%d, capacity=%d, count=%d",
+ this, (int)index, (int)capacity(), (int)mCount);
+
+ if (index < capacity()) {
+ void* buffer = editArrayImpl();
+ if (buffer) {
+ return reinterpret_cast<char*>(buffer) + index*mItemSize;
+ }
+ }
+ return 0;
+}
+
+const void* VectorImpl::itemLocation(size_t index) const
+{
+ ALOG_ASSERT(index<capacity(),
+ "[%p] itemLocation: index=%d, capacity=%d, count=%d",
+ this, (int)index, (int)capacity(), (int)mCount);
+
+ if (index < capacity()) {
+ const void* buffer = arrayImpl();
+ if (buffer) {
+ return reinterpret_cast<const char*>(buffer) + index*mItemSize;
+ }
+ }
+ return 0;
+}
+
+ssize_t VectorImpl::setCapacity(size_t new_capacity)
+{
+ // The capacity must always be greater than or equal to the size
+ // of this vector.
+ if (new_capacity <= size()) {
+ return capacity();
+ }
+
+ size_t new_allocation_size = 0;
+ LOG_ALWAYS_FATAL_IF(!safe_mul(&new_allocation_size, new_capacity, mItemSize));
+ SharedBuffer* sb = SharedBuffer::alloc(new_allocation_size);
+ if (sb) {
+ void* array = sb->data();
+ _do_copy(array, mStorage, size());
+ release_storage();
+ mStorage = const_cast<void*>(array);
+ } else {
+ return NO_MEMORY;
+ }
+ return new_capacity;
+}
+
+ssize_t VectorImpl::resize(size_t size) {
+ ssize_t result = NO_ERROR;
+ if (size > mCount) {
+ result = insertAt(mCount, size - mCount);
+ } else if (size < mCount) {
+ result = removeItemsAt(size, mCount - size);
+ }
+ return result < 0 ? result : size;
+}
+
+void VectorImpl::release_storage()
+{
+ if (mStorage) {
+ const SharedBuffer* sb = SharedBuffer::bufferFromData(mStorage);
+ if (sb->release(SharedBuffer::eKeepStorage) == 1) {
+ _do_destroy(mStorage, mCount);
+ SharedBuffer::dealloc(sb);
+ }
+ }
+}
+
+void* VectorImpl::_grow(size_t where, size_t amount)
+{
+// ALOGV("_grow(this=%p, where=%d, amount=%d) count=%d, capacity=%d",
+// this, (int)where, (int)amount, (int)mCount, (int)capacity());
+
+ ALOG_ASSERT(where <= mCount,
+ "[%p] _grow: where=%d, amount=%d, count=%d",
+ this, (int)where, (int)amount, (int)mCount); // caller already checked
+
+ size_t new_size;
+ LOG_ALWAYS_FATAL_IF(!safe_add(&new_size, mCount, amount), "new_size overflow");
+
+ if (capacity() < new_size) {
+ // NOTE: This implementation used to resize vectors as per ((3*x + 1) / 2)
+ // (sigh..). Also note, the " + 1" was necessary to handle the special case
+ // where x == 1, where the resized_capacity will be equal to the old
+ // capacity without the +1. The old calculation wouldn't work properly
+ // if x was zero.
+ //
+ // This approximates the old calculation, using (x + (x/2) + 1) instead.
+ size_t new_capacity = 0;
+ LOG_ALWAYS_FATAL_IF(!safe_add(&new_capacity, new_size, (new_size / 2)),
+ "new_capacity overflow");
+ LOG_ALWAYS_FATAL_IF(!safe_add(&new_capacity, new_capacity, static_cast<size_t>(1u)),
+ "new_capacity overflow");
+ new_capacity = max(kMinVectorCapacity, new_capacity);
+
+ size_t new_alloc_size = 0;
+ LOG_ALWAYS_FATAL_IF(!safe_mul(&new_alloc_size, new_capacity, mItemSize),
+ "new_alloc_size overflow");
+
+// ALOGV("grow vector %p, new_capacity=%d", this, (int)new_capacity);
+ if ((mStorage) &&
+ (mCount==where) &&
+ (mFlags & HAS_TRIVIAL_COPY) &&
+ (mFlags & HAS_TRIVIAL_DTOR))
+ {
+ const SharedBuffer* cur_sb = SharedBuffer::bufferFromData(mStorage);
+ SharedBuffer* sb = cur_sb->editResize(new_alloc_size);
+ if (sb) {
+ mStorage = sb->data();
+ } else {
+ return NULL;
+ }
+ } else {
+ SharedBuffer* sb = SharedBuffer::alloc(new_alloc_size);
+ if (sb) {
+ void* array = sb->data();
+ if (where != 0) {
+ _do_copy(array, mStorage, where);
+ }
+ if (where != mCount) {
+ const void* from = reinterpret_cast<const uint8_t *>(mStorage) + where*mItemSize;
+ void* dest = reinterpret_cast<uint8_t *>(array) + (where+amount)*mItemSize;
+ _do_copy(dest, from, mCount-where);
+ }
+ release_storage();
+ mStorage = const_cast<void*>(array);
+ } else {
+ return NULL;
+ }
+ }
+ } else {
+ void* array = editArrayImpl();
+ if (where != mCount) {
+ const void* from = reinterpret_cast<const uint8_t *>(array) + where*mItemSize;
+ void* to = reinterpret_cast<uint8_t *>(array) + (where+amount)*mItemSize;
+ _do_move_forward(to, from, mCount - where);
+ }
+ }
+ mCount = new_size;
+ void* free_space = const_cast<void*>(itemLocation(where));
+ return free_space;
+}
+
+void VectorImpl::_shrink(size_t where, size_t amount)
+{
+ if (!mStorage)
+ return;
+
+// ALOGV("_shrink(this=%p, where=%d, amount=%d) count=%d, capacity=%d",
+// this, (int)where, (int)amount, (int)mCount, (int)capacity());
+
+ ALOG_ASSERT(where + amount <= mCount,
+ "[%p] _shrink: where=%d, amount=%d, count=%d",
+ this, (int)where, (int)amount, (int)mCount); // caller already checked
+
+ size_t new_size;
+ LOG_ALWAYS_FATAL_IF(!safe_sub(&new_size, mCount, amount));
+
+ if (new_size < (capacity() / 2)) {
+ // NOTE: (new_size * 2) is safe because capacity didn't overflow and
+ // new_size < (capacity / 2)).
+ const size_t new_capacity = max(kMinVectorCapacity, new_size * 2);
+
+ // NOTE: (new_capacity * mItemSize), (where * mItemSize) and
+ // ((where + amount) * mItemSize) beyond this point are safe because
+ // we are always reducing the capacity of the underlying SharedBuffer.
+ // In other words, (old_capacity * mItemSize) did not overflow, and
+ // where < (where + amount) < new_capacity < old_capacity.
+ if ((where == new_size) &&
+ (mFlags & HAS_TRIVIAL_COPY) &&
+ (mFlags & HAS_TRIVIAL_DTOR))
+ {
+ const SharedBuffer* cur_sb = SharedBuffer::bufferFromData(mStorage);
+ SharedBuffer* sb = cur_sb->editResize(new_capacity * mItemSize);
+ if (sb) {
+ mStorage = sb->data();
+ } else {
+ return;
+ }
+ } else {
+ SharedBuffer* sb = SharedBuffer::alloc(new_capacity * mItemSize);
+ if (sb) {
+ void* array = sb->data();
+ if (where != 0) {
+ _do_copy(array, mStorage, where);
+ }
+ if (where != new_size) {
+ const void* from = reinterpret_cast<const uint8_t *>(mStorage) + (where+amount)*mItemSize;
+ void* dest = reinterpret_cast<uint8_t *>(array) + where*mItemSize;
+ _do_copy(dest, from, new_size - where);
+ }
+ release_storage();
+ mStorage = const_cast<void*>(array);
+            } else {
+ return;
+ }
+ }
+ } else {
+ void* array = editArrayImpl();
+ void* to = reinterpret_cast<uint8_t *>(array) + where*mItemSize;
+ _do_destroy(to, amount);
+ if (where != new_size) {
+ const void* from = reinterpret_cast<uint8_t *>(array) + (where+amount)*mItemSize;
+ _do_move_backward(to, from, new_size - where);
+ }
+ }
+ mCount = new_size;
+}
+
+size_t VectorImpl::itemSize() const {
+ return mItemSize;
+}
+
+void VectorImpl::_do_construct(void* storage, size_t num) const
+{
+ if (!(mFlags & HAS_TRIVIAL_CTOR)) {
+ do_construct(storage, num);
+ }
+}
+
+void VectorImpl::_do_destroy(void* storage, size_t num) const
+{
+ if (!(mFlags & HAS_TRIVIAL_DTOR)) {
+ do_destroy(storage, num);
+ }
+}
+
+void VectorImpl::_do_copy(void* dest, const void* from, size_t num) const
+{
+ if (!(mFlags & HAS_TRIVIAL_COPY)) {
+ do_copy(dest, from, num);
+ } else {
+ memcpy(dest, from, num*itemSize());
+ }
+}
+
+void VectorImpl::_do_splat(void* dest, const void* item, size_t num) const {
+ do_splat(dest, item, num);
+}
+
+void VectorImpl::_do_move_forward(void* dest, const void* from, size_t num) const {
+ do_move_forward(dest, from, num);
+}
+
+void VectorImpl::_do_move_backward(void* dest, const void* from, size_t num) const {
+ do_move_backward(dest, from, num);
+}
+
+void VectorImpl::reservedVectorImpl1() { }
+void VectorImpl::reservedVectorImpl2() { }
+void VectorImpl::reservedVectorImpl3() { }
+void VectorImpl::reservedVectorImpl4() { }
+void VectorImpl::reservedVectorImpl5() { }
+void VectorImpl::reservedVectorImpl6() { }
+void VectorImpl::reservedVectorImpl7() { }
+void VectorImpl::reservedVectorImpl8() { }
+
+/*****************************************************************************/
+
+SortedVectorImpl::SortedVectorImpl(size_t itemSize, uint32_t flags)
+ : VectorImpl(itemSize, flags)
+{
+}
+
+SortedVectorImpl::SortedVectorImpl(const VectorImpl& rhs)
+: VectorImpl(rhs)
+{
+}
+
+SortedVectorImpl::~SortedVectorImpl()
+{
+}
+
+SortedVectorImpl& SortedVectorImpl::operator = (const SortedVectorImpl& rhs)
+{
+ return static_cast<SortedVectorImpl&>( VectorImpl::operator = (static_cast<const VectorImpl&>(rhs)) );
+}
+
+ssize_t SortedVectorImpl::indexOf(const void* item) const
+{
+ return _indexOrderOf(item);
+}
+
+size_t SortedVectorImpl::orderOf(const void* item) const
+{
+ size_t o;
+ _indexOrderOf(item, &o);
+ return o;
+}
+
+ssize_t SortedVectorImpl::_indexOrderOf(const void* item, size_t* order) const
+{
+ // binary search
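+    // Returns the index of a matching item (or NAME_NOT_FOUND) and, via
+    // 'order', the position where the item would be inserted to keep the
+    // vector sorted.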
+ ssize_t err = NAME_NOT_FOUND;
+ ssize_t l = 0;
+ ssize_t h = size()-1;
+ ssize_t mid;
+ const void* a = arrayImpl();
+ const size_t s = itemSize();
+ while (l <= h) {
+ mid = l + (h - l)/2;
+ const void* const curr = reinterpret_cast<const char *>(a) + (mid*s);
+ const int c = do_compare(curr, item);
+ if (c == 0) {
+ err = l = mid;
+ break;
+ } else if (c < 0) {
+ l = mid + 1;
+ } else {
+ h = mid - 1;
+ }
+ }
+ if (order) *order = l;
+ return err;
+}
+
+ssize_t SortedVectorImpl::add(const void* item)
+{
+ size_t order;
+ ssize_t index = _indexOrderOf(item, &order);
+ if (index < 0) {
+ index = VectorImpl::insertAt(item, order, 1);
+ } else {
+ index = VectorImpl::replaceAt(item, index);
+ }
+ return index;
+}
+
+ssize_t SortedVectorImpl::merge(const VectorImpl& vector)
+{
+ // naive merge...
+ if (!vector.isEmpty()) {
+ const void* buffer = vector.arrayImpl();
+ const size_t is = itemSize();
+ size_t s = vector.size();
+ for (size_t i=0 ; i<s ; i++) {
+ ssize_t err = add( reinterpret_cast<const char*>(buffer) + i*is );
+ if (err<0) {
+ return err;
+ }
+ }
+ }
+ return NO_ERROR;
+}
+
+ssize_t SortedVectorImpl::merge(const SortedVectorImpl& vector)
+{
+    // we're merging a sorted vector... nice!
+ ssize_t err = NO_ERROR;
+ if (!vector.isEmpty()) {
+ // first take care of the case where the vectors are sorted together
+ if (do_compare(vector.itemLocation(vector.size()-1), arrayImpl()) <= 0) {
+ err = VectorImpl::insertVectorAt(static_cast<const VectorImpl&>(vector), 0);
+ } else if (do_compare(vector.arrayImpl(), itemLocation(size()-1)) >= 0) {
+ err = VectorImpl::appendVector(static_cast<const VectorImpl&>(vector));
+ } else {
+ // this could be made a little better
+ err = merge(static_cast<const VectorImpl&>(vector));
+ }
+ }
+ return err;
+}
+
+ssize_t SortedVectorImpl::remove(const void* item)
+{
+ ssize_t i = indexOf(item);
+ if (i>=0) {
+ VectorImpl::removeItemsAt(i, 1);
+ }
+ return i;
+}
+
+void SortedVectorImpl::reservedSortedVectorImpl1() { }
+void SortedVectorImpl::reservedSortedVectorImpl2() { }
+void SortedVectorImpl::reservedSortedVectorImpl3() { }
+void SortedVectorImpl::reservedSortedVectorImpl4() { }
+void SortedVectorImpl::reservedSortedVectorImpl5() { }
+void SortedVectorImpl::reservedSortedVectorImpl6() { }
+void SortedVectorImpl::reservedSortedVectorImpl7() { }
+void SortedVectorImpl::reservedSortedVectorImpl8() { }
+
+/*****************************************************************************/
+
+} // namespace android
+
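The binary search in SortedVectorImpl::_indexOrderOf() above does double duty: it reports whether an item exists and where one would be inserted. The standalone sketch below (illustrative only, not part of the commit) replays the same logic on a std::vector<int>.

    // Standalone analogue of _indexOrderOf() on an int array; a sketch for
    // illustration only.
    #include <cstdio>
    #include <vector>

    // Returns the index of 'item' if present, else -1; always writes the
    // insertion position that keeps 'v' sorted into '*order'.
    static long indexOrderOf(const std::vector<int>& v, int item, size_t* order) {
        long err = -1;
        long l = 0, h = static_cast<long>(v.size()) - 1;
        while (l <= h) {
            const long mid = l + (h - l) / 2;
            if (v[mid] == item) {
                err = l = mid;
                break;
            } else if (v[mid] < item) {
                l = mid + 1;
            } else {
                h = mid - 1;
            }
        }
        if (order) *order = static_cast<size_t>(l);
        return err;
    }

    int main() {
        std::vector<int> v = {2, 4, 8};
        size_t order = 0;
        // Present: prints index=1 order=1.
        printf("index=%ld order=%zu\n", indexOrderOf(v, 4, &order), order);
        // Absent: prints index=-1 order=2 (insertion point between 4 and 8).
        printf("index=%ld order=%zu\n", indexOrderOf(v, 5, &order), order);
        return 0;
    }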