/*
 * Copyright (C) 2015 The Qt Company Ltd
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if OS(SYMBIAN)

#include "BlockAllocatorSymbian.h"

namespace WTF {

/** Efficiently allocates blocks of size blockSize with blockSize alignment.
 * Primarily designed for the JSC Collector's needs.
 * Not thread-safe.
 */
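/* Usage sketch (illustrative only; the KBlockSize constant and the sizes below are
 * hypothetical examples, not values required by this allocator):
 *
 *     static const TUint32 KBlockSize = 64 * 1024;
 *     AlignedBlockAllocator allocator(128 * KBlockSize, KBlockSize);
 *     void* block = allocator.alloc();   // returns 0 when the reservation is exhausted or on OOM
 *     if (block)
 *         allocator.free(block);         // decommits the block and marks it free again
 */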
AlignedBlockAllocator::AlignedBlockAllocator(TUint32 reservationSize, TUint32 blockSize)
    : m_reservation(reservationSize)
    , m_blockSize(blockSize)
{
    // Get the system's page size value.
    SYMBIAN_PAGESIZE(m_pageSize);

    // The initial reservation is rounded up to a multiple of the system page size;
    // the alignment/block size must itself be a non-zero multiple of the page size.
    m_reservation = SYMBIAN_ROUNDUPTOMULTIPLE(m_reservation, m_pageSize);
    __ASSERT_ALWAYS(m_blockSize && !(m_blockSize % m_pageSize), User::Panic(_L("AlignedBlockAllocator1"), KErrArgument));

    // Calculate how many bit flags we need to carve the reserved range into blockSize-sized blocks.
    m_map.numBits = m_reservation / m_blockSize;
    const TUint32 bitsPerWord = 8 * sizeof(TUint32);
    const TUint32 numWords = (m_map.numBits + bitsPerWord - 1) / bitsPerWord;
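    // For illustration (hypothetical numbers, not requirements): an 8 MB reservation carved
    // into 64 KB blocks needs 128 bits, which fit into four 32-bit words after rounding up.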

    m_map.bits = new TUint32[numWords];
    __ASSERT_ALWAYS(m_map.bits, User::Panic(_L("AlignedBlockAllocator2"), KErrNoMemory));
    m_map.clearAll();

    // Open a Symbian RChunk and reserve the requested virtual address range.
    // Any thread in this process can operate on this RChunk thanks to the EOwnerProcess access rights.
    TInt ret = m_chunk.CreateDisconnectedLocal(0, 0, (TInt)m_reservation, EOwnerProcess);
    if (ret != KErrNone)
        User::Panic(_L("AlignedBlockAllocator3"), ret);

    // This is the offset from m_chunk.Base() required to make it m_blockSize-aligned.
    m_offset = SYMBIAN_ROUNDUPTOMULTIPLE(TUint32(m_chunk.Base()), m_blockSize) - TUint32(m_chunk.Base());
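    // For illustration (hypothetical addresses): if Base() were 0x00403000 and m_blockSize
    // were 0x10000 (64 KB), the next aligned address would be 0x00410000, so m_offset = 0xD000.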
}

void* AlignedBlockAllocator::alloc()
{
    void* address = 0;

    // Look up first free slot in bit map
    const TInt freeIdx = m_map.findFree();

    // Pseudo OOM: we have exhausted the address space we reserved,
    // even though the device may still have free RAM left.
    if (freeIdx < 0)
        return 0;

    TInt ret = m_chunk.Commit(m_offset + (m_blockSize * freeIdx), m_blockSize);
    if (ret != KErrNone)
        return 0; // True OOM: Device didn't have physical RAM to spare

    // Update the bit to mark the region as in use.
    m_map.set(freeIdx);

    // Calculate address of committed region (block)
    address = (void*)(m_chunk.Base() + m_offset + (TUint)(m_blockSize * freeIdx));

    return address;
}

void AlignedBlockAllocator::free(void* block)
{
    // Calculate index of block to be freed
    TInt idx = TUint(static_cast<TUint8*>(block) - m_chunk.Base() - m_offset) / m_blockSize;

    __ASSERT_DEBUG(idx >= 0 && idx < m_map.numBits, User::Panic(_L("AlignedBlockAllocator4"), KErrCorrupt)); // valid index check
    __ASSERT_DEBUG(m_map.get(idx), User::Panic(_L("AlignedBlockAllocator5"), KErrCorrupt)); // in-use flag check

    // Return committed region to system RAM pool (the physical RAM becomes usable by others).
    TInt ret = m_chunk.Decommit(m_offset + m_blockSize * idx, m_blockSize);
    __ASSERT_DEBUG(ret == KErrNone, User::Panic(_L("AlignedBlockAllocator6"), ret));

    // Mark this block as available again.
    m_map.clear(idx);
}

void AlignedBlockAllocator::destroy()
{
    // Release everything: decommit all committed blocks and reset the bit map.
    m_chunk.Decommit(0, m_chunk.MaxSize());
    m_map.clearAll();
}

AlignedBlockAllocator::~AlignedBlockAllocator()
{
    destroy();
    m_chunk.Close();
    delete [] m_map.bits;
}

} // namespace WTF

#endif // OS(SYMBIAN)