diff --git a/eth/main.cpp b/eth/main.cpp
index 235183ef6..64679762d 100644
--- a/eth/main.cpp
+++ b/eth/main.cpp
@@ -820,7 +820,7 @@ int main(int argc, char** argv)
 			while (web3.ethereum()->blockQueue().items().first + web3.ethereum()->blockQueue().items().second > 0)
 			{
-				sleep(1);
+				this_thread::sleep_for(chrono::seconds(1));
 				web3.ethereum()->syncQueue(100000);
 			}
 			double e = chrono::duration_cast(chrono::steady_clock::now() - t).count() / 1000.0;
diff --git a/libethash-cl/ethash_cl_miner.cpp b/libethash-cl/ethash_cl_miner.cpp
index 79024e2ad..b12e4d9f8 100644
--- a/libethash-cl/ethash_cl_miner.cpp
+++ b/libethash-cl/ethash_cl_miner.cpp
@@ -24,13 +24,13 @@
 #include
 #include
+#include
 #include
 #include
 #include
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -416,6 +416,7 @@ bool ethash_cl_miner::init(
 
 void ethash_cl_miner::search(uint8_t const* header, uint64_t target, search_hook& hook, unsigned _msPerBatch)
 {
+	(void)_msPerBatch;
 	try
 	{
 		struct pending_batch
@@ -454,6 +455,8 @@ void ethash_cl_miner::search(uint8_t const* header, uint64_t target, search_hook
 		uint64_t start_nonce = uniform_int_distribution()(engine);
 		for (;; start_nonce += m_batchSize)
 		{
+//			chrono::high_resolution_clock::time_point t = chrono::high_resolution_clock::now();
+
 			// supply output buffer to kernel
 			m_searchKernel.setArg(0, m_searchBuffer[buf]);
 			if (m_dagChunksCount == 1)
@@ -462,13 +465,7 @@ void ethash_cl_miner::search(uint8_t const* header, uint64_t target, search_hook
 			m_searchKernel.setArg(6, start_nonce);

 			// execute it!
-			boost::timer t;
 			m_queue.enqueueNDRangeKernel(m_searchKernel, cl::NullRange, m_batchSize, m_workgroupSize);
-			unsigned ms = t.elapsed() * 1000;
-			if (ms > _msPerBatch * 1.1)
-				m_batchSize = max(128, m_batchSize * 9 / 10);
-			else if (ms < _msPerBatch * 0.9)
-				m_batchSize = m_batchSize * 10 / 9;

 			pending.push({ start_nonce, buf });
 			buf = (buf + 1) % c_bufferCount;
@@ -498,6 +495,20 @@ void ethash_cl_miner::search(uint8_t const* header, uint64_t target, search_hook
 				pending.pop();
 			}
+
+/*			chrono::high_resolution_clock::duration d = chrono::high_resolution_clock::now() - t;
+			if (d > chrono::milliseconds(_msPerBatch * 10 / 9))
+			{
+				cerr << "Batch of" << m_batchSize << "took" << chrono::duration_cast(d).count() << "ms, >>" << _msPerBatch << "ms.";
+				m_batchSize = max(128, m_batchSize * 9 / 10);
+				cerr << "New batch size" << m_batchSize;
+			}
+			else if (d < chrono::milliseconds(_msPerBatch * 9 / 10))
+			{
+				cerr << "Batch of" << m_batchSize << "took" << chrono::duration_cast(d).count() << "ms, <<" << _msPerBatch << "ms.";
+				m_batchSize = m_batchSize * 10 / 9;
+				cerr << "New batch size" << m_batchSize;
+			}*/
 		}

 		// not safe to return until this is ready
diff --git a/libethcore/Common.h b/libethcore/Common.h
index 25a6a8e1d..732d09981 100644
--- a/libethcore/Common.h
+++ b/libethcore/Common.h
@@ -112,6 +112,7 @@ enum class ImportResult
 	AlreadyInChain,
 	AlreadyKnown,
 	Malformed,
+	OverbidGasPrice,
 	BadChain
 };
diff --git a/libethereum/BlockChain.cpp b/libethereum/BlockChain.cpp
index 7abf9316e..9bf89665a 100644
--- a/libethereum/BlockChain.cpp
+++ b/libethereum/BlockChain.cpp
@@ -400,7 +400,6 @@ ImportRoute BlockChain::import(bytes const& _block, OverlayDB const& _db, Import
 //		clog(BlockChainNote) << " Malformed block: " << diagnostic_information(ex);
 		ex << errinfo_phase(2);
 		ex << errinfo_now(time(0));
-		ex << errinfo_block(_block);
 		throw;
 	}
 #endif
@@ -1127,6 +1126,8 @@ VerifiedBlockRef BlockChain::verifyBlock(bytes const& _block, function
 (_ex);
 	if (!block)
 	{
-		cwarn << "ODD: onBadBlock called but exception has no block in it.";
+		cwarn << "ODD: onBadBlock called but exception (" << _ex.what() << ") has no block in it.";
+		cwarn << boost::diagnostic_information(_ex, true);
 		return;
 	}
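The OpenCL change above drops the boost::timer-based batch-size feedback and leaves a std::chrono version commented out. For reference, the tuning rule that commented block encodes is simply: time one kernel round, then nudge the batch size toward the target duration. A minimal standalone sketch follows; runBatch and the 128-item floor are illustrative assumptions, not miner API.

#include <algorithm>
#include <chrono>

// Stand-in for one enqueueNDRangeKernel round plus the wait for its results.
static void runBatch(unsigned /*batchSize*/) {}

// Returns the adjusted batch size after timing a single round against a target
// duration of msPerBatch milliseconds, mirroring the 9/10 and 10/9 factors above.
unsigned tuneBatchSize(unsigned batchSize, unsigned msPerBatch)
{
	auto start = std::chrono::high_resolution_clock::now();
	runBatch(batchSize);
	auto elapsed = std::chrono::high_resolution_clock::now() - start;

	if (elapsed > std::chrono::milliseconds(msPerBatch * 10 / 9))
		batchSize = std::max(128u, batchSize * 9 / 10); // too slow: shrink, keep a floor
	else if (elapsed < std::chrono::milliseconds(msPerBatch * 9 / 10))
		batchSize = batchSize * 10 / 9;                 // too fast: grow toward the target
	return batchSize;
}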
diff --git a/libethereum/TransactionQueue.cpp b/libethereum/TransactionQueue.cpp
index a86f6abf3..8931ee218 100644
--- a/libethereum/TransactionQueue.cpp
+++ b/libethereum/TransactionQueue.cpp
@@ -113,6 +113,26 @@ ImportResult TransactionQueue::manageImport_WITH_LOCK(h256 const& _h, Transactio
 		// If it doesn't work, the signature is bad.
 		// The transaction's nonce may yet be invalid (or, it could be "valid" but we may be missing a marginally older transaction).

+		auto r = m_senders.equal_range(_transaction.from());
+		for (auto it = r.first; it != r.second; ++it)
+			if (m_current.count(it->second) && m_current[it->second].nonce() == _transaction.nonce())
+				if (_transaction.gasPrice() < m_current[it->second].gasPrice())
+					return ImportResult::OverbidGasPrice;
+				else
+				{
+					remove_WITH_LOCK(it->second);
+					break;
+				}
+			else if (m_future.count(it->second) && m_future[it->second].nonce() == _transaction.nonce())
+				if (_transaction.gasPrice() < m_future[it->second].gasPrice())
+					return ImportResult::OverbidGasPrice;
+				else
+				{
+					remove_WITH_LOCK(it->second);
+					break;
+				}
+			else {}
+
 		// If valid, append to blocks.
 		insertCurrent_WITH_LOCK(make_pair(_h, _transaction));
 		m_known.insert(_h);
diff --git a/libp2p/Session.cpp b/libp2p/Session.cpp
index 0c35293f6..1268b865c 100644
--- a/libp2p/Session.cpp
+++ b/libp2p/Session.cpp
@@ -212,11 +212,12 @@ bool Session::interpret(PacketType _t, RLP const& _r)
 	}
 	case PongPacket:
 		DEV_GUARDED(x_info)
-			m_info.lastPing = std::chrono::steady_clock::now() - m_ping;
-		clog(NetTriviaSummary) << "Latency: " << chrono::duration_cast(m_info.lastPing).count() << " ms";
+		{
+			m_info.lastPing = std::chrono::steady_clock::now() - m_ping;
+			clog(NetTriviaSummary) << "Latency: " << chrono::duration_cast(m_info.lastPing).count() << " ms";
+		}
 		break;
 	case GetPeersPacket:
-		break;
 	case PeersPacket:
 		break;
 	default:
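The new ImportResult::OverbidGasPrice path gives the queue a simple replace-by-price rule: a transaction with the same sender and nonce as a queued one is only accepted if it does not bid a lower gas price, in which case the queued entry is dropped. Below is a compact sketch of just that decision; the slot structure is invented for illustration (the real queue keys by sender via m_senders and checks both m_current and m_future).

#include <cstdint>

enum class ImportResult { Success, OverbidGasPrice };

// Hypothetical per-(sender, nonce) slot; the real queue stores full transactions.
struct QueuedSlot
{
	bool occupied = false;
	uint64_t gasPrice = 0;
};

ImportResult tryReplace(QueuedSlot& slot, uint64_t newGasPrice)
{
	if (slot.occupied && newGasPrice < slot.gasPrice)
		return ImportResult::OverbidGasPrice; // keep the better-paying transaction already queued
	slot.occupied = true;                     // otherwise evict/fill the slot with the newcomer
	slot.gasPrice = newGasPrice;
	return ImportResult::Success;
}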
diff --git a/libsolidity/AST.cpp b/libsolidity/AST.cpp
index 7333c024a..09af49c67 100644
--- a/libsolidity/AST.cpp
+++ b/libsolidity/AST.cpp
@@ -340,8 +340,10 @@ vector, FunctionTypePointer>> const& ContractDefinition::getIn
 	{
 		for (ASTPointer const& f: contract->getDefinedFunctions())
 		{
+			if (!f->isPartOfExternalInterface())
+				continue;
 			string functionSignature = f->externalSignature();
-			if (f->isPartOfExternalInterface() && signaturesSeen.count(functionSignature) == 0)
+			if (signaturesSeen.count(functionSignature) == 0)
 			{
 				functionsSeen.insert(f->getName());
 				signaturesSeen.insert(functionSignature);
diff --git a/libsolidity/ArrayUtils.cpp b/libsolidity/ArrayUtils.cpp
index 3be12af72..f13b28173 100644
--- a/libsolidity/ArrayUtils.cpp
+++ b/libsolidity/ArrayUtils.cpp
@@ -134,14 +134,14 @@ void ArrayUtils::copyArrayToStorage(ArrayType const& _targetType, ArrayType cons
 			if (sourceBaseType->getCategory() == Type::Category::Array)
 			{
 				solAssert(byteOffsetSize == 0, "Byte offset for array as base type.");
+				auto const& sourceBaseArrayType = dynamic_cast(*sourceBaseType);
 				m_context << eth::Instruction::DUP3;
 				if (sourceIsStorage)
 					m_context << u256(0);
+				else if (sourceBaseArrayType.location() == DataLocation::Memory)
+					m_context << eth::Instruction::MLOAD;
 				m_context << eth::dupInstruction(sourceIsStorage ? 4 : 3) << u256(0);
-				copyArrayToStorage(
-					dynamic_cast(*targetBaseType),
-					dynamic_cast(*sourceBaseType)
-				);
+				copyArrayToStorage(dynamic_cast(*targetBaseType), sourceBaseArrayType);
 				m_context << eth::Instruction::POP << eth::Instruction::POP;
 			}
 			else if (directCopy)
@@ -188,11 +188,18 @@ void ArrayUtils::copyArrayToStorage(ArrayType const& _targetType, ArrayType cons
 			if (haveByteOffsetSource)
 				incrementByteOffset(sourceBaseType->getStorageBytes(), 1, haveByteOffsetTarget ? 5 : 4);
 			else
+			{
+				m_context << eth::swapInstruction(2 + byteOffsetSize);
+				if (sourceIsStorage)
+					m_context << sourceBaseType->getStorageSize();
+				else if (_sourceType.location() == DataLocation::Memory)
+					m_context << sourceBaseType->memoryHeadSize();
+				else
+					m_context << sourceBaseType->getCalldataEncodedSize(true);
 				m_context
-					<< eth::swapInstruction(2 + byteOffsetSize)
-					<< (sourceIsStorage ? sourceBaseType->getStorageSize() : sourceBaseType->getCalldataEncodedSize())
 					<< eth::Instruction::ADD
 					<< eth::swapInstruction(2 + byteOffsetSize);
+			}
 			// increment target
 			if (haveByteOffsetTarget)
 				incrementByteOffset(targetBaseType->getStorageBytes(), byteOffsetSize, byteOffsetSize + 2);
@@ -235,8 +242,9 @@ void ArrayUtils::copyArrayToMemory(const ArrayType& _sourceType, bool _padToWord
 {
 	solAssert(
 		_sourceType.getBaseType()->getCalldataEncodedSize() > 0,
-		"Nested arrays not yet implemented here."
+		"Nested dynamic arrays not implemented here."
 	);
+	CompilerUtils utils(m_context);
 	unsigned baseSize = 1;
 	if (!_sourceType.isByteArray())
 		// We always pad the elements, regardless of _padToWordBoundaries.
@@ -246,7 +254,7 @@ void ArrayUtils::copyArrayToMemory(const ArrayType& _sourceType, bool _padToWord
 	{
 		if (!_sourceType.isDynamicallySized())
 			m_context << _sourceType.getLength();
-		if (_sourceType.getBaseType()->getCalldataEncodedSize() > 1)
+		if (baseSize > 1)
 			m_context << u256(baseSize) << eth::Instruction::MUL;
 		// stack: target source_offset source_len
 		m_context << eth::Instruction::DUP1 << eth::Instruction::DUP3 << eth::Instruction::DUP5;
@@ -257,8 +265,36 @@ void ArrayUtils::copyArrayToMemory(const ArrayType& _sourceType, bool _padToWord
 	{
 	}
 	else if (_sourceType.location() == DataLocation::Memory)
 	{
-		// memcpy using the built-in contract
 		retrieveLength(_sourceType);
+		// stack: target source length
+		if (!_sourceType.getBaseType()->isValueType())
+		{
+			// copy using a loop
+			m_context << u256(0) << eth::Instruction::SWAP3;
+			// stack: counter source length target
+			auto repeat = m_context.newTag();
+			m_context << repeat;
+			m_context << eth::Instruction::DUP2 << eth::Instruction::DUP5;
+			m_context << eth::Instruction::LT << eth::Instruction::ISZERO;
+			auto loopEnd = m_context.appendConditionalJump();
+			m_context << eth::Instruction::DUP3 << eth::Instruction::DUP5;
+			accessIndex(_sourceType, false);
+			MemoryItem(m_context, *_sourceType.getBaseType(), true).retrieveValue(SourceLocation(), true);
+			if (auto baseArray = dynamic_cast(_sourceType.getBaseType().get()))
+				copyArrayToMemory(*baseArray, _padToWordBoundaries);
+			else
+				utils.storeInMemoryDynamic(*_sourceType.getBaseType());
+			m_context << eth::Instruction::SWAP3 << u256(1) << eth::Instruction::ADD;
+			m_context << eth::Instruction::SWAP3;
+			m_context.appendJumpTo(repeat);
+			m_context << loopEnd;
+			m_context << eth::Instruction::SWAP3;
+			utils.popStackSlots(3);
+			// stack: updated_target_pos
+			return;
+		}
+
+		// memcpy using the built-in contract
 		if (_sourceType.isDynamicallySized())
 		{
 			// change pointer to data part
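The loop added in the hunk above is the path taken when the element type of a memory array is itself a reference type, where a flat copy of the 32-byte heads would not give ABI-style output; value-type elements still go through the identity-precompile memcpy further down. A source-level sketch of the kind of contract that exercises it, written in the style of the end-to-end tests near the bottom of this diff (illustration only, not an additional test in this change):

char const* sourceCode = R"(
	contract C {
		function f() returns (uint24[3][2]) {
			uint24[3][2] memory x;
			x[0][1] = 7;
			x[1][2] = 8;
			return x;
		}
	}
)";
// compileAndRun(sourceCode, 0, "C");
// callContractFunction("f()") would then be expected to encode the six values in place.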
@@ -271,7 +307,7 @@ void ArrayUtils::copyArrayToMemory(const ArrayType& _sourceType, bool _padToWord
 		// stack:
 		//@TODO do not use ::CALL if less than 32 bytes?
 		m_context << eth::Instruction::DUP1 << eth::Instruction::DUP4 << eth::Instruction::DUP4;
-		CompilerUtils(m_context).memoryCopy();
+		utils.memoryCopy();
 		m_context << eth::Instruction::SWAP1 << eth::Instruction::POP;
 		// stack:

@@ -345,7 +381,7 @@ void ArrayUtils::copyArrayToMemory(const ArrayType& _sourceType, bool _padToWord
 		{
 			// actual array data is stored at SHA3(storage_offset)
 			m_context << eth::Instruction::SWAP1;
-			CompilerUtils(m_context).computeHashStatic();
+			utils.computeHashStatic();
 			m_context << eth::Instruction::SWAP1;
 		}

@@ -375,7 +411,10 @@ void ArrayUtils::copyArrayToMemory(const ArrayType& _sourceType, bool _padToWord
 			else
 				m_context << eth::Instruction::DUP2 << u256(0);
 			StorageItem(m_context, *_sourceType.getBaseType()).retrieveValue(SourceLocation(), true);
-			CompilerUtils(m_context).storeInMemoryDynamic(*_sourceType.getBaseType());
+			if (auto baseArray = dynamic_cast(_sourceType.getBaseType().get()))
+				copyArrayToMemory(*baseArray, _padToWordBoundaries);
+			else
+				utils.storeInMemoryDynamic(*_sourceType.getBaseType());
 			// increment storage_data_offset and byte offset
 			if (haveByteOffset)
 				incrementByteOffset(storageBytes, 2, 3);
@@ -387,7 +426,8 @@ void ArrayUtils::copyArrayToMemory(const ArrayType& _sourceType, bool _padToWord
 			}
 		}
 		// check for loop condition
-		m_context << eth::Instruction::DUP1 << eth::dupInstruction(haveByteOffset ? 5 : 4) << eth::Instruction::GT;
+		m_context << eth::Instruction::DUP1 << eth::dupInstruction(haveByteOffset ? 5 : 4);
+		m_context << eth::Instruction::GT;
 		m_context.appendConditionalJumpTo(loopStart);
 		// stack here: memory_end_offset storage_data_offset [storage_byte_offset] memory_offset
 		if (haveByteOffset)
@@ -597,12 +637,14 @@ void ArrayUtils::convertLengthToSize(ArrayType const& _arrayType, bool _pad) con
 	}
 	else
 	{
-		solAssert(
-			_arrayType.getBaseType()->getCalldataEncodedSize() > 0,
-			"Copying nested dynamic arrays not yet implemented."
-		);
 		if (!_arrayType.isByteArray())
-			m_context << _arrayType.getBaseType()->getCalldataEncodedSize() << eth::Instruction::MUL;
+		{
+			if (_arrayType.location() == DataLocation::Memory)
+				m_context << _arrayType.getBaseType()->memoryHeadSize();
+			else
+				m_context << _arrayType.getBaseType()->getCalldataEncodedSize();
+			m_context << eth::Instruction::MUL;
+		}
 		else if (_pad)
 			m_context << u256(31) << eth::Instruction::ADD
 				<< u256(32) << eth::Instruction::DUP1
@@ -632,7 +674,7 @@ void ArrayUtils::retrieveLength(ArrayType const& _arrayType) const
 	}
 }

-void ArrayUtils::accessIndex(ArrayType const& _arrayType) const
+void ArrayUtils::accessIndex(ArrayType const& _arrayType, bool _doBoundsCheck) const
 {
 	DataLocation location = _arrayType.location();
 	eth::Instruction load =
@@ -640,19 +682,25 @@ void ArrayUtils::accessIndex(ArrayType const& _arrayType) const
 		location == DataLocation::Memory ? eth::Instruction::MLOAD :
 		eth::Instruction::CALLDATALOAD;

-	// retrieve length
-	if (!_arrayType.isDynamicallySized())
-		m_context << _arrayType.getLength();
-	else if (location == DataLocation::CallData)
-		// length is stored on the stack
-		m_context << eth::Instruction::SWAP1;
-	else
-		m_context << eth::Instruction::DUP2 << load;
-	// stack:
-	// check out-of-bounds access
-	m_context << eth::Instruction::DUP2 << eth::Instruction::LT << eth::Instruction::ISZERO;
-	// out-of-bounds access throws exception
-	m_context.appendConditionalJumpTo(m_context.errorTag());
+	if (_doBoundsCheck)
+	{
+		// retrieve length
+		if (!_arrayType.isDynamicallySized())
+			m_context << _arrayType.getLength();
+		else if (location == DataLocation::CallData)
+			// length is stored on the stack
+			m_context << eth::Instruction::SWAP1;
+		else
+			m_context << eth::Instruction::DUP2 << load;
+		// stack:
+		// check out-of-bounds access
+		m_context << eth::Instruction::DUP2 << eth::Instruction::LT << eth::Instruction::ISZERO;
+		// out-of-bounds access throws exception
+		m_context.appendConditionalJumpTo(m_context.errorTag());
+	}
+	else if (location == DataLocation::CallData && _arrayType.isDynamicallySized())
+		// remove length if present
+		m_context << eth::Instruction::SWAP1 << eth::Instruction::POP;

 	// stack:
 	m_context << eth::Instruction::SWAP1;
@@ -671,18 +719,13 @@ void ArrayUtils::accessIndex(ArrayType const& _arrayType) const
 		if (!_arrayType.isByteArray())
 		{
 			m_context << eth::Instruction::SWAP1;
-			m_context << _arrayType.getBaseType()->getCalldataEncodedSize() << eth::Instruction::MUL;
+			if (location == DataLocation::CallData)
+				m_context << _arrayType.getBaseType()->getCalldataEncodedSize();
+			else
+				m_context << u256(_arrayType.memoryHeadSize());
+			m_context << eth::Instruction::MUL;
 		}
 		m_context << eth::Instruction::ADD;
-		//@todo we should also load if it is a reference type of dynamic length
-		// but we should apply special logic if we load from calldata.
-		if (_arrayType.getBaseType()->isValueType())
-			CompilerUtils(m_context).loadFromMemoryDynamic(
-				*_arrayType.getBaseType(),
-				location == DataLocation::CallData,
-				!_arrayType.isByteArray(),
-				false
-			);
 		break;
 	case DataLocation::Storage:
 		m_context << eth::Instruction::SWAP1;
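accessIndex now makes the bounds check optional: with _doBoundsCheck set, the index is compared against the length and an out-of-range access jumps to the error tag, aborting execution; with it cleared (as in the new copy loops), the caller guarantees validity and only the offset arithmetic is emitted. A rough model of that contract in plain C++; checkedOffset is an illustrative name, not compiler API.

#include <cstdint>
#include <stdexcept>

// Conceptual model of the generated code: bounds check first (optional), then MUL/ADD
// to turn the index into an offset from the array's data location.
uint64_t checkedOffset(uint64_t index, uint64_t length, uint64_t elementSize, bool doBoundsCheck)
{
	if (doBoundsCheck && !(index < length))
		throw std::out_of_range("array index past length"); // stands in for the error-tag jump
	return index * elementSize;
}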
diff --git a/libsolidity/ArrayUtils.h b/libsolidity/ArrayUtils.h
index 8d56f3c8f..c047fdcc0 100644
--- a/libsolidity/ArrayUtils.h
+++ b/libsolidity/ArrayUtils.h
@@ -44,7 +44,11 @@ public:
 	/// Stack pre: source_reference [source_byte_offset/source_length] target_reference target_byte_offset
 	/// Stack post: target_reference target_byte_offset
 	void copyArrayToStorage(ArrayType const& _targetType, ArrayType const& _sourceType) const;
-	/// Copies an array (which cannot be dynamically nested) from anywhere to memory.
+	/// Copies the data part of an array (which cannot be dynamically nested) from anywhere
+	/// to a given position in memory.
+	/// This always copies contained data as is (i.e. structs and fixed-size arrays are copied in
+	/// place as required by the ABI encoding). Use CompilerUtils::convertType if you want real
+	/// memory copies of nested arrays.
 	/// Stack pre: memory_offset source_item
 	/// Stack post: memory_offset + length(padded)
 	void copyArrayToMemory(ArrayType const& _sourceType, bool _padToWordBoundaries = true) const;
@@ -74,12 +78,11 @@ public:
 	/// Stack pre: reference (excludes byte offset for dynamic storage arrays)
 	/// Stack post: reference length
 	void retrieveLength(ArrayType const& _arrayType) const;
-	/// Retrieves the value at a specific index. If the location is storage, only retrieves the
-	/// position.
+	/// Performs bounds checking and returns a reference on the stack.
 	/// Stack pre: reference [length] index
-	/// Stack post for storage: slot byte_offset
-	/// Stack post for calldata: value
-	void accessIndex(ArrayType const& _arrayType) const;
+	/// Stack post (storage): storage_slot byte_offset
+	/// Stack post: memory/calldata_offset
+	void accessIndex(ArrayType const& _arrayType, bool _doBoundsCheck = true) const;

 private:
 	/// Adds the given number of bytes to a storage byte offset counter and also increments
diff --git a/libsolidity/Compiler.cpp b/libsolidity/Compiler.cpp
index b05a7a9b1..6ed6480f2 100644
--- a/libsolidity/Compiler.cpp
+++ b/libsolidity/Compiler.cpp
@@ -261,7 +261,7 @@ void Compiler::appendCalldataUnpacker(
 {
 	// We do not check the calldata size, everything is zero-padded

-	//@todo this does not yet support nested arrays
+	//@todo this does not yet support nested dynamic arrays

 	if (_startOffset == u256(-1))
 		_startOffset = u256(CompilerUtils::dataStartOffset);
@@ -279,6 +279,12 @@ void Compiler::appendCalldataUnpacker(
 			solAssert(!arrayType.getBaseType()->isDynamicallySized(), "Nested arrays not yet implemented.");
 			if (_fromMemory)
 			{
+				solAssert(
+					arrayType.getBaseType()->isValueType(),
+					"Nested memory arrays not yet implemented here."
+				);
+				// @todo If base type is an array or struct, it is still calldata-style encoded, so
+				// we would have to convert it like below.
 				solAssert(arrayType.location() == DataLocation::Memory, "");
 				// compute data pointer
 				m_context << eth::Instruction::DUP1 << eth::Instruction::MLOAD;
@@ -311,6 +317,7 @@ void Compiler::appendCalldataUnpacker(
 			}
 			if (arrayType.location() == DataLocation::Memory)
 			{
+				// stack: calldata_ref [length] next_calldata
 				// copy to memory
 				// move calldata type up again
 				CompilerUtils(m_context).moveIntoStack(calldataType->getSizeOnStack());
@@ -657,7 +664,7 @@ void Compiler::appendStackVariableInitialisation(VariableDeclaration const& _var
 {
 	CompilerContext::LocationSetter location(m_context, _variable);
 	m_context.addVariable(_variable);
-	ExpressionCompiler(m_context).appendStackVariableInitialisation(*_variable.getType());
+	CompilerUtils(m_context).pushZeroValue(*_variable.getType());
 }

 void Compiler::compileExpression(Expression const& _expression, TypePointer const& _targetType)
diff --git a/libsolidity/CompilerUtils.cpp b/libsolidity/CompilerUtils.cpp
index 47a9a3542..208d7cecc 100644
--- a/libsolidity/CompilerUtils.cpp
+++ b/libsolidity/CompilerUtils.cpp
@@ -54,6 +54,13 @@ void CompilerUtils::storeFreeMemoryPointer()
 	m_context << u256(freeMemoryPointer) << eth::Instruction::MSTORE;
 }

+void CompilerUtils::allocateMemory()
+{
+	fetchFreeMemoryPointer();
+	m_context << eth::Instruction::SWAP1 << eth::Instruction::DUP2 << eth::Instruction::ADD;
+	storeFreeMemoryPointer();
+}
+
 void CompilerUtils::toSizeAfterFreeMemoryPointer()
 {
 	fetchFreeMemoryPointer();
@@ -101,17 +108,20 @@ void CompilerUtils::storeInMemory(unsigned _offset)

 void CompilerUtils::storeInMemoryDynamic(Type const& _type, bool _padToWordBoundaries)
 {
-	if (_type.getCategory() == Type::Category::Array)
-		ArrayUtils(m_context).copyArrayToMemory(
-			dynamic_cast(_type),
-			_padToWordBoundaries
-		);
+	if (auto ref = dynamic_cast(&_type))
+	{
+		solAssert(ref->location() == DataLocation::Memory, "");
+		storeInMemoryDynamic(IntegerType(256), _padToWordBoundaries);
+	}
 	else
 	{
 		unsigned numBytes = prepareMemoryStore(_type, _padToWordBoundaries);
 		if (numBytes > 0)
 		{
-			solAssert(_type.getSizeOnStack() == 1, "Memory store of types with stack size != 1 not implemented.");
+			solAssert(
+				_type.getSizeOnStack() == 1,
+				"Memory store of types with stack size != 1 not implemented."
+			);
 			m_context << eth::Instruction::DUP2 << eth::Instruction::MSTORE;
 			m_context << u256(numBytes) << eth::Instruction::ADD;
 		}
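allocateMemory() is a small helper around the free-memory pointer: it reads the current pointer, returns it as the allocation's start and bumps the pointer by the requested size. In C++ terms it behaves like a bump allocator; the sketch below is an analogy only (the struct, the 0x40 slot comment and the starting value are assumptions, not generated code).

#include <cstdint>

struct EvmMemoryModel
{
	// The free-memory pointer itself lives in memory (conventionally at offset 0x40).
	uint64_t freeMemoryPointer = 0x80;
};

// Stack pre: <size>; stack post: <mem_start> -- modelled here as argument and return value.
uint64_t allocateMemory(EvmMemoryModel& mem, uint64_t size)
{
	uint64_t start = mem.freeMemoryPointer; // fetchFreeMemoryPointer()
	mem.freeMemoryPointer = start + size;   // SWAP1 DUP2 ADD; storeFreeMemoryPointer()
	return start;
}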
@@ -164,7 +174,10 @@ void CompilerUtils::encodeToMemory(
 				type = _givenTypes[i]; // delay conversion
 			else
 				convertType(*_givenTypes[i], *targetType, true);
-			storeInMemoryDynamic(*type, _padToWordBoundaries);
+			if (auto arrayType = dynamic_cast(type.get()))
+				ArrayUtils(m_context).copyArrayToMemory(*arrayType, _padToWordBoundaries);
+			else
+				storeInMemoryDynamic(*type, _padToWordBoundaries);
 		}
 		stackPos += _givenTypes[i]->getSizeOnStack();
 	}
@@ -207,7 +220,7 @@ void CompilerUtils::encodeToMemory(
 			m_context << eth::swapInstruction(arrayType.getSizeOnStack() + 1) << eth::Instruction::POP;
 			// stack: ...
 			// copy data part
-			storeInMemoryDynamic(arrayType, true);
+			ArrayUtils(m_context).copyArrayToMemory(arrayType, _padToWordBoundaries);
 			// stack: ...

 			thisDynPointer++;
@@ -349,63 +362,64 @@ void CompilerUtils::convertType(Type const& _typeOnStack, Type const& _targetTyp
 		{
 			// stack: (variably sized)
 			unsigned stackSize = typeOnStack.getSizeOnStack();
-			fetchFreeMemoryPointer();
-			moveIntoStack(stackSize);
-			// stack: (variably sized)
-			if (targetType.isDynamicallySized())
+			bool fromStorage = (typeOnStack.location() == DataLocation::Storage);
+			if (fromStorage)
 			{
-				bool fromStorage = (typeOnStack.location() == DataLocation::Storage);
-				// store length
-				if (fromStorage)
-				{
-					stackSize--;
-					// remove storage offset, as requested by ArrayUtils::retrieveLength
-					m_context << eth::Instruction::POP;
-				}
-				ArrayUtils(m_context).retrieveLength(typeOnStack);
-				// Stack:
-				m_context << eth::dupInstruction(2 + stackSize) << eth::Instruction::MSTORE;
-				m_context << eth::dupInstruction(1 + stackSize) << u256(0x20);
-				m_context << eth::Instruction::ADD;
-				moveIntoStack(stackSize);
-				if (fromStorage)
-				{
-					m_context << u256(0);
-					stackSize++;
-				}
+				stackSize--;
+				// remove storage offset, as requested by ArrayUtils::retrieveLength
+				m_context << eth::Instruction::POP;
 			}
-			else
+			ArrayUtils(m_context).retrieveLength(typeOnStack);
+
+			// allocate memory
+			// stack: (variably sized)
+			m_context << eth::Instruction::DUP1;
+			ArrayUtils(m_context).convertLengthToSize(targetType, true);
+			// stack: (variably sized)
+			if (targetType.isDynamicallySized())
+				m_context << u256(0x20) << eth::Instruction::ADD;
+			allocateMemory();
+			// stack: (variably sized)
+			m_context << eth::Instruction::DUP1;
+			moveIntoStack(2 + stackSize);
+			if (targetType.isDynamicallySized())
 			{
-				m_context << eth::dupInstruction(1 + stackSize);
-				moveIntoStack(stackSize);
+				m_context << eth::Instruction::DUP2;
+				storeInMemoryDynamic(IntegerType(256));
 			}
-			// Stack:
-			// Store data part.
-			storeInMemoryDynamic(typeOnStack);
-			// Stack
-			storeFreeMemoryPointer();
-		}
-		else if (typeOnStack.location() == DataLocation::CallData)
-		{
-			// Stack: []
-			// length is present if dynamically sized
-			fetchFreeMemoryPointer();
-			moveIntoStack(typeOnStack.getSizeOnStack());
-			// stack: memptr calldataoffset []
-			if (typeOnStack.isDynamicallySized())
+			// stack: (variably sized)
+			if (targetType.getBaseType()->isValueType())
 			{
-				solAssert(targetType.isDynamicallySized(), "");
-				m_context << eth::Instruction::DUP3 << eth::Instruction::DUP2;
-				storeInMemoryDynamic(IntegerType(256));
-				moveIntoStack(typeOnStack.getSizeOnStack());
+				solAssert(typeOnStack.getBaseType()->isValueType(), "");
+				copyToStackTop(2 + stackSize, stackSize);
+				if (fromStorage)
+					m_context << u256(0); // add byte offset again
+				ArrayUtils(m_context).copyArrayToMemory(typeOnStack);
 			}
 			else
-				m_context << eth::Instruction::DUP2 << eth::Instruction::SWAP1;
-			// stack: mem_ptr mem_data_ptr calldataoffset []
-			storeInMemoryDynamic(typeOnStack);
-			storeFreeMemoryPointer();
+			{
+				m_context << u256(0) << eth::Instruction::SWAP1;
+				// stack: (variably sized)
+				auto repeat = m_context.newTag();
+				m_context << repeat;
+				m_context << eth::Instruction::DUP3 << eth::Instruction::DUP3;
+				m_context << eth::Instruction::LT << eth::Instruction::ISZERO;
+				auto loopEnd = m_context.appendConditionalJump();
+				copyToStackTop(3 + stackSize, stackSize);
+				copyToStackTop(2 + stackSize, 1);
+				ArrayUtils(m_context).accessIndex(typeOnStack, false);
+				convertType(*typeOnStack.getBaseType(), *targetType.getBaseType(), _cleanupNeeded);
+				storeInMemoryDynamic(*targetType.getBaseType(), true);
+				m_context << eth::Instruction::SWAP1 << u256(1) << eth::Instruction::ADD;
+				m_context << eth::Instruction::SWAP1;
+				m_context.appendJumpTo(repeat);
+				m_context << loopEnd;
+				m_context << eth::Instruction::POP;
+			}
+			// stack: (variably sized)
+			popStackSlots(2 + stackSize);
+			// Stack:
 		}
-		// nothing to do for memory to memory
 		break;
 	}
 	default:
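The rewritten array branch of convertType follows one recipe for every source location: fetch the length, size and allocate the target (plus a 32-byte length slot if it is dynamically sized), then either bulk-copy value-type elements or walk the source with accessIndex(..., false) and convert each element recursively. A condensed model of that recipe; convertToMemory and its parameters are illustrative, not part of CompilerUtils.

#include <vector>

template <typename Element, typename Source, typename ConvertElement>
std::vector<Element> convertToMemory(Source const& source, ConvertElement convertElement)
{
	std::vector<Element> target;
	target.reserve(source.size());              // retrieveLength + convertLengthToSize + allocateMemory
	for (auto const& item: source)              // accessIndex(typeOnStack, false) per iteration
		target.push_back(convertElement(item)); // recursive convertType + storeInMemoryDynamic
	return target;
}

// Usage sketch: widening a storage-style array of small integers into memory words.
// std::vector<long long> memoryCopy = convertToMemory<long long>(storageValues,
//     [](int v) { return static_cast<long long>(v); });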
@@ -444,6 +458,57 @@ void CompilerUtils::convertType(Type const& _typeOnStack, Type const& _targetTyp
 	}
 }

+void CompilerUtils::pushZeroValue(const Type& _type)
+{
+	auto const* referenceType = dynamic_cast(&_type);
+	if (!referenceType || referenceType->location() == DataLocation::Storage)
+	{
+		for (size_t i = 0; i < _type.getSizeOnStack(); ++i)
+			m_context << u256(0);
+		return;
+	}
+	solAssert(referenceType->location() == DataLocation::Memory, "");
+
+	m_context << u256(max(32u, _type.getCalldataEncodedSize()));
+	allocateMemory();
+	m_context << eth::Instruction::DUP1;
+
+	if (auto structType = dynamic_cast(&_type))
+		for (auto const& member: structType->getMembers())
+		{
+			pushZeroValue(*member.type);
+			storeInMemoryDynamic(*member.type);
+		}
+	else if (auto arrayType = dynamic_cast(&_type))
+	{
+		if (arrayType->isDynamicallySized())
+		{
+			// zero length
+			m_context << u256(0);
+			storeInMemoryDynamic(IntegerType(256));
+		}
+		else if (arrayType->getLength() > 0)
+		{
+			m_context << arrayType->getLength() << eth::Instruction::SWAP1;
+			// stack: items_to_do memory_pos
+			auto repeat = m_context.newTag();
+			m_context << repeat;
+			pushZeroValue(*arrayType->getBaseType());
+			storeInMemoryDynamic(*arrayType->getBaseType());
+			m_context << eth::Instruction::SWAP1 << u256(1) << eth::Instruction::SWAP1;
+			m_context << eth::Instruction::SUB << eth::Instruction::SWAP1;
+			m_context << eth::Instruction::DUP2;
+			m_context.appendConditionalJumpTo(repeat);
+			m_context << eth::Instruction::SWAP1 << eth::Instruction::POP;
+		}
+	}
+	else
+		solAssert(false, "Requested initialisation for unknown type: " + _type.toString());
+
+	// remove the updated memory pointer
+	m_context << eth::Instruction::POP;
+}
+
 void CompilerUtils::moveToStackVariable(VariableDeclaration const& _variable)
 {
 	unsigned const stackPosition = m_context.baseToCurrentStackOffset(m_context.getBaseStackOffsetOfVariable(_variable));
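pushZeroValue is what now backs default initialisation of local variables (Compiler.cpp calls it instead of the removed ExpressionCompiler helper): value types become zero words on the stack, while memory references get freshly allocated, zero-filled objects. In source terms it is the reason a sketch like the following, written in the style of the existing end-to-end tests, returns nothing but zeros (illustration only, not an additional test in this diff):

char const* sourceCode = R"(
	contract C {
		function f() returns (uint24[3][4]) {
			uint24[3][4] memory data; // pushZeroValue allocates this and zero-fills every element
			return data;              // expected to encode as twelve zero values
		}
	}
)";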
diff --git a/libsolidity/CompilerUtils.h b/libsolidity/CompilerUtils.h
index a9e07f74f..7dd44da8f 100644
--- a/libsolidity/CompilerUtils.h
+++ b/libsolidity/CompilerUtils.h
@@ -41,6 +41,10 @@ public:
 	void fetchFreeMemoryPointer();
 	/// Stores the free memory pointer from the stack.
 	void storeFreeMemoryPointer();
+	/// Allocates a number of bytes in memory as given on the stack.
+	/// Stack pre:
+	/// Stack post:
+	void allocateMemory();
 	/// Appends code that transforms memptr to (memptr - free_memptr) memptr
 	void toSizeAfterFreeMemoryPointer();

@@ -70,7 +74,8 @@ public:
 	/// @param _type type of the data on the stack
 	void storeInMemory(unsigned _offset);
 	/// Dynamic version of @see storeInMemory, expects the memory offset below the value on the stack
-	/// and also updates that. For arrays, only copies the data part.
+	/// and also updates that. For reference types, only copies the data pointer. Fails for
+	/// non-memory-references.
 	/// @param _padToWordBoundaries if true, adds zeros to pad to multiple of 32 bytes. Array elements
 	/// are always padded (except for byte arrays), regardless of this parameter.
 	/// Stack pre: memory_offset value...
@@ -107,6 +112,10 @@ public:
 	/// necessary.
 	void convertType(Type const& _typeOnStack, Type const& _targetType, bool _cleanupNeeded = false);
+	/// Creates a zero-value for the given type and puts it onto the stack. This might allocate
+	/// memory for memory references.
+	void pushZeroValue(Type const& _type);
+
 	/// Moves the value that is at the top of the stack to a stack variable.
 	void moveToStackVariable(VariableDeclaration const& _variable);

 	/// Copies an item that occupies @a _itemSize stack slots from a stack depth of @a _stackDepth
diff --git a/libsolidity/ExpressionCompiler.cpp b/libsolidity/ExpressionCompiler.cpp
index fb10eb83b..7ddcc0318 100644
--- a/libsolidity/ExpressionCompiler.cpp
+++ b/libsolidity/ExpressionCompiler.cpp
@@ -56,62 +56,6 @@ void ExpressionCompiler::appendStateVariableInitialization(VariableDeclaration c
 	StorageItem(m_context, _varDecl).storeValue(*_varDecl.getType(), _varDecl.getLocation(), true);
 }

-void ExpressionCompiler::appendStackVariableInitialisation(Type const& _type, bool _toMemory)
-{
-	CompilerUtils utils(m_context);
-	auto const* referenceType = dynamic_cast(&_type);
-	if (!referenceType || referenceType->location() == DataLocation::Storage)
-	{
-		for (size_t i = 0; i < _type.getSizeOnStack(); ++i)
-			m_context << u256(0);
-		if (_toMemory)
-			utils.storeInMemoryDynamic(_type);
-		return;
-	}
-	solAssert(referenceType->location() == DataLocation::Memory, "");
-	if (!_toMemory)
-	{
-		// allocate memory
-		utils.fetchFreeMemoryPointer();
-		m_context << eth::Instruction::DUP1 << u256(max(32u, _type.getCalldataEncodedSize()));
-		m_context << eth::Instruction::ADD;
-		utils.storeFreeMemoryPointer();
-		m_context << eth::Instruction::DUP1;
-	}
-
-	if (auto structType = dynamic_cast(&_type))
-		for (auto const& member: structType->getMembers())
-			appendStackVariableInitialisation(*member.type, true);
-	else if (auto arrayType = dynamic_cast(&_type))
-	{
-		if (arrayType->isDynamicallySized())
-		{
-			// zero length
-			m_context << u256(0);
-			CompilerUtils(m_context).storeInMemoryDynamic(IntegerType(256));
-		}
-		else if (arrayType->getLength() > 0)
-		{
-			m_context << arrayType->getLength() << eth::Instruction::SWAP1;
-			// stack: items_to_do memory_pos
-			auto repeat = m_context.newTag();
-			m_context << repeat;
-			appendStackVariableInitialisation(*arrayType->getBaseType(), true);
-			m_context << eth::Instruction::SWAP1 << u256(1) << eth::Instruction::SWAP1;
-			m_context << eth::Instruction::SUB << eth::Instruction::SWAP1;
-			m_context << eth::Instruction::DUP2;
-			m_context.appendConditionalJumpTo(repeat);
-			m_context << eth::Instruction::SWAP1 << eth::Instruction::POP;
-		}
-	}
-	else
-		solAssert(false, "Requested initialisation for unknown type: " + _type.toString());
-
-	if (!_toMemory)
-		// remove the updated memory pointer
-		m_context << eth::Instruction::POP;
-}
-
 void ExpressionCompiler::appendStateVariableAccessor(VariableDeclaration const& _varDecl)
 {
 	CompilerContext::LocationSetter locationSetter(m_context, _varDecl);
@@ -211,6 +155,8 @@ bool ExpressionCompiler::visit(Assignment const& _assignment)
 	TypePointer type = _assignment.getRightHandSide().getType();
 	if (!_assignment.getType()->dataStoredIn(DataLocation::Storage))
 	{
+		//@todo we should delay conversion here if RHS is not in memory, LHS is a MemoryItem
+		// and not dynamically-sized.
 		utils().convertType(*type, *_assignment.getType());
 		type = _assignment.getType();
 	}
@@ -827,8 +773,9 @@ bool ExpressionCompiler::visit(IndexAccess const& _indexAccess)
 		_indexAccess.getIndexExpression()->accept(*this);
 		// stack layout: []
 		ArrayUtils(m_context).accessIndex(arrayType);
-		if (arrayType.location() == DataLocation::Storage)
+		switch (arrayType.location())
 		{
+		case DataLocation::Storage:
 			if (arrayType.isByteArray())
 			{
 				solAssert(!arrayType.isString(), "Index access to string is not allowed.");
@@ -836,6 +783,21 @@ bool ExpressionCompiler::visit(IndexAccess const& _indexAccess)
 			}
 			else
 				setLValueToStorageItem(_indexAccess);
+			break;
+		case DataLocation::Memory:
+			setLValue(_indexAccess, *_indexAccess.getType(), !arrayType.isByteArray());
+			break;
+		case DataLocation::CallData:
+			//@todo if we implement this, the value in calldata has to be added to the base offset
+			solAssert(!arrayType.getBaseType()->isDynamicallySized(), "Nested arrays not yet implemented.");
+			if (arrayType.getBaseType()->isValueType())
+				CompilerUtils(m_context).loadFromMemoryDynamic(
+					*arrayType.getBaseType(),
+					true,
+					!arrayType.isByteArray(),
+					false
+				);
+			break;
 		}
 	}
 	else
diff --git a/libsolidity/ExpressionCompiler.h b/libsolidity/ExpressionCompiler.h
index 747e241ef..642560c64 100644
--- a/libsolidity/ExpressionCompiler.h
+++ b/libsolidity/ExpressionCompiler.h
@@ -64,13 +64,6 @@ public:
 	/// Appends code to set a state variable to its initial value/expression.
 	void appendStateVariableInitialization(VariableDeclaration const& _varDecl);

-	/// Appends code to initialise a local variable.
-	/// If @a _toMemory is false, leaves the value on the stack. For memory references, this
-	/// allocates new memory.
-	/// If @a _toMemory is true, directly stores the data in the memory pos on the stack and
-	/// updates it.
-	void appendStackVariableInitialisation(Type const& _type, bool _toMemory = false);
-
 	/// Appends code for a State Variable accessor function
 	void appendStateVariableAccessor(VariableDeclaration const& _varDecl);
diff --git a/libsolidity/LValue.cpp b/libsolidity/LValue.cpp
index 1acf0a3e8..cf640e910 100644
--- a/libsolidity/LValue.cpp
+++ b/libsolidity/LValue.cpp
@@ -82,6 +82,62 @@ void StackVariable::setToZero(SourceLocation const& _location, bool) const
 		<< eth::Instruction::POP;
 }

+MemoryItem::MemoryItem(CompilerContext& _compilerContext, Type const& _type, bool _padded):
+	LValue(_compilerContext, _type),
+	m_padded(_padded)
+{
+}
+
+void MemoryItem::retrieveValue(SourceLocation const&, bool _remove) const
+{
+	if (m_dataType.isValueType())
+	{
+		if (!_remove)
+			m_context << eth::Instruction::DUP1;
+		CompilerUtils(m_context).loadFromMemoryDynamic(m_dataType, false, m_padded, false);
+	}
+	else
+		m_context << eth::Instruction::MLOAD;
+}
+
+void MemoryItem::storeValue(Type const& _sourceType, SourceLocation const&, bool _move) const
+{
+	CompilerUtils utils(m_context);
+	if (m_dataType.isValueType())
+	{
+		solAssert(_sourceType.isValueType(), "");
+		utils.moveIntoStack(_sourceType.getSizeOnStack());
+		utils.convertType(_sourceType, m_dataType, true);
+		if (!_move)
+		{
+			utils.moveToStackTop(m_dataType.getSizeOnStack());
+			utils.copyToStackTop(2, m_dataType.getSizeOnStack());
+		}
+		utils.storeInMemoryDynamic(m_dataType, m_padded);
+		m_context << eth::Instruction::POP;
+	}
+	else
+	{
+		solAssert(_sourceType == m_dataType, "Conversion not implemented for assignment to memory.");
+
+		solAssert(m_dataType.getSizeOnStack() == 1, "");
+		if (!_move)
+			m_context << eth::Instruction::DUP2 << eth::Instruction::SWAP1;
+		// stack: [value] value lvalue
+		// only store the reference
+		m_context << eth::Instruction::MSTORE;
+	}
+}
+
+void MemoryItem::setToZero(SourceLocation const&, bool _removeReference) const
+{
+	CompilerUtils utils(m_context);
+	if (!_removeReference)
+		m_context << eth::Instruction::DUP1;
+	utils.pushZeroValue(m_dataType);
+	utils.storeInMemoryDynamic(m_dataType, m_padded);
+	m_context << eth::Instruction::POP;
+}

 StorageItem::StorageItem(CompilerContext& _compilerContext, Declaration const& _declaration):
 	StorageItem(_compilerContext, *_declaration.getType())
diff --git a/libsolidity/LValue.h b/libsolidity/LValue.h
index 726d63328..882b3626e 100644
--- a/libsolidity/LValue.h
+++ b/libsolidity/LValue.h
@@ -97,6 +97,29 @@ private:
 	unsigned m_size;
 };
+/**
+ * Reference to some item in memory.
+ */
+class MemoryItem: public LValue
+{
+public:
+	MemoryItem(CompilerContext& _compilerContext, Type const& _type, bool _padded);
+	virtual unsigned sizeOnStack() const override { return 1; }
+	virtual void retrieveValue(SourceLocation const& _location, bool _remove = false) const override;
+	virtual void storeValue(
+		Type const& _sourceType,
+		SourceLocation const& _location = SourceLocation(),
+		bool _move = false
+	) const override;
+	virtual void setToZero(
+		SourceLocation const& _location = SourceLocation(),
+		bool _removeReference = true
+	) const override;
+private:
+	/// Special flag to deal with byte array elements.
+	bool m_padded = false;
+};
+
 /**
  * Reference to some item in storage.
  * On the stack this is ,
 * where 0 <= offset_inside_value < 32 and an offset of i means that the value is multiplied
diff --git a/libsolidity/Types.cpp b/libsolidity/Types.cpp
index 01876b5a7..15c367421 100644
--- a/libsolidity/Types.cpp
+++ b/libsolidity/Types.cpp
@@ -827,15 +827,16 @@ TypePointer ArrayType::externalType() const
 {
 	if (m_arrayKind != ArrayKind::Ordinary)
 		return this->copyForLocation(DataLocation::Memory, true);
-	if (!m_baseType->externalType())
+	TypePointer baseExt = m_baseType->externalType();
+	if (!baseExt)
 		return TypePointer();
 	if (m_baseType->getCategory() == Category::Array && m_baseType->isDynamicallySized())
 		return TypePointer();
 	if (isDynamicallySized())
-		return std::make_shared(DataLocation::Memory, m_baseType->externalType());
+		return std::make_shared(DataLocation::Memory, baseExt);
 	else
-		return std::make_shared(DataLocation::Memory, m_baseType->externalType(), m_length);
+		return std::make_shared(DataLocation::Memory, baseExt, m_length);
 }

 TypePointer ArrayType::copyForLocation(DataLocation _location, bool _isPointer) const
@@ -1268,15 +1269,17 @@ FunctionTypePointer FunctionType::externalFunctionType() const

 	for (auto type: m_parameterTypes)
 	{
-		if (!type->externalType())
+		if (auto ext = type->externalType())
+			paramTypes.push_back(ext);
+		else
 			return FunctionTypePointer();
-		paramTypes.push_back(type->externalType());
 	}
 	for (auto type: m_returnParameterTypes)
 	{
-		if (!type->externalType())
+		if (auto ext = type->externalType())
+			retParamTypes.push_back(ext);
+		else
 			return FunctionTypePointer();
-		retParamTypes.push_back(type->externalType());
 	}
 	return make_shared(paramTypes, retParamTypes, m_parameterNames, m_returnParameterNames, m_location, m_arbitraryParameters);
 }
diff --git a/libsolidity/Types.h b/libsolidity/Types.h
index 9d412cd68..9d9aaaba7 100644
--- a/libsolidity/Types.h
+++ b/libsolidity/Types.h
@@ -179,6 +179,9 @@ public:
 	/// is not a simple big-endian encoding or the type cannot be stored in calldata.
 	/// If @a _padded then it is assumed that each element is padded to a multiple of 32 bytes.
 	virtual unsigned getCalldataEncodedSize(bool _padded) const { (void)_padded; return 0; }
+	/// @returns the size of this data type in bytes when stored in memory. For memory-reference
+	/// types, this is the size of the memory pointer.
+	virtual unsigned memoryHeadSize() const { return getCalldataEncodedSize(); }
 	/// Convenience version of @see getCalldataEncodedSize(bool)
 	unsigned getCalldataEncodedSize() const { return getCalldataEncodedSize(true); }
 	/// @returns true if the type is dynamically encoded in calldata
@@ -373,6 +376,8 @@ public:
 	explicit ReferenceType(DataLocation _location): m_location(_location) {}
 	DataLocation location() const { return m_location; }

+	virtual unsigned memoryHeadSize() const override { return 32; }
+
 	/// @returns a copy of this type with location (recursively) changed to @a _location,
 	/// whereas isPointer is only shallowly changed - the deep copy is always a bound reference.
 	virtual TypePointer copyForLocation(DataLocation _location, bool _isPointer) const = 0;
diff --git a/mix/DebuggingStateWrapper.cpp b/mix/DebuggingStateWrapper.cpp
index b8fbdef30..42c429224 100644
--- a/mix/DebuggingStateWrapper.cpp
+++ b/mix/DebuggingStateWrapper.cpp
@@ -38,13 +38,17 @@ using namespace dev::mix;

 namespace
 {
-	static QVariantList memDumpToList(bytes const& _bytes, unsigned _width)
+	static QVariantList memDumpToList(bytes const& _bytes, unsigned _width, bool _includeAddress = false)
 	{
 		QVariantList dumpList;
 		for (unsigned i = 0; i < _bytes.size(); i += _width)
 		{
 			std::stringstream ret;
-
+			if (_includeAddress)
+			{
+				ret << std::setfill('0') << std::setw(6) << std::hex << i << " ";
+				ret << " ";
+			}
 			for (unsigned j = i; j < i + _width; ++j)
 				if (j < _bytes.size())
 					if (_bytes[j] >= 32 && _bytes[j] < 127)
@@ -137,7 +141,7 @@ QStringList QMachineState::debugStorage()

 QVariantList QMachineState::debugMemory()
 {
-	return memDumpToList(m_state.memory, 16);
+	return memDumpToList(m_state.memory, 16, true);
 }

 QCallData* QMachineState::getDebugCallData(QObject* _owner, bytes const& _data)
diff --git a/test/libsolidity/SolidityEndToEndTest.cpp b/test/libsolidity/SolidityEndToEndTest.cpp
index 75793abf7..a01e98cf8 100644
--- a/test/libsolidity/SolidityEndToEndTest.cpp
+++ b/test/libsolidity/SolidityEndToEndTest.cpp
@@ -4691,6 +4691,53 @@ BOOST_AUTO_TEST_CASE(memory_types_initialisation)
 	BOOST_CHECK(callContractFunction("nestedStat()") == encodeArgs(vector(3 * 7)));
 }

+BOOST_AUTO_TEST_CASE(memory_arrays_index_access_write)
+{
+	char const* sourceCode = R"(
+		contract Test {
+			function set(uint24[3][4] x) {
+				x[2][2] = 1;
+				x[3][2] = 7;
+			}
+			function f() returns (uint24[3][4]){
+				uint24[3][4] memory data;
+				set(data);
+				return data;
+			}
+		}
+	)";
+	compileAndRun(sourceCode, 0, "Test");
+
+	vector data(3 * 4);
+	data[3 * 2 + 2] = 1;
+	data[3 * 3 + 2] = 7;
+	BOOST_CHECK(callContractFunction("f()") == encodeArgs(data));
+}
+
+BOOST_AUTO_TEST_CASE(memory_arrays_dynamic_index_access_write)
+{
+	char const* sourceCode = R"(
+		contract Test {
+			uint24[3][][4] data;
+			function set(uint24[3][][4] x) internal returns (uint24[3][][4]) {
+				x[1][2][2] = 1;
+				x[1][3][2] = 7;
+				return x;
+			}
+			function f() returns (uint24[3][]) {
+				data[1].length = 4;
+				return set(data)[1];
+			}
+		}
+	)";
+	compileAndRun(sourceCode, 0, "Test");
+
+	vector data(3 * 4);
+	data[3 * 2 + 2] = 1;
+	data[3 * 3 + 2] = 7;
+	BOOST_CHECK(callContractFunction("f()") == encodeArgs(u256(0x20), u256(4), data));
+}
+
 BOOST_AUTO_TEST_SUITE_END()

 }
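The _includeAddress flag above prefixes every 16-byte row of the Mix memory dump with its zero-padded hex offset. Below is a self-contained approximation of the resulting format, kept separate from the Qt types used by memDumpToList; the sample data and simplified character handling are assumptions.

#include <algorithm>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

int main()
{
	std::vector<unsigned char> bytes(40, 'a'); // forty printable bytes as sample memory
	unsigned const width = 16;
	for (unsigned i = 0; i < bytes.size(); i += width)
	{
		std::stringstream row;
		row << std::setfill('0') << std::setw(6) << std::hex << i << "  "; // the address column
		for (unsigned j = i; j < std::min<unsigned>(i + width, bytes.size()); ++j)
			row << static_cast<char>(bytes[j]);
		std::cout << row.str() << "\n";
	}
}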