diff --git a/.gitignore b/.gitignore
index fc3f898e7d..fa58c6d76a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -20,3 +20,5 @@ build_clippy/
 *~
 .DS_Store
 tests/artifacts/**/*.dbg.json
+venv
+.venv
\ No newline at end of file
diff --git a/README.md b/README.md
index 6f38fb7133..a36962e7f1 100644
--- a/README.md
+++ b/README.md
@@ -31,10 +31,13 @@ Unit tests come together with the Rust code. They can be invoked via `cargo test
 [Getting Started](https://doc.confluxnetwork.org/docs/general/run-a-node/)
 page for more information.
 
-Integration tests are Python test scripts with the `_test.py` suffix in the `tests` directory.
+Integration tests are Python test scripts with the `_test.py` suffix in the `tests` directory and in the `integration_tests/tests` directory.
 To run these tests, first compile Conflux in _release_ mode using `cargo build --release` and fetch all submodules using `git submodule update --remote --recursive --init`.
-Then, you can run all integration tests using the script `tests/test_all.py`.
+Then, you can run all integration tests using:
+
+- `tests/test_all.py` for tests in the `tests` directory
+- `pytest ./integration_tests/tests -vv -n 6 --dist loadscope` for tests in the `integration_tests` directory
 
 ## Resources
diff --git a/dev-support/dep_pip3.sh b/dev-support/dep_pip3.sh
index da40ab3401..56c11b4803 100755
--- a/dev-support/dep_pip3.sh
+++ b/dev-support/dep_pip3.sh
@@ -2,7 +2,7 @@
 
 set -e
 
-pip3 install cfx-account eth-utils py-ecc rlp trie coincurve safe-pysha3 conflux-web3==1.4.0b5 web3 py-solc-x jsonrpcclient==3.3.6 asyncio websockets pyyaml numpy
+pip3 install cfx-account eth-utils py-ecc rlp trie coincurve safe-pysha3 conflux-web3==1.4.0b5 web3 py-solc-x jsonrpcclient==3.3.6 asyncio websockets pyyaml numpy pytest pytest-xdist
 
 python3 -m solcx.install v0.5.17
diff --git a/dev-support/test-mac.sh b/dev-support/test-mac.sh
index 8c190ed408..c8f46fc349 100755
--- a/dev-support/test-mac.sh
+++ b/dev-support/test-mac.sh
@@ -67,6 +67,7 @@ function check_integration_tests {
         # Make symbolic link for conflux binary to where integration test assumes its existence.
         rm -rf target; ln -s build target
         ./tests/test_all.py
+        pytest ./integration_tests/tests -vv -n 6 --dist loadscope
     )
     local exit_code=$?
     popd > /dev/null
diff --git a/dev-support/test.sh b/dev-support/test.sh
index 955751814c..d2ce11378a 100755
--- a/dev-support/test.sh
+++ b/dev-support/test.sh
@@ -91,6 +91,7 @@ function check_integration_tests {
         # Make symbolic link for conflux binary to where integration test assumes its existence.
         rm -f target; ln -s build target
         ./tests/test_all.py --max-workers $TEST_MAX_WORKERS --max-retries $TEST_MAX_RETRIES | tee /dev/stderr
+        pytest ./integration_tests/tests -vv -n 6 --dist loadscope | tee /dev/stderr
    )
     local exit_code=$?
     popd > /dev/null
diff --git a/docs/integration-tests.md b/docs/integration-tests.md
new file mode 100644
index 0000000000..d96630cfff
--- /dev/null
+++ b/docs/integration-tests.md
@@ -0,0 +1,81 @@
+# Integration Tests
+
+The Conflux-rust integration tests are written in Python. Currently, there are two versions of the tests, located in the `tests` and `integration_tests` directories. All files in these directories with filenames containing `test` are test cases.
+
+The second version refactors the first: it makes the tests more modular and easier to maintain, and it introduces the pytest testing framework.
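+
+For example (these match the commands used in the repository's CI scripts), the two versions are run as follows:
+
+```sh
+# version 1
+python3 tests/test_all.py
+
+# version 2 (pytest-based)
+pytest ./integration_tests/tests -vv -n 6 --dist loadscope
+```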
+
+The integration tests primarily focus on:
+
+- Verifying the correctness of the consensus algorithm
+- Ensuring the implementation of each CIP meets the expected specifications
+- Validating the correctness of RPC interfaces
+
+## test_framework
+
+The `test_framework` directory contains a blockchain testing framework evolved from Bitcoin's testing framework. This framework allows for setting up a multi-node local test network as needed and provides common blockchain control methods such as block generation, data synchronization, and node termination.
+
+Additionally, the framework offers commonly used testing infrastructure:
+
+- RPC client
+- SDK instantiation
+- Commonly used contracts
+- Accounts with preloaded balances
+
+## Conflux Utils
+
+The `conflux` directory includes utilities for common blockchain interactions, such as:
+
+- Address conversion
+- RPC and pubsub
+- Transaction definitions
+- Type conversions
+- Encoding/decoding
+
+## Contracts
+
+Integration tests require some contracts, primarily located in:
+
+- `tests/contracts`: Solidity code and the corresponding ABI and bytecode files compiled using native compilers.
+- `tests/test_contracts`: A Hardhat project containing Solidity files and compiled artifact files ready for use. This directory is a git submodule, with its repository hosted at [conflux-chain/conflux-rust-dev-contracts](https://github.com/Conflux-Chain/conflux-rust-dev-contracts.git).
+
+## SDK
+
+- In version 1 of the integration tests, the Ethereum Python SDK, web3.py, was partially used.
+- In version 2, SDKs are extensively used to write test cases. Core Space uses [python-conflux-sdk](https://github.com/Conflux-Chain/python-conflux-sdk), while eSpace uses [web3.py](https://web3py.readthedocs.io/en/stable/index.html).
+
+## Miscellaneous
+
+### Submodules
+
+The integration test framework depends on two git submodules:
+
+- `conflux-rust-dev-contracts`: Contract code
+- `extra-test-toolkits`: For consensus and fuzzing tests
+
+Use the following command to fetch the submodule code:
+
+```sh
+git submodule update --remote --recursive --init
+```
+
+### Node Program
+
+Before running the tests, you must compile the node program. Refer to the [README.md](../README.md) for compilation instructions.
+
+### Running Version 1 Integration Tests
+
+Run all tests:
+
+```sh
+python3 tests/test_all.py
+```
+
+Run a specific test:
+
+```sh
+python3 tests/erc20_test.py
+```
+
+### Writing Integration Test Cases
+
+It is recommended to write test cases using the newer version. Older test cases will be gradually migrated. Refer to [integration_tests/readme](../integration_tests/readme.md) for detailed instructions on writing test cases.
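+
+As a rough illustration, a version 2 test case is a plain pytest function driven by the framework's fixtures. The sketch below is illustrative only: the `client` fixture name is an assumption, while `RpcClient` and the methods used here (`rand_addr`, `new_tx`, `send_tx`, `get_balance`) come from `integration_tests/conflux/rpc.py`. See the readme above for the actual fixture API.
+
+```python
+from integration_tests.conflux.rpc import RpcClient
+
+
+def test_simple_transfer(client: RpcClient):  # the `client` fixture name is hypothetical
+    receiver = client.rand_addr()
+    # Build a transfer from the genesis account, then mine blocks
+    # until the receipt is available.
+    tx = client.new_tx(receiver=receiver, value=100)
+    client.send_tx(tx, wait_for_receipt=True)
+    assert client.get_balance(receiver) == 100
+```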
\ No newline at end of file diff --git a/integration_tests/__init__.py b/integration_tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/integration_tests/conflux/__init__.py b/integration_tests/conflux/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/integration_tests/conflux/address.py b/integration_tests/conflux/address.py new file mode 100644 index 0000000000..b6f8b0903a --- /dev/null +++ b/integration_tests/conflux/address.py @@ -0,0 +1,68 @@ +import eth_utils +from eth_utils import decode_hex + +from .address_utils import * +from integration_tests.conflux.config import DEFAULT_PY_TEST_CHAIN_ID +from integration_tests.conflux.utils import encode_hex + +MAINNET_PREFIX = "cfx" +TESTNET_PREFIX = "cfxtest" +OTHER_NET_PREFIX = "net" +VERSION_BYTE = 0x00 +MAINNET_NETWORK_ID = 1029 +TESTNET_NETWORK_ID = 1 +ZERO_ADDRESS = "0x0000000000000000000000000000000000000000" + + +def network_id_to_prefix(network_id): + if network_id == TESTNET_NETWORK_ID: + return TESTNET_PREFIX + elif network_id == MAINNET_NETWORK_ID: + return MAINNET_PREFIX + else: + return OTHER_NET_PREFIX + str(network_id) + + +def prefix_to_network_id(prefix): + if prefix == MAINNET_PREFIX: + return MAINNET_NETWORK_ID + elif prefix == TESTNET_PREFIX: + return TESTNET_NETWORK_ID + elif prefix[:3] == OTHER_NET_PREFIX and int(prefix[3:]) not in [TESTNET_NETWORK_ID, MAINNET_NETWORK_ID]: + return int(prefix[3:]) + else: + assert False, "Invalid address prefix" + + +def encode_b32_address(addr, network_id=DEFAULT_PY_TEST_CHAIN_ID): + payload = convertbits([VERSION_BYTE] + list(addr), 8, 5) + prefix = network_id_to_prefix(network_id) + checksum = calculate_checksum(prefix, payload) + return "{}:{}".format(prefix, b32encode(payload + checksum)) + + +# Note: This function does not return network_id on purpose, because in python tests it is DEFAULT_PY_TEST_CHAIN_ID +# while the prefix is `cfx`. 
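+# Layout of a base32 address string (CIP-37): "<prefix>:<payload><checksum>", where
+# the payload encodes [version byte || 20-byte account address] in 5-bit groups
+# (34 characters) and the checksum takes CHECKSUM_SIZE (8) characters, hence the
+# length-42 assertion below. Taking addr_array[-1] skips any optional fields
+# (e.g. the address type) between the prefix and the payload.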
+def decode_b32_address(b32_addr): + b32_addr = b32_addr.lower() + addr_array = b32_addr.split(":") + prefix = addr_array[0] + payload_and_checksum = addr_array[-1] + assert len(payload_and_checksum) == 42 + payload_and_checksum_raw = b32decode(payload_and_checksum) + if not verify_checksum(prefix, payload_and_checksum_raw): + assert False, "Invalid address checksum" + # Remove checksum bits + payload_raw = payload_and_checksum_raw[:-CHECKSUM_SIZE] + # Remove version byte + address_bytes = bytes(convertbits(payload_raw, 5, 8, pad=False))[1:] + return address_bytes + + +def b32_address_to_hex(addr): + return eth_utils.encode_hex(decode_b32_address(addr)) + + +def hex_to_b32_address(addr, network_id=DEFAULT_PY_TEST_CHAIN_ID): + return encode_b32_address(decode_hex(addr), network_id) + diff --git a/integration_tests/conflux/address_utils.py b/integration_tests/conflux/address_utils.py new file mode 100644 index 0000000000..dcd3353ae0 --- /dev/null +++ b/integration_tests/conflux/address_utils.py @@ -0,0 +1,101 @@ +""" +MIT License + +Copyright (c) 2017 Shammah Chancellor + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+""" + + +BASE32_CHARS = 'abcdefghijklmnopqrstuvwxyz0123456789' +EXCLUDE_CHARS = {'o', 'i', 'l', 'q'} +CHARSET = '' +for c in BASE32_CHARS: + if c not in EXCLUDE_CHARS: + CHARSET += c +CHECKSUM_SIZE = 8 + + +def polymod(values): + chk = 1 + generator = [ + (0x01, 0x98f2bc8e61), + (0x02, 0x79b76d99e2), + (0x04, 0xf33e5fb3c4), + (0x08, 0xae2eabe2a8), + (0x10, 0x1e4f43e470)] + for value in values: + top = chk >> 35 + chk = ((chk & 0x07ffffffff) << 5) ^ value + for i in generator: + if top & i[0] != 0: + chk ^= i[1] + return chk ^ 1 + + +def prefix_expand(prefix): + return [ord(x) & 0x1f for x in prefix] + [0] + + +def calculate_checksum(prefix, payload): + poly = polymod(prefix_expand(prefix) + payload + [0, 0, 0, 0, 0, 0, 0, 0]) + out = list() + for i in range(CHECKSUM_SIZE): + out.append((poly >> 5 * (7 - i)) & 0x1f) + return out + + +def verify_checksum(prefix, payload): + return polymod(prefix_expand(prefix) + payload) == 0 + + +def b32decode(inputs): + out = list() + for letter in inputs: + out.append(CHARSET.find(letter)) + return out + + +def b32encode(inputs): + out = '' + for char_code in inputs: + out += CHARSET[char_code] + return out + + +def convertbits(data, frombits, tobits, pad=True): + acc = 0 + bits = 0 + ret = [] + maxv = (1 << tobits) - 1 + max_acc = (1 << (frombits + tobits - 1)) - 1 + for value in data: + if value < 0 or (value >> frombits): + return None + acc = ((acc << frombits) | value) & max_acc + bits += frombits + while bits >= tobits: + bits -= tobits + ret.append((acc >> bits) & maxv) + if pad: + if bits: + ret.append((acc << (tobits - bits)) & maxv) + elif bits >= frombits or ((acc << (tobits - bits)) & maxv): + return None + return ret \ No newline at end of file diff --git a/integration_tests/conflux/block.py b/integration_tests/conflux/block.py new file mode 100644 index 0000000000..21f85e5532 --- /dev/null +++ b/integration_tests/conflux/block.py @@ -0,0 +1,137 @@ +import rlp + +from conflux.messages import BlockHeader +from .utils import ( + normalize_address, hash32, trie_root, big_endian_int, address, int256, + encode_hex, decode_hex, encode_int, sha3, + hash20) +from rlp.sedes import big_endian_int, Binary, binary, CountableList +from . import trie +from . 
import utils +from .config import default_config +from .transactions import Transaction +import sys +# +# +# class BlockHeader(rlp.Serializable): +# fields = [ +# ('parent_hash', hash32), +# ('author', address), +# ('state_root', trie_root), +# ('transactions_root', trie_root), +# ('difficulty', big_endian_int), +# ('number', big_endian_int), +# ('timestamp', big_endian_int), +# ] +# +# def __init__(self, +# parent_hash=default_config['GENESIS_PREVHASH'], +# author=default_config['GENESIS_COINBASE'], +# state_root=trie.BLANK_ROOT, +# transactions_root=trie.BLANK_ROOT, +# difficulty=default_config['GENESIS_DIFFICULTY'], +# number=0, +# timestamp=0): +# # at the beginning of a method, locals() is a dict of all arguments +# fields = {k: v for k, v in locals().items() if +# k not in ['self', '__class__']} +# self.block = None +# super(BlockHeader, self).__init__(**fields) +# +# @property +# def hash(self): +# """The binary block hash""" +# return utils.sha3(rlp.encode(self)) +# +# @property +# def hex_hash(self): +# return encode_hex(self.hash) +# +# @property +# def mining_hash(self): +# mining_fields = [ +# (field, sedes) for field, sedes in BlockHeader._meta.fields +# if field not in ["nonce"] +# ] +# +# class MiningBlockHeader(rlp.Serializable): +# fields = mining_fields +# +# _self = MiningBlockHeader( +# **{f: getattr(self, f) for (f, sedes) in mining_fields}) +# +# return utils.sha3(rlp.encode( +# _self, MiningBlockHeader)) +# +# @property +# def signing_hash(self): +# # exclude extra_data +# signing_fields = [ +# (field, sedes) for field, sedes in BlockHeader._meta.fields +# if field not in ["extra_data"] +# ] +# +# class SigningBlockHeader(rlp.Serializable): +# fields = signing_fields +# +# _self = SigningBlockHeader( +# **{f: getattr(self, f) for (f, sedes) in signing_fields}) +# +# return utils.sha3(rlp.encode( +# _self, SigningBlockHeader)) +# +# def to_dict(self): +# """Serialize the header to a readable dictionary.""" +# d = {} +# for field in ('prevhash', 'uncles_hash', 'extra_data', 'nonce'): +# d[field] = '0x' + encode_hex(getattr(self, field)) +# for field in ('state_root', 'tx_list_root', 'receipts_root', +# 'coinbase'): +# d[field] = encode_hex(getattr(self, field)) +# for field in ('number', 'difficulty', 'gas_limit', 'gas_used', +# 'timestamp'): +# d[field] = utils.to_string(getattr(self, field)) +# assert len(d) == len(BlockHeader.fields) +# return d +# +# def __repr__(self): +# return '<%s(#%d %s)>' % (self.__class__.__name__, self.number, +# encode_hex(self.hash)[:8]) +# +# def __eq__(self, other): +# """Two blockheaders are equal iff they have the same hash.""" +# return isinstance(other, BlockHeader) and self.hash == other.hash +# +# def __hash__(self): +# return utils.big_endian_to_int(self.hash) +# +# def __ne__(self, other): +# return not self.__eq__(other) +# +# + +# +# class Block(rlp.Serializable): +# fields = [ +# ('header', BlockHeader), +# ('transactions', CountableList(Transaction)), +# ] +# +# def __init__(self, header, transactions=None, db=None): +# # assert isinstance(db, BaseDB), "No database object given" +# # self.db = db +# +# super(Block, self).__init__( +# header=header, +# transactions=(transactions or []), +# ) +# +# def __getattribute__(self, name): +# try: +# return rlp.Serializable.__getattribute__(self, name) +# except AttributeError: +# return getattr(self.header, name) +# +# @property +# def transaction_count(self): +# return len(self.transactions) diff --git a/integration_tests/conflux/config.py b/integration_tests/conflux/config.py new 
file mode 100644 index 0000000000..30266ff47f --- /dev/null +++ b/integration_tests/conflux/config.py @@ -0,0 +1,67 @@ +from . import trie +from eth_utils import decode_hex + +DEFAULT_PY_TEST_CHAIN_ID = 10 + +default_config = dict( + GENESIS_DIFFICULTY=0, + GENESIS_PREVHASH=decode_hex("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"), # KECCAK EMPTY, hash of the empty bytes string. + GENESIS_COINBASE=b'\x10' * 20, + GENESIS_PRI_KEY=decode_hex("46b9e861b63d3509c88b7817275a30d22d62c8cd8fa6486ddee35ef0d8e0495f"), + GENESIS_PRI_KEY_2=decode_hex("9a6d3ba2b0c7514b16a006ee605055d71b9edfad183aeb2d9790e9d4ccced471"), + TOTAL_COIN= 5 * 10**9 * 10**18 * 10**6, + GENESIS_STATE_ROOT=decode_hex("0x3a5ce6e6d182784af05d273b069d2a023e93c43a0281a57810d2f4b63a6ad6b1"), + GENESIS_RECEIPTS_ROOT=trie.EMPTY_EPOCH_RECEIPT_ROOT_BY_NUMBER_OF_BLOCKS[0], + GENESIS_LOGS_BLOOM_HASH=decode_hex("0xd397b3b043d87fcd6fad1291ff0bfd16401c274896d8c63a923727f077b8e0b5"), # KECCAK_EMPTY_BLOOM ~ keccak(b'\0' * 256) + GENESIS_TRANSACTION_ROOT=decode_hex("0x8208dfdbb409f7a3e41386a8eaaa6412ad4df158fc04a09b499c1a004b53d469"), + GENESIS_AUTHOR=decode_hex("1949000000000000000000000000000000001001"), + GENESIS_GAS_LIMIT=30_000_000, + MAX_BLOCK_SIZE_IN_BYTES=200 * 1024, + POS_GENESIS_BLOCK=b'\x00' * 32, + POS_VOTE_COUNT=1000, +) + +default_conflux_conf = dict( + chain_id = DEFAULT_PY_TEST_CHAIN_ID, + db_cache_size = 128, + ledger_cache_size = 1024, + storage_delta_mpts_cache_size = 20_000_000, + storage_delta_mpts_cache_start_size = 2_000_000, + storage_delta_mpts_slab_idle_size = 2_000_000, + tx_pool_size = 500_000, + persist_tx_index = "true", + persist_block_number_index = "true", +) + +production_conf = default_conflux_conf + +small_local_test_conf = dict( + chain_id = DEFAULT_PY_TEST_CHAIN_ID, + check_phase_change_period_ms = 100, + enable_discovery = "false", + log_file = "'./conflux.log'", + log_level = '"debug"', + metrics_output_file = "'./metrics.log'", + metrics_enabled = "true", + mode = '"test"', + session_ip_limits = "'0,0,0,0'", + mining_type = "'disable'", + storage_delta_mpts_cache_size = 200_000, + storage_delta_mpts_cache_start_size = 200_000, + storage_delta_mpts_slab_idle_size = 2_000_000, + subnet_quota = 0, + persist_tx_index = "true", + persist_block_number_index = "true", + execute_genesis = "false", + dev_allow_phase_change_without_peer = "true", + check_status_genesis = "false", + pos_reference_enable_height = 0, + hydra_transition_height = 0, + hydra_transition_number = 0, + cip43_init_end_number = 2 ** 32 - 1, + min_phase_change_normal_peer_count = 1, + dao_vote_transition_number = 2**31, + dao_vote_transition_height = 2**31, + enable_single_mpt_storage = "true", + rpc_enable_metrics = "true", +) diff --git a/integration_tests/conflux/exceptions.py b/integration_tests/conflux/exceptions.py new file mode 100644 index 0000000000..39956a9a24 --- /dev/null +++ b/integration_tests/conflux/exceptions.py @@ -0,0 +1,34 @@ +class UnknownParentException(Exception): + pass + + +class VerificationFailed(Exception): + pass + + +class InvalidTransaction(Exception): + pass + + +class UnsignedTransaction(InvalidTransaction): + pass + + +class InvalidNonce(InvalidTransaction): + pass + + +class InsufficientBalance(InvalidTransaction): + pass + + +class InsufficientStartGas(InvalidTransaction): + pass + + +class BlockGasLimitReached(InvalidTransaction): + pass + + +class GasPriceTooLow(InvalidTransaction): + pass diff --git a/integration_tests/conflux/filter.py b/integration_tests/conflux/filter.py new file 
mode 100644 index 0000000000..910fb6bbd3 --- /dev/null +++ b/integration_tests/conflux/filter.py @@ -0,0 +1,22 @@ +from integration_tests.conflux.address import hex_to_b32_address +from integration_tests.conflux.config import DEFAULT_PY_TEST_CHAIN_ID + + +class Filter(): + def __init__(self, from_epoch=None, to_epoch=None, from_block = None, to_block = None, block_hashes = None, address = None, topics = [], + encode_address=True, networkid=DEFAULT_PY_TEST_CHAIN_ID): + if encode_address and address is not None: + if isinstance(address, list): + base32_address = [] + for a in address: + base32_address.append(hex_to_b32_address(a, networkid)) + else: + base32_address = hex_to_b32_address(address, networkid) + address = base32_address + self.fromEpoch = from_epoch + self.toEpoch = to_epoch + self.fromBlock = from_block + self.toBlock = to_block + self.blockHashes = block_hashes + self.address = address + self.topics = topics diff --git a/integration_tests/conflux/messages.py b/integration_tests/conflux/messages.py new file mode 100644 index 0000000000..00e18bce16 --- /dev/null +++ b/integration_tests/conflux/messages.py @@ -0,0 +1,514 @@ +#!/usr/bin/env python3 +"""Conflux P2P network half-a-node. + +`P2PConnection: A low-level connection object to a node's P2P interface +P2PInterface: A high-level interface object for communicating to a node over P2P +""" + +from collections import defaultdict +from io import BytesIO + +import eth_utils +import rlp +from rlp.exceptions import ObjectSerializationError, ObjectDeserializationError +from rlp.sedes import binary, big_endian_int, CountableList, boolean +import logging +import socket +import struct +import sys +import threading + +from integration_tests.conflux import trie +from integration_tests.conflux.config import default_config +from integration_tests.conflux.transactions import Transaction +from integration_tests.conflux.utils import hash32, hash20, sha3, bytes_to_int, encode_hex +from integration_tests.test_framework.util import wait_until + +logger = logging.getLogger("TestFramework.mininode") + + +PACKET_HELLO = 0x80 +PACKET_DISCONNECT = 0x01 +PACKET_PROTOCOL = 0x10 + +STATUS_V2 = 0x22 +STATUS_V3 = 0x23 +HEARTBEAT = 0x24 +NEW_BLOCK_HASHES = 0x01 +TRANSACTIONS = 0x02 + +GET_BLOCK_HASHES = 0x03 +GET_BLOCK_HASHES_RESPONSE = 0x04 +GET_BLOCK_HEADERS = 0x05 +GET_BLOCK_HEADERS_RESPONSE = 0x06 +GET_BLOCK_BODIES = 0x07 +GET_BLOCK_BODIES_RESPONSE = 0x08 +NEW_BLOCK = 0x09 +GET_TERMINAL_BLOCK_HASHES_RESPONSE = 0x0a +GET_TERMINAL_BLOCK_HASHES = 0x0b +GET_BLOCKS = 0x0c +GET_BLOCKS_RESPONSE = 0x0d +GET_BLOCKS_WITH_PUBLIC_RESPONSE = 0x0e +GET_CMPCT_BLOCKS = 0x0f +GET_CMPCT_BLOCKS_RESPONSE = 0x10 +GET_BLOCK_TXN = 0x11 +GET_BLOCK_TXN_RESPONSE = 0x12 + +GET_BLOCK_HASHES_BY_EPOCH = 0x17 +GET_BLOCK_HEADER_CHAIN = 0x18 + +from rlp.exceptions import ( + DeserializationError, + SerializationError, +) + +# Copied from rlp.sedes.Boolean, but encode False to 0x00, not empty. +class Boolean: + """A sedes for booleans + """ + def serialize(self, obj): + if not isinstance(obj, bool): + raise SerializationError('Can only serialize bool', obj) + + if obj is False: + return b'\x00' + elif obj is True: + return b'\x01' + else: + raise Exception("Invariant: no other options for boolean values") + + def deserialize(self, serial): + if serial == b'\x00': + return False + elif serial == b'\x01': + return True + else: + raise DeserializationError( + 'Invalid serialized boolean. 
Must be either 0x01 or 0x00', + serial + ) + +class Capability(rlp.Serializable): + fields = [ + ("protocol", binary), + ("version", big_endian_int) + ] + + +class NodeEndpoint(rlp.Serializable): + fields = [ + ("address", binary), + ("udp_port", big_endian_int), + ("tcp_port", big_endian_int) + ] + + +class Hello(rlp.Serializable): + fields = [ + ("network_id", big_endian_int), + ("capabilities", CountableList(Capability)), + ("node_endpoint", NodeEndpoint), + ("pos_public_key", binary) + ] + + +class Disconnect(rlp.Serializable): + def __init__(self, code:int, msg:str=None): + self.code = code + self.msg = msg + + @classmethod + def deserialize(cls, serial): + return cls(int(serial[0]), str(serial[1:])) + +class ChainIdParams(rlp.Serializable): + fields = [ + ("chain_id", big_endian_int), + ] + +class Status(rlp.Serializable): + fields = [ + ("chain_id", ChainIdParams), + ("genesis_hash", hash32), + ("best_epoch", big_endian_int), + ("node_type", big_endian_int), + ("terminal_block_hashes", CountableList(hash32)), + ] + + +class NewBlockHashes(rlp.Serializable): + def __init__(self, block_hashes=[]): + assert is_sequence(block_hashes) + self.block_hashes = block_hashes + + @classmethod + def serializable(cls, obj): + if is_sequence(obj.block_hashes): + return True + else: + return False + + @classmethod + def serialize(cls, obj): + return CountableList(hash32).serialize(obj.block_hashes) + + @classmethod + def deserialize(cls, serial): + return cls(block_hashes=CountableList(hash32).deserialize(serial)) + + +class Transactions: + def __init__(self, transactions=[]): + assert is_sequence(transactions) + self.transactions = transactions + + @classmethod + def serializable(cls, obj): + if is_sequence(obj.transactions): + return True + else: + return False + + @classmethod + def serialize(cls, obj): + return CountableList(Transaction).serialize(obj.transactions) + + @classmethod + def deserialize(cls, serial): + return cls(transactions=CountableList(Transaction).deserialize(serial)) + + +class GetBlockHashes(rlp.Serializable): + fields = [ + ("reqid", big_endian_int), + ("hash", hash32), + ("max_blocks", big_endian_int) + ] + + def __init__(self, hash, max_blocks, reqid=0): + super().__init__( + reqid=reqid, + hash=hash, + max_blocks=max_blocks, + ) + + +class GetBlockHashesByEpoch(rlp.Serializable): + fields = [ + ("reqid", big_endian_int), + ("epochs", CountableList(big_endian_int)), + ] + + def __init__(self, epochs, reqid=0): + super().__init__( + reqid=reqid, + epochs=epochs + ) + + +class BlockHashes(rlp.Serializable): + fields = [ + ("reqid", big_endian_int), + ("hashes", CountableList(hash32)), + ] + + +class GetBlockHeaders(rlp.Serializable): + fields = [ + ("reqid", big_endian_int), + ("hashes", CountableList(hash32)), + ] + + def __init__(self, hashes, reqid=0): + super().__init__( + reqid=reqid, + hashes=hashes, + ) + +class BlockHeader(rlp.Serializable): + fields = [ + ("parent_hash", binary), + ("height", big_endian_int), + ("timestamp", big_endian_int), + ("author", binary), + ("transactions_root", binary), + ("deferred_state_root", binary), + ("deferred_receipts_root", binary), + ("deferred_logs_bloom_hash", binary), + ("blame", big_endian_int), + ("difficulty", big_endian_int), + ("adaptive", big_endian_int), + ("gas_limit", big_endian_int), + ("referee_hashes", CountableList(binary)), + ("nonce", big_endian_int), + ("pos_reference", CountableList(binary)), + ] + + def __init__(self, + parent_hash=default_config['GENESIS_PREVHASH'], + height=0, + timestamp=0, + 
author=default_config['GENESIS_COINBASE'], + transactions_root=trie.BLANK_ROOT, + deferred_state_root=sha3(rlp.encode(trie.state_root())), + deferred_receipts_root=trie.EMPTY_EPOCH_RECEIPT_ROOT_BY_NUMBER_OF_BLOCKS[0], + deferred_logs_bloom_hash=default_config['GENESIS_LOGS_BLOOM_HASH'], + blame=0, + difficulty=default_config['GENESIS_DIFFICULTY'], + gas_limit=0, + referee_hashes=[], + adaptive=0, + nonce=0, + pos_reference=[]): + # at the beginning of a method, locals() is a dict of all arguments + fields = {k: v for k, v in locals().items() if + k not in ['self', '__class__']} + self.block = None + super(BlockHeader, self).__init__(**fields) + + @property + def hash(self): + return sha3(rlp.encode(self.rlp_part())) + + def get_hex_hash(self): + return eth_utils.encode_hex(self.hash) + + def problem_hash(self): + return sha3(rlp.encode(self.without_nonce())) + + def pow_decimal(self): + init_buf = bytearray(64); + problem_hash = self.problem_hash() + for i in range(0, 32): + init_buf[i] = problem_hash[i] + n = self.nonce + for i in range(32, 64): + init_buf[i] = n % 256 + n = int(n / 256) + tmp = sha3(bytes(init_buf)) + buf = [] + for i in range(0, 32): + buf.append(tmp[i] ^ self.problem_hash()[i]) + return bytes_to_int(sha3(bytes(buf))) + + def without_nonce(self): + fields = {field: getattr(self, field) for field in BlockHeaderWithoutNonce._meta.field_names} + return BlockHeaderWithoutNonce(**fields) + + def rlp_part(self): + fields = {field: getattr(self, field) for field in BlockHeaderRlpPart._meta.field_names} + return BlockHeaderRlpPart(**fields) + + +class BlockHeaderRlpPart(rlp.Serializable): + fields = [ + (field, sedes) for field, sedes in BlockHeader._meta.fields + ] + + +class BlockHeaderWithoutNonce(rlp.Serializable): + fields = [ + (field, sedes) for field, sedes in BlockHeader._meta.fields if + field not in ["nonce"] + + ] + + +# class BlockHeaders(CountableList(BlockHeader)): +# fields = [ +# ("headers", CountableList(BlockHeader)) +# ] +class BlockHeaders(rlp.Serializable): + fields = [ + ("reqid", big_endian_int), + ("headers", CountableList(BlockHeader)), + ] + + +class GetBlockBodies(rlp.Serializable): + fields = [ + ("reqid", big_endian_int), + ("hashes", CountableList(hash32)), + ] + + def __init__(self, reqid=0, hashes=[]): + super().__init__( + reqid=reqid, + hashes=hashes + ) + + +class Block(rlp.Serializable): + fields = [ + ("block_header", BlockHeader), + ("transactions", CountableList(Transaction)) + ] + + def __init__(self, block_header, transactions=None): + super(Block, self).__init__( + block_header=block_header, + transactions=(transactions or []), + ) + + @property + def hash(self): + return self.block_header.hash + + def hash_hex(self): + return eth_utils.encode_hex(self.hash) + + +class BlockBodies(rlp.Serializable): + fields = [ + ("reqid", big_endian_int), + ("bodies", CountableList(Block)), + ] + + +class NewBlock(rlp.Serializable): + def __init__(self, block): + self.block = block + + @classmethod + def serializable(cls, obj): + return True + + @classmethod + def serialize(cls, obj): + return Block.serialize(obj.block) + + @classmethod + def deserialize(cls, serial): + return cls(block=Block.deserialize(serial)) + + +class TerminalBlockHashes(rlp.Serializable): + fields = [ + ("reqid", big_endian_int), + ("hashes", CountableList(hash32)), + ] + + +class GetTerminalBlockHashes(rlp.Serializable): + fields = [ + ("reqid", big_endian_int), + ] + + def __init__(self, reqid=0): + super().__init__(reqid) + + +class GetBlocks(rlp.Serializable): + fields = [ 
+ ("reqid", big_endian_int), + ("with_public", Boolean()), + ("hashes", CountableList(hash32)), + ] + + def __init__(self, reqid=0, with_public=False, hashes=[]): + super().__init__( + reqid=reqid, + with_public=with_public, + hashes=hashes + ) + + +class Blocks(rlp.Serializable): + fields = [ + ("reqid", big_endian_int), + ("blocks", CountableList(Block)), + ] + + +class GetCompactBlocks(rlp.Serializable): + fields = [ + ("reqid", big_endian_int), + ("hashes", CountableList(hash32)), + ] + + +class CompactBlock(rlp.Serializable): + fields = [ + ("block_header", BlockHeader), + ("nonce", big_endian_int), + ("tx_short_ids", CountableList(big_endian_int)), + ] + + +class GetCompactBlocksResponse(rlp.Serializable): + fields = [ + ("reqid", big_endian_int), + ("compact_blocks", CountableList(CompactBlock)), + ("blocks", CountableList(Block)) + ] + + +class GetBlockTxn(rlp.Serializable): + fields = [ + ("reqid", big_endian_int), + ("block_hash", hash32), + ("indexes", CountableList(big_endian_int)), + ] + + +class GetBlockTxnResponse(rlp.Serializable): + fields = [ + ("reqid", big_endian_int), + ("block_hash", hash32), + ("block_txn", CountableList(Transaction)) + ] + + +class Account(rlp.Serializable): + fields = [ + ("balance", big_endian_int), + ("nonce", big_endian_int), + ("storage_root", hash32), + ("code_hash", hash32), + ] + + +msg_id_dict = { + Status: STATUS_V3, + NewBlockHashes: NEW_BLOCK_HASHES, + Transactions: TRANSACTIONS, + GetBlockHashes: GET_BLOCK_HASHES, + BlockHashes: GET_BLOCK_HASHES_RESPONSE, + GetBlockHeaders: GET_BLOCK_HEADERS, + BlockHeaders: GET_BLOCK_HEADERS_RESPONSE, + GetBlockBodies: GET_BLOCK_BODIES, + BlockBodies: GET_BLOCK_BODIES_RESPONSE, + NewBlock: NEW_BLOCK, + TerminalBlockHashes: GET_TERMINAL_BLOCK_HASHES_RESPONSE, + GetTerminalBlockHashes: GET_TERMINAL_BLOCK_HASHES, + GetBlocks: GET_BLOCKS, + Blocks: GET_BLOCKS_RESPONSE, + GetCompactBlocks: GET_CMPCT_BLOCKS, + GetCompactBlocksResponse: GET_CMPCT_BLOCKS_RESPONSE, + GetBlockTxn: GET_BLOCK_TXN, + GetBlockTxnResponse: GET_BLOCK_TXN_RESPONSE, + GetBlockHashesByEpoch: GET_BLOCK_HASHES_BY_EPOCH, +} + +msg_class_dict = {} +for c in msg_id_dict: + msg_class_dict[msg_id_dict[c]] = c + + +def get_msg_id(msg): + c = msg.__class__ + if c in msg_id_dict: + return msg_id_dict[c] + else: + return None + + +def get_msg_class(msg): + if msg in msg_class_dict: + return msg_class_dict[msg] + else: + return None + + +def is_sequence(s): + return isinstance(s, list) or isinstance(s, tuple) diff --git a/integration_tests/conflux/pubsub.py b/integration_tests/conflux/pubsub.py new file mode 100644 index 0000000000..eace66e0f7 --- /dev/null +++ b/integration_tests/conflux/pubsub.py @@ -0,0 +1,83 @@ +# import sys +# sys.path.append("..") + +import asyncio +import json +import websockets + +from jsonrpcclient.clients.websockets_client import WebSocketsClient +from jsonrpcclient.requests import Request + +from integration_tests.test_framework.util import pubsub_url + +class PubSubClient: + def __init__(self, node, evm=False): + self.buffer = {} + self.nid = node.index + self.url = pubsub_url(node.index, evm, node.rpchost, node.ethwsport if evm else node.pubsubport) + self.ws = None + self.evm = evm + + async def subscribe(self, topic, *args): + # connect if necessary + if self.ws == None: + self.ws = await websockets.connect(self.url) + + # subscribe + method = "eth_subscribe" if self.evm else "cfx_subscribe" + req = Request(method, topic, *args) + resp = await WebSocketsClient(self.ws).send(req) + + # initialize buffer + id = 
resp.data.result + self.buffer[id] = [] + return Subscription(self, id, self.evm) + +class Subscription: + def __init__(self, pubsub, id, evm): + self.pubsub = pubsub + self.id = id + self.evm = evm + + async def unsubscribe(self): + assert(self.pubsub.ws != None) + + # unsubscribe + method = "eth_unsubscribe" if self.evm else "cfx_unsubscribe" + req = Request(method, self.id) + resp = await WebSocketsClient(self.pubsub.ws).send(req) + assert(resp.data.result == True) + + # clear buffer + del self.pubsub.buffer[self.id] + + async def next_wo_timeout(self): + # return buffered if possible + if len(self.pubsub.buffer[self.id]) > 0: + return self.pubsub.buffer[self.id].pop() + + # receive new + while True: + resp = await self.pubsub.ws.recv() + resp = json.loads(resp) + + recv_id = resp["params"]["subscription"] + result = resp["params"]["result"] + + if recv_id == self.id: + return result + + self.pubsub.buffer[recv_id].append(result) + + async def next(self, timeout=5.0): + try: + return await asyncio.wait_for(self.next_wo_timeout(), timeout=timeout) + except asyncio.TimeoutError: + raise TimeoutError(f"Received nothing on pub-sub {self.pubsub.url}/{self.id} (node: {self.pubsub.nid}) for {timeout} seconds.") + + async def iter(self, timeout=5): + while True: + try: + yield await self.next(timeout=timeout) + except TimeoutError: + break diff --git a/integration_tests/conflux/rpc.py b/integration_tests/conflux/rpc.py new file mode 100644 index 0000000000..f1ad4124a9 --- /dev/null +++ b/integration_tests/conflux/rpc.py @@ -0,0 +1,775 @@ +import os +import random +from typing import cast, Optional, Union, TypedDict, Any +from web3 import Web3 + +import eth_utils +from cfx_account import Account as CfxAccount +from eth_account.datastructures import SignedTransaction +import rlp +import json + + +from .address import hex_to_b32_address, b32_address_to_hex, DEFAULT_PY_TEST_CHAIN_ID +from .config import DEFAULT_PY_TEST_CHAIN_ID, default_config +from .transactions import CONTRACT_DEFAULT_GAS, Transaction, UnsignedTransaction +from .filter import Filter +from .utils import priv_to_addr, sha3_256, int_to_bytes, convert_to_nodeid, int_to_hex, encode_hex + +import sys + +sys.path.append("..") + +from integration_tests.test_framework.util import ( + assert_greater_than, + assert_greater_than_or_equal, + assert_is_hash_string, + assert_is_hex_string, + assert_equal, + wait_until, checktx, get_contract_instance +) +from integration_tests.test_framework.test_node import TestNode + +file_dir = os.path.dirname(os.path.realpath(__file__)) +REQUEST_BASE = { + 'gas': CONTRACT_DEFAULT_GAS, + 'gasPrice': 1, + 'chainId': 1, + "to": b'', +} + +class CfxFeeHistoryResponse(TypedDict): + baseFeePerGas: list[int] + gasUsedRatio: list[float] + reward: list[list[str]] # does not convert it currently + + +def convert_b32_address_field_to_hex(original_dict: dict, field_name: str): + if original_dict is not None and field_name in original_dict and original_dict[field_name] not in [None, "null"]: + original_dict[field_name] = b32_address_to_hex(original_dict[field_name]) + + +class RpcClient: + def __init__(self, node: Optional[TestNode]=None, auto_restart=False, log=None): + self.node: TestNode = node # type: ignore + self.auto_restart = auto_restart + self.log = log + + # epoch definitions + self.EPOCH_EARLIEST = "earliest" + self.EPOCH_LATEST_MINED = "latest_mined" + self.EPOCH_LATEST_STATE = "latest_state" + self.EPOCH_LATEST_CONFIRMED = "latest_confirmed" + + # update node operations + self.UPDATE_NODE_OP_FAILURE = 
"Failure" + self.UPDATE_NODE_OP_DEMOTE = "Demotion" + self.UPDATE_NODE_OP_REMOVE = "Remove" + + # hash/address definitions + self.GENESIS_ADDR = eth_utils.encode_hex(priv_to_addr(default_config["GENESIS_PRI_KEY"])) + self.GENESIS_PRI_KEY = default_config["GENESIS_PRI_KEY"] + self.COINBASE_ADDR = eth_utils.encode_hex(default_config["GENESIS_COINBASE"]) + self.GENESIS_ORIGIN_COIN = default_config["TOTAL_COIN"] + self.ZERO_HASH = eth_utils.encode_hex(b'\x00' * 32) + + # default tx values + self.DEFAULT_TX_GAS_PRICE = 1 + self.DEFAULT_TX_GAS = 21000 + self.DEFAULT_TX_FEE = self.DEFAULT_TX_GAS_PRICE * self.DEFAULT_TX_GAS + + def EPOCH_NUM(self, num: int) -> str: + return hex(num) + + def rand_addr(self) -> str: + (addr, _) = self.rand_account() + return addr + + def rand_account(self) -> (str, bytes): + priv_key = eth_utils.encode_hex(os.urandom(32)) + addr = eth_utils.encode_hex(priv_to_addr(priv_key)) + return (Web3.to_checksum_address(addr), priv_key) + + def rand_hash(self, seed: bytes = None) -> str: + if seed is None: + seed = os.urandom(32) + + return eth_utils.encode_hex(sha3_256(seed)) + + def generate_block(self, num_txs: int = 0, + block_size_limit_bytes: int = default_config["MAX_BLOCK_SIZE_IN_BYTES"]) -> str: + assert_greater_than_or_equal(num_txs, 0) + block_hash = self.node.test_generateOneBlock(num_txs, block_size_limit_bytes) + assert_is_hash_string(block_hash) + return block_hash + + def generate_blocks(self, num_blocks: int, num_txs: int = 0, + block_size_limit_bytes: int = default_config["MAX_BLOCK_SIZE_IN_BYTES"]) -> list: + assert_greater_than(num_blocks, 0) + assert_greater_than_or_equal(num_txs, 0) + + blocks = [] + for _ in range(0, num_blocks): + block_hash = self.generate_block(num_txs, block_size_limit_bytes) + blocks.append(block_hash) + + return blocks + + def generate_empty_blocks(self, num_blocks: int): + return self.node.test_generateEmptyBlocks(num_blocks) + + def generate_blocks_to_state(self, num_blocks: int = 5, num_txs: int = 1) -> list: + return self.generate_blocks(num_blocks, num_txs) + + def generate_block_with_parent(self, parent_hash: str, referee: list = None, num_txs: int = 0, + adaptive: bool = False, + difficulty=None, pos_reference=None) -> str: + assert_is_hash_string(parent_hash) + + if referee is None: + referee = [] + + for r in referee: + assert_is_hash_string(r) + + assert_greater_than_or_equal(num_txs, 0) + # print(parent_hash) + block_hash = self.node.test_generateFixedBlock(parent_hash, referee, num_txs, adaptive, difficulty, pos_reference) + assert_is_hash_string(block_hash) + return block_hash + + def generate_custom_block(self, parent_hash: str, referee: list, txs: list[Union[Transaction, SignedTransaction]]) -> str: + assert_is_hash_string(parent_hash) + + for r in referee: + assert_is_hash_string(r) + + raw_txs = [] + for tx in txs: + if isinstance(tx, SignedTransaction): + raw_txs.append(tx.raw_transaction) + elif isinstance(tx, Transaction): + raw_txs.append(rlp.encode(tx)) + else: + raw_txs.append(rlp.encode(tx)) + + encoded_txs = eth_utils.encode_hex(rlp.encode(raw_txs)) + + block_hash = self.node.test_generateCustomBlock(parent_hash, referee, encoded_txs) + assert_is_hash_string(block_hash) + return block_hash + + def generate_block_with_fake_txs(self, txs: list, adaptive=False, tx_data_len: int = 0) -> str: + encoded_txs = eth_utils.hexadecimal.encode_hex(rlp.encode(txs)) + block_hash = self.node.test_generateBlockWithFakeTxs(encoded_txs, adaptive, tx_data_len) + assert_is_hash_string(block_hash) + return block_hash + + def 
get_logs(self, filter: Filter) -> list: + logs = self.node.cfx_getLogs(filter.__dict__) + for log in logs: + convert_b32_address_field_to_hex(log, "address") + return logs + + def get_storage_at(self, addr: str, pos: str, epoch: str = None) -> str: + assert_is_hash_string(addr, length=40) + addr = hex_to_b32_address(addr) + assert_is_hash_string(pos) + + if epoch is None: + res = self.node.cfx_getStorageAt(addr, pos) + else: + res = self.node.cfx_getStorageAt(addr, pos, epoch) + + return res + + def get_storage_root(self, addr: str, epoch: str = None) -> str: + assert_is_hash_string(addr, length=40) + addr = hex_to_b32_address(addr) + + if epoch is None: + res = self.node.cfx_getStorageRoot(addr) + else: + res = self.node.cfx_getStorageRoot(addr, epoch) + + return res + + def get_code(self, address: str, epoch: Union[str, dict] = None) -> str: + address = hex_to_b32_address(address) + if epoch is None: + code = self.node.cfx_getCode(address) + else: + code = self.node.cfx_getCode(address, epoch) + assert_is_hex_string(code) + return code + + def gas_price(self) -> int: + return int(self.node.cfx_gasPrice(), 0) + + def base_fee_per_gas(self, epoch: Union[int,str] = "latest_mined"): + return int(self.block_by_epoch(epoch).get("baseFeePerGas", "0x0"), 16) + + def get_block_reward_info(self, epoch: str): + reward = self.node.cfx_getBlockRewardInfo(epoch) + convert_b32_address_field_to_hex(reward, "author") + return reward + + def epoch_number(self, epoch: str = None) -> int: + if epoch is None: + return int(self.node.cfx_epochNumber(), 0) + else: + return int(self.node.cfx_epochNumber(epoch), 0) + + def get_balance(self, addr: str, epoch: str = None) -> int: + addr = hex_to_b32_address(addr) + if epoch is None: + return int(self.node.cfx_getBalance(addr), 0) + else: + return int(self.node.cfx_getBalance(addr, epoch), 0) + + def get_staking_balance(self, addr: str, epoch: str = None) -> int: + addr = hex_to_b32_address(addr) + if epoch is None: + return int(self.node.cfx_getStakingBalance(addr), 0) + else: + return int(self.node.cfx_getStakingBalance(addr, epoch), 0) + + def get_vote_list(self, addr: str, epoch: str = None) -> list: + addr = hex_to_b32_address(addr) + if epoch is None: + return self.node.cfx_getVoteList(addr) + else: + return self.node.cfx_getVoteList(addr, epoch) + + def get_deposit_list(self, addr: str, epoch: str = None) -> list: + addr = hex_to_b32_address(addr) + if epoch is None: + return self.node.cfx_getDepositList(addr) + else: + return self.node.cfx_getDepositList(addr, epoch) + + def get_collateral_for_storage(self, addr: str, epoch: str = None) -> int: + addr = hex_to_b32_address(addr) + if epoch is None: + return int(self.node.cfx_getCollateralForStorage(addr), 0) + else: + return int(self.node.cfx_getCollateralForStorage(addr, epoch), 0) + + def get_sponsor_info(self, addr: str, epoch: str = None) -> dict: + addr = hex_to_b32_address(addr) + if epoch is None: + r = self.node.cfx_getSponsorInfo(addr) + else: + r = self.node.cfx_getSponsorInfo(addr, epoch) + convert_b32_address_field_to_hex(r, 'sponsorForGas') + convert_b32_address_field_to_hex(r, 'sponsorForCollateral') + return r + + def get_sponsor_for_gas(self, addr: str, epoch: str = None) -> str: + return self.get_sponsor_info(addr, epoch)['sponsorForGas'] + + def get_sponsor_for_collateral(self, addr: str, epoch: str = None) -> str: + return self.get_sponsor_info(addr, epoch)['sponsorForCollateral'] + + def get_sponsor_balance_for_collateral(self, addr: str, epoch: str = None) -> int: + return 
int(self.get_sponsor_info(addr, epoch)['sponsorBalanceForCollateral'], 0) + + def get_sponsor_balance_for_gas(self, addr: str, epoch: str = None) -> int: + return int(self.get_sponsor_info(addr, epoch)['sponsorBalanceForGas'], 0) + + def get_sponsor_gas_bound(self, addr: str, epoch: str = None) -> int: + return int(self.get_sponsor_info(addr, epoch)['sponsorGasBound'], 0) + + def get_unused_storage_points(self, addr: str, epoch: str = None) -> int: + return int(self.get_sponsor_info(addr, epoch)['availableStoragePoints'], 0) + + def get_used_storage_points(self, addr: str, epoch: str = None) -> int: + return int(self.get_sponsor_info(addr, epoch)['usedStoragePoints'], 0) + + def get_admin(self, addr: str, epoch: str = None) -> str: + addr = hex_to_b32_address(addr) + if epoch is None: + r = self.node.cfx_getAdmin(addr) + else: + r = self.node.cfx_getAdmin(addr, epoch) + return b32_address_to_hex(r) + + ''' Use the first but not None parameter and ignore the others ''' + + def get_nonce(self, addr: str, epoch: str = None, block_hash: str = None, block_object: dict = None) -> int: + addr = hex_to_b32_address(addr) + if block_hash: + block_hash = "hash:" + block_hash + block_param = epoch or block_hash or block_object + if block_param: + return int(self.node.cfx_getNextNonce(addr, block_param), 0) + else: + return int(self.node.cfx_getNextNonce(addr), 0) + + def send_raw_tx(self, raw_tx: str, wait_for_catchup=True) -> str: + # We wait for the node out of the catch up mode first + if wait_for_catchup: + self.node.wait_for_phase(["NormalSyncPhase"]) + tx_hash = self.node.cfx_sendRawTransaction(raw_tx) + assert_is_hash_string(tx_hash) + return tx_hash + + def clear_tx_pool(self): + self.node.debug_clearTxPool() + + # a temporary patch for transaction compatibility + def send_tx(self, tx: Union[Transaction, SignedTransaction], wait_for_receipt=False, wait_for_catchup=True) -> str: + if isinstance(tx, SignedTransaction): + encoded = cast(str, tx.raw_transaction.to_0x_hex()) + else: + encoded = eth_utils.encode_hex(rlp.encode(tx)) + tx_hash = self.send_raw_tx(encoded, wait_for_catchup=wait_for_catchup) + + if wait_for_receipt: + self.wait_for_receipt(tx_hash) + + return tx_hash + + def send_usable_genesis_accounts(self, account_start_index: int): + self.node.test_sendUsableGenesisAccounts(account_start_index) + + def wait_for_receipt(self, tx_hash: str, num_txs=1, timeout=10, state_before_wait=True): + if state_before_wait: + self.generate_blocks_to_state(num_txs=num_txs) + + def check_tx(): + self.generate_block(num_txs) + return checktx(self.node, tx_hash) + + try: + wait_until(check_tx, timeout=timeout) + except Exception as e: + if self.log is not None: + sender = self.node.cfx_getTransactionByHash(tx_hash)["from"] + self.log.info("wait_for_receipt: pending=%s", self.node.cfx_getAccountPendingTransactions(sender)) + raise e + + def block_by_hash(self, block_hash: str, include_txs: bool = False) -> dict: + block = self.node.cfx_getBlockByHash(block_hash, include_txs) + convert_b32_address_field_to_hex(block, "miner") + return block + + def block_by_hash_with_pivot_assumption(self, block_hash: str, pivot_hash: str, epoch: str) -> dict: + block = self.node.cfx_getBlockByHashWithPivotAssumption(block_hash, pivot_hash, epoch) + convert_b32_address_field_to_hex(block, "miner") + return block + + def block_by_epoch(self, epoch: Union[str, int], include_txs: bool = False) -> dict: + if type(epoch) is int: + epoch = hex(epoch) + + block = self.node.cfx_getBlockByEpochNumber(epoch, include_txs) + 
convert_b32_address_field_to_hex(block, "miner") + return block + + def block_by_block_number(self, block_number: Union[str, int], include_txs: bool = False) -> dict: + if type(block_number) is int: + block_number = hex(block_number) + + block = self.node.cfx_getBlockByBlockNumber(block_number, include_txs) + convert_b32_address_field_to_hex(block, "miner") + return block + + def best_block_hash(self) -> str: + return self.node.cfx_getBestBlockHash() + + def get_tx(self, tx_hash: str) -> dict: + tx = self.node.cfx_getTransactionByHash(tx_hash) + convert_b32_address_field_to_hex(tx, "from") + convert_b32_address_field_to_hex(tx, "to") + convert_b32_address_field_to_hex(tx, "contractCreated") + return tx + + def new_tx(self, *, sender=None, receiver=None, nonce=None, gas_price=1, gas=21000, value=100, data=b'', sign=True, + priv_key=None, storage_limit=None, epoch_height=None, chain_id=DEFAULT_PY_TEST_CHAIN_ID): + if priv_key is None: + priv_key = default_config["GENESIS_PRI_KEY"] + if sender is None: + sender = eth_utils.encode_hex(priv_to_addr(priv_key)) + + if receiver is None: + receiver = self.COINBASE_ADDR + + if nonce is None: + nonce = self.get_nonce(sender) + + if storage_limit is None: + storage_limit = 0 + + if epoch_height is None: + epoch_height = self.epoch_number() + + action = eth_utils.hexadecimal.decode_hex(receiver) + tx = UnsignedTransaction(nonce, gas_price, gas, action, value, data, storage_limit, epoch_height, chain_id) + + if sign: + return tx.sign(priv_key) + else: + return tx + + def new_typed_tx(self, *, type_=2, receiver=None, nonce=None, max_fee_per_gas=None,max_priority_fee_per_gas=0, access_list=[], gas=21000, value=100, data=b'', + priv_key=None, storage_limit=0, epoch_height=None, chain_id=DEFAULT_PY_TEST_CHAIN_ID + ) -> SignedTransaction: + + if priv_key: + acct = CfxAccount.from_key(priv_key, DEFAULT_PY_TEST_CHAIN_ID) + else: + acct = CfxAccount.from_key(default_config["GENESIS_PRI_KEY"], DEFAULT_PY_TEST_CHAIN_ID) + if receiver is None: + receiver = self.COINBASE_ADDR + tx = {} + tx["type"] = type_ + tx["gas"] = gas + tx["storageLimit"] = storage_limit + tx["value"] = value + tx["data"] = data + tx["maxPriorityFeePerGas"] = max_priority_fee_per_gas + tx["chainId"] = chain_id + tx["to"] = receiver + + if nonce is None: + nonce = self.get_nonce(acct.hex_address) + tx["nonce"] = nonce + + if access_list != []: + def format_access_list(a_list): + rtn = [] + for item in a_list: + rtn.append({"address": item['address'], "storageKeys": item['storage_keys']}) + + access_list = format_access_list(access_list) + tx["accessList"] = access_list + + if epoch_height is None: + epoch_height = self.epoch_number() + tx["epochHeight"] = epoch_height + + # ensuring transaction can be sent + if max_fee_per_gas is None: + max_fee_per_gas = self.base_fee_per_gas('latest_mined') + 1 + tx["maxFeePerGas"] = max_fee_per_gas + return acct.sign_transaction(tx) + + def new_contract_tx(self, receiver: Optional[str], data_hex: str = None, sender=None, priv_key=None, nonce=None, + gas_price=1, + gas=CONTRACT_DEFAULT_GAS, value=0, storage_limit=0, epoch_height=0, + chain_id=DEFAULT_PY_TEST_CHAIN_ID): + if priv_key is None: + priv_key = default_config["GENESIS_PRI_KEY"] + elif priv_key == -1: + priv_key = default_config["GENESIS_PRI_KEY_2"] + + if sender is None: + sender = encode_hex(priv_to_addr(priv_key)) + + if nonce is None: + nonce = self.get_nonce(sender) + elif type(nonce) is str: + nonce = int(nonce, 0) + + if receiver is not None: + action = 
eth_utils.hexadecimal.decode_hex(receiver) + else: + action = b'' + if data_hex is None: + data_hex = "0x" + data = eth_utils.hexadecimal.decode_hex(data_hex) + + if type(gas) is str: + gas = int(gas, 0) + + if type(storage_limit) is str: + storage_limit = int(storage_limit, 0) + + tx = UnsignedTransaction(nonce, gas_price, gas, action, value, data, storage_limit, epoch_height, chain_id) + + return tx.sign(priv_key) + + def block_hashes_by_epoch(self, epoch: str) -> list: + blocks = self.node.cfx_getBlocksByEpoch(epoch) + for b in blocks: + assert_is_hash_string(b) + return blocks + + def get_peers(self) -> list: + return self.node.test_getPeerInfo() + + def get_peer(self, node_id: str): + for p in self.get_peers(): + if p["nodeid"] == node_id: + return p + + return None + + def get_node(self, node_id: str): + return self.node.debug_getNetNode(node_id) + + def add_node(self, node_id: str, ip: str, port: int): + self.node.test_addNode(node_id, "{}:{}".format(ip, port)) + + def disconnect_peer(self, node_id: str, node_op: str = None) -> int: + return self.node.debug_disconnectNetNode(node_id, node_op) + + def chain(self) -> list: + return self.node.test_getChain() + + def get_transaction_receipt(self, tx_hash: str) -> dict[str, Any]: + assert_is_hash_string(tx_hash) + r = self.node.cfx_getTransactionReceipt(tx_hash) + if r is None: + return None + + convert_b32_address_field_to_hex(r, "contractCreated") + convert_b32_address_field_to_hex(r, "from") + convert_b32_address_field_to_hex(r, "to") + + if "storageCollateralized" in r: + r["storageCollateralized"] = int(r["storageCollateralized"], 0) + + if "storageReleased" in r: + storage_released = { b32_address_to_hex(item["address"]): int(item["collaterals"], 0) for item in r["storageReleased"] } + r["storageReleased"] = storage_released + return r + + def txpool_status(self) -> (int, int): + status = self.node.txpool_status() + return (eth_utils.to_int(hexstr=status["deferred"]), eth_utils.to_int(hexstr=status["ready"])) + + def new_tx_for_call(self, contract_addr: str = None, data_hex: str = None, nonce: int = None, sender: str = None): + if sender is None: + sender = self.GENESIS_ADDR + if nonce is None: + nonce = self.get_nonce(sender) + if data_hex is None: + data_hex = "0x" + sender = hex_to_b32_address(sender) + if contract_addr is not None: + contract_addr = hex_to_b32_address(contract_addr) + + return { + "hash": "0x" + "0" * 64, + "nonce": hex(nonce), + "from": sender, + "to": contract_addr, + "value": hex(0), + "gasPrice": hex(1), + "gas": hex(CONTRACT_DEFAULT_GAS), + "data": data_hex, + "v": hex(0), + "r": hex(0), + "s": hex(0), + } + + def estimate_gas(self, contract_addr: str = None, data_hex: str = None, sender: str = None, + nonce: int = None) -> int: + tx = self.new_tx_for_call(contract_addr, data_hex, sender=sender, nonce=nonce) + response = self.node.cfx_estimateGasAndCollateral(tx) + return int(response['gasUsed'], 0) + + def estimate_collateral(self, contract_addr: str = None, data_hex: str = None, sender: str = None, + nonce: int = None) -> int: + tx = self.new_tx_for_call(contract_addr, data_hex, sender=sender, nonce=nonce) + if contract_addr == "0x": + del tx['to'] + if sender is None: + del tx['from'] + response = self.node.cfx_estimateGasAndCollateral(tx) + return response['storageCollateralized'] + + def check_balance_against_transaction(self, account_addr: str, contract_addr: str, gas_limit: int, gas_price: int, + storage_limit: int) -> dict: + account_addr = hex_to_b32_address(account_addr) + contract_addr = 
hex_to_b32_address(contract_addr) + return self.node.cfx_checkBalanceAgainstTransaction(account_addr, contract_addr, hex(gas_limit), hex(gas_price), + hex(storage_limit)) + + def call(self, contract_addr: str, data_hex: str, nonce=None, epoch: str = None, sender: str = None) -> str: + tx = self.new_tx_for_call(contract_addr, data_hex, nonce=nonce, sender=sender) + if epoch is None: + return self.node.cfx_call(tx) + else: + return self.node.cfx_call(tx, epoch) + + def get_supply_info(self, epoch: str = None): + if epoch is None: + return self.node.cfx_getSupplyInfo() + else: + return self.node.cfx_getSupplyInfo(epoch) + + def get_collateral_info(self, epoch: str = None): + if epoch is None: + return self.node.cfx_getCollateralInfo() + else: + return self.node.cfx_getCollateralInfo(epoch) + + def get_params_from_vote(self, epoch: str = None): + if epoch is None: + return self.node.cfx_getParamsFromVote() + else: + return self.node.cfx_getParamsFromVote(epoch) + + def get_block_count(self): + return self.node.test_getBlockCount() + + def get_account(self, addr: str, epoch: str = None): + addr = hex_to_b32_address(addr) + account = self.node.cfx_getAccount(addr, epoch) + convert_b32_address_field_to_hex(account, "admin") + return account + + def get_accumulate_interest_rate(self, epoch: str = None): + return self.node.cfx_getAccumulateInterestRate(epoch) + + def get_interest_rate(self, epoch: str = None): + return self.node.cfx_getInterestRate(epoch) + + def get_node_id(self): + challenge = random.randint(0, 2 ** 32 - 1) + signature = self.node.test_getNodeId(list(int_to_bytes(challenge))) + node_id, _, _ = convert_to_nodeid(signature, challenge) + return node_id + + def current_sync_phase(self): + return self.node.debug_currentSyncPhase() + + def get_status(self): + return self.node.cfx_getStatus() + + def get_block_trace(self, block_hash: str): + return self.node.trace_block(block_hash) + + def get_transaction_trace(self, tx_hash: str): + return self.node.trace_transaction(tx_hash) + + def filter_trace(self, filter: dict): + return self.node.trace_filter(filter) + + def fee_history(self, epoch_count: int, last_epoch: Union[int, str], reward_percentiles: Optional[list[float]]=None) -> CfxFeeHistoryResponse: + if reward_percentiles is None: + reward_percentiles = [50] + if isinstance(last_epoch, int): + last_epoch = hex(last_epoch) + rtn = self.node.cfx_feeHistory(hex(epoch_count), last_epoch, reward_percentiles) + rtn[ + 'baseFeePerGas' + ] = [ int(v, 16) for v in rtn['baseFeePerGas'] ] + return rtn + + + def wait_for_pos_register(self, priv_key=None, stake_value=2_000_000, voting_power=None, legacy=True, should_fail=False): + if priv_key is None: + priv_key = self.node.pow_sk + if voting_power is None: + voting_power = stake_value // default_config["POS_VOTE_COUNT"] + address = eth_utils.encode_hex(priv_to_addr(priv_key)) + initial_tx = self.new_tx(receiver=address, value=(stake_value + 20) * 10 ** 18) + self.send_tx(initial_tx, wait_for_receipt=True) + stake_tx = self.new_tx(priv_key=priv_key, data=stake_tx_data(stake_value), value=0, + receiver="0x0888000000000000000000000000000000000002", gas=CONTRACT_DEFAULT_GAS) + self.send_tx(stake_tx, wait_for_receipt=True) + data, pos_identifier = self.node.test_posRegister(int_to_hex(voting_power), 0 if legacy else 1) + register_tx = self.new_tx(priv_key=priv_key, data=eth_utils.hexadecimal.decode_hex(data), value=0, + receiver="0x0888000000000000000000000000000000000005", gas=CONTRACT_DEFAULT_GAS, + storage_limit=1024) + register_tx_hash = 
self.send_tx(register_tx, wait_for_receipt=True) + assert_equal( + int(self.node.cfx_getTransactionReceipt(register_tx_hash)["outcomeStatus"], 0), + 1 if should_fail else 0 + ) + return pos_identifier, priv_key + + def wait_for_unstake(self, priv_key=None, unstake_value=2_000_000): + if priv_key is None: + priv_key = self.node.pow_sk + unstake_tx = self.new_tx(priv_key=priv_key, data=unstake_tx_data(unstake_value), value=0, + receiver="0x0888000000000000000000000000000000000002", gas=CONTRACT_DEFAULT_GAS) + self.send_tx(unstake_tx, wait_for_receipt=True) + + def pos_retire_self(self, unlock_vote: int): + retire_tx = self.new_tx(priv_key=self.node.pow_sk, data=retire_tx_data(unlock_vote), value=0, + receiver="0x0888000000000000000000000000000000000005", gas=6_000_000) + self.send_tx(retire_tx, wait_for_receipt=True) + + def pos_get_consensus_blocks(self): + return self.node.pos_getConsensusBlocks() + + def pos_status(self): + return self.node.pos_getStatus() + + def pos_get_block(self, block): + if isinstance(block, str) and len(block) == 34: + return self.node.pos_getBlockByHash(block) + else: + if isinstance(block, int): + block = int_to_hex(block) + return self.node.pos_getBlockByNumber(block) + + def pos_proposal_timeout(self): + return self.node.test_posTriggerTimeout("proposal") + + def pos_local_timeout(self): + return self.node.test_posTriggerTimeout("local") + + def pos_new_round_timeout(self): + return self.node.test_posTriggerTimeout("new_round") + + def pos_force_sign_pivot_decision(self, block_hash, height): + return self.node.test_posForceSignPivotDecision(block_hash, height) + + def pos_get_chosen_proposal(self): + return self.node.test_posGetChosenProposal() + + def pos_get_account(self, account_address, view=None): + if view is None: + return self.node.pos_getAccount(account_address) + else: + return self.node.pos_getAccount(account_address, view) + + def pos_get_account_by_pow_address(self, address, view=None): + address = hex_to_b32_address(address) + if view is None: + return self.node.pos_getAccountByPowAddress(address) + else: + return self.node.pos_getAccountByPowAddress(address, view) + + +def stake_tx_data(staking_value: int): + staking_contract_dict = json.loads( + open(os.path.join(file_dir, "../../internal_contract/metadata/Staking.json"), "r").read()) + staking_contract = get_contract_instance(contract_dict=staking_contract_dict) + return get_contract_function_data(staking_contract, "deposit", args=[staking_value * 10 ** 18]) + + +def unstake_tx_data(unstaking_value: int): + staking_contract_dict = json.loads( + open(os.path.join(file_dir, "../../internal_contract/metadata/Staking.json"), "r").read()) + staking_contract = get_contract_instance(contract_dict=staking_contract_dict) + return get_contract_function_data(staking_contract, "withdraw", args=[unstaking_value * 10 ** 18]) + + +def retire_tx_data(unlock_vote: int): + register_contract_dict = json.loads( + open(os.path.join(file_dir, "../../internal_contract/metadata/PoSRegister.json"), "r").read()) + register_contract = get_contract_instance(contract_dict=register_contract_dict) + return get_contract_function_data(register_contract, "retire", args=[unlock_vote]) + + +def lock_tx_data(locked_value: int, unlock_block_number: int): + staking_contract_dict = json.loads( + open(os.path.join(file_dir, "../../internal_contract/metadata/Staking.json"), "r").read()) + staking_contract = get_contract_instance(contract_dict=staking_contract_dict) + return get_contract_function_data(staking_contract, "voteLock", 
args=[locked_value * 10 ** 18, unlock_block_number]) + + +def get_contract_function_data(contract, name, args): + func = getattr(contract.functions, name) + attrs = { + **REQUEST_BASE, + } + tx_data = func(*args).build_transaction(attrs) + return eth_utils.hexadecimal.decode_hex(tx_data['data']) diff --git a/integration_tests/conflux/transactions.py b/integration_tests/conflux/transactions.py new file mode 100644 index 0000000000..4203a7a2ac --- /dev/null +++ b/integration_tests/conflux/transactions.py @@ -0,0 +1,223 @@ +import copy + +import eth_utils +import rlp +# import sender as sender +from rlp.sedes import big_endian_int, binary + +from .exceptions import InvalidTransaction +from . import utils +from .utils import TT256, mk_contract_address, zpad, int_to_32bytearray, \ + big_endian_to_int, ecsign, ecrecover_to_pub, normalize_key, str_to_bytes, \ + encode_hex, address + +CONTRACT_DEFAULT_GAS = 3_000_000 +COLLATERAL_UNIT_IN_DRIP = 10 ** 18 // 1024 + +def charged_of_huge_gas(gas): + return gas - gas // 4 + + +class EthLikeUnsignedTransaction(rlp.Serializable): + fields = [ + ('nonce', big_endian_int), + ('gas_price', big_endian_int), + ('gas', big_endian_int), + ('action', address), + ('value', big_endian_int), + ('data', binary), + ('chain_id', big_endian_int), + ('reserved1', binary), + ('reserved2', binary) + ] + + def __init__(self, unsigned_tx): + super(EthLikeUnsignedTransaction, self).__init__( + nonce=unsigned_tx.nonce, + gas_price=unsigned_tx.gas_price, + gas=unsigned_tx.gas, + value=unsigned_tx.value, + action=unsigned_tx.action, + data=unsigned_tx.data, + chain_id=unsigned_tx.chain_id, + reserved1=b"", + reserved2=b"", + ) + + +class UnsignedTransaction(rlp.Serializable): + fields = [ + ('nonce', big_endian_int), + ('gas_price', big_endian_int), + ('gas', big_endian_int), + ('action', address), + ('value', big_endian_int), + ('storage_limit', big_endian_int), + ('epoch_height', big_endian_int), + ('chain_id', big_endian_int), + ('data', binary), + ] + + def __init__(self, nonce, gas_price, gas, action, value, data, storage_limit, epoch_height, chain_id): + if gas_price >= TT256 or \ + value >= TT256 or nonce >= TT256: + raise InvalidTransaction("Values way too high!") + + self.eth_like = (epoch_height == 0xffff_ffff_ffff_ffff) + + super(UnsignedTransaction, self).__init__( + nonce=nonce, + gas_price=gas_price, + gas=gas, + value=value, + action=action, + data=data, + storage_limit=storage_limit, + epoch_height=epoch_height, + chain_id=chain_id + ) + + def get_rawhash(self): + if not self.eth_like: + return utils.sha3( + rlp.encode(self, UnsignedTransaction)) + else: + eth_like_tx = EthLikeUnsignedTransaction(self) + return utils.sha3( + rlp.encode(eth_like_tx, EthLikeUnsignedTransaction)) + + def sign(self, key): + rawhash = self.get_rawhash() + + key = normalize_key(key) + + v, r, s = ecsign(rawhash, key) + v = v - 27 + ret = Transaction(transaction=copy.deepcopy(self), v=v, r=r, s=s) + ret._sender = utils.priv_to_addr(key) + return ret + +class UnsignedTransaction1559(rlp.Serializable): + fields = [ + ('nonce', big_endian_int), + ('max_priority_fee_per_gas', big_endian_int), + ('max_fee_per_gas', big_endian_int), + ('gas', big_endian_int), + ('action', address), + ('value', big_endian_int), + ('storage_limit', big_endian_int), + ('epoch_height', big_endian_int), + ('chain_id', big_endian_int), + ('data', binary), + ] + + def __init__(self, nonce, gas_price, gas, action, value, data, storage_limit, epoch_height, chain_id): + if gas_price >= TT256 or \ + value >= TT256 or 
nonce >= TT256:
+            raise InvalidTransaction("Values way too high!")
+
+        self.eth_like = (epoch_height == 0xffff_ffff_ffff_ffff)
+
+        # This test helper takes a single gas_price argument and applies it
+        # to both 1559 fee fields.
+        super(UnsignedTransaction1559, self).__init__(
+            nonce=nonce,
+            max_priority_fee_per_gas=gas_price,
+            max_fee_per_gas=gas_price,
+            gas=gas,
+            value=value,
+            action=action,
+            data=data,
+            storage_limit=storage_limit,
+            epoch_height=epoch_height,
+            chain_id=chain_id
+        )
+
+    @property
+    def gas_price(self):
+        return self.max_fee_per_gas
+
+    def get_rawhash(self):
+        if not self.eth_like:
+            # Encode with this class's own sedes (the 1559 field list).
+            return utils.sha3(
+                rlp.encode(self, UnsignedTransaction1559))
+        else:
+            eth_like_tx = EthLikeUnsignedTransaction(self)
+            return utils.sha3(
+                rlp.encode(eth_like_tx, EthLikeUnsignedTransaction))
+
+    def sign(self, key):
+        rawhash = self.get_rawhash()
+
+        key = normalize_key(key)
+
+        v, r, s = ecsign(rawhash, key)
+        v = v - 27
+        ret = Transaction(transaction=copy.deepcopy(self), v=v, r=r, s=s)
+        ret._sender = utils.priv_to_addr(key)
+        return ret
+
+class Transaction(rlp.Serializable):
+    """
+    A transaction is stored as:
+    [[nonce, gas_price, gas, action, value, storage_limit, epoch_height, chain_id, data], v, r, s]
+
+    nonce is the number of transactions already sent by that account, encoded
+    in binary form (eg. 0 -> '', 7 -> '\x07', 1000 -> '\x03\xd8').
+
+    (v,r,s) is the raw Electrum-style signature of the unsigned transaction,
+    made with the private key corresponding to the sending account, with
+    0 <= v <= 1. From an Electrum-style signature (65 bytes) it is
+    possible to extract the public key, and thereby the address, directly.
+
+    A valid transaction is one where:
+    (i) the signature is well-formed, and
+    (ii) the sending account has enough funds to pay the fee and the value.
+    """
+
+    fields = [
+        ('transaction', UnsignedTransaction),
+        ('v', big_endian_int),
+        ('r', big_endian_int),
+        ('s', big_endian_int),
+    ]
+
+    _sender = None
+
+    @property
+    def sender(self):
+        return self._sender
+
+    @sender.setter
+    def sender(self, value):
+        self._sender = value
+
+    @property
+    def hash(self):
+        return utils.sha3(rlp.encode(self))
+
+    def hash_hex(self):
+        return eth_utils.encode_hex(self.hash)
+
+    def to_dict(self):
+        d = {}
+        for name, _ in self.__class__._meta.fields:
+            d[name] = getattr(self, name)
+        d['sender'] = '0x' + encode_hex(self.sender)
+        d['hash'] = '0x' + encode_hex(self.hash)
+        return d
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__) and self.hash == other.hash
+
+    def __lt__(self, other):
+        return isinstance(other, self.__class__) and self.hash < other.hash
+
+    def __hash__(self):
+        return utils.big_endian_to_int(self.hash)
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __repr__(self):
+        return '<Transaction(%s)>' % encode_hex(self.hash)[:4]
+
+    def __getattr__(self, item):
+        return getattr(self.transaction, item)
diff --git a/integration_tests/conflux/trie.py b/integration_tests/conflux/trie.py
new file mode 100644
index 0000000000..cfd8698b59
--- /dev/null
+++ b/integration_tests/conflux/trie.py
@@ -0,0 +1,50 @@
+from . import utils
+from eth_utils import decode_hex
+import rlp
+
+BLANK_NODE = b''
+BLANK_ROOT = utils.sha3rlp(b'')
+NULL_ROOT = utils.sha3(b'')
+# The receipt root of the block itself is KECCAK_EMPTY, however the
+# epoch RECEIPT_ROOT is the Merkle Root of the MPT with a single
+# key value of (0, KECCAK_EMPTY).
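+# Concretely, the constant below hashes a branch node as: the tag b'n', the
+# 16 child hashes, then the tag b'v' followed by the node value (KECCAK_EMPTY
+# here); the precompute helpers further down assemble their preimages the
+# same way.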
+EMPTY_BLOCK_RECEIPT_ROOT = utils.sha3(b'n' + NULL_ROOT * 16 + b'v' + NULL_ROOT) + + +def state_root( + snapshot_root = NULL_ROOT, + intermediate_delta_root = NULL_ROOT, + delta_root = NULL_ROOT): + return [snapshot_root, intermediate_delta_root, delta_root] + + +def precompute_epoch_receipt_root_by_number_of_blocks(): + receipt_root_by_number_of_blocks = [] + + # 1 block is a special case for the Receipt Root MPT. + path_bytes = bytearray() + path_bytes.extend([128 + 64 + 1, 0]) + epoch_receipt_root_one_block_path_merkle = utils.sha3( + bytes(path_bytes) + EMPTY_BLOCK_RECEIPT_ROOT) + receipt_root_by_number_of_blocks.append( + epoch_receipt_root_one_block_path_merkle + ) + for number_of_blocks in range(2, 16): + epoch_receipt_root = utils.sha3( + b'n' + EMPTY_BLOCK_RECEIPT_ROOT * number_of_blocks \ + + NULL_ROOT * (16 - number_of_blocks)) + receipt_root_by_number_of_blocks.append(epoch_receipt_root) + + return receipt_root_by_number_of_blocks + + +def compute_transaction_root_for_single_transaction(tx_hash): + node_hash = utils.sha3(b'n' + NULL_ROOT * 16 + b'v' + tx_hash) + path_bytes = bytearray() + path_bytes.extend([128 + 64 + 1, 0]) + return utils.sha3( + bytes(path_bytes) + node_hash) + + +UNINITIALIZED_STATE_ROOT = utils.sha3(rlp.encode(state_root())) +EMPTY_EPOCH_RECEIPT_ROOT_BY_NUMBER_OF_BLOCKS = precompute_epoch_receipt_root_by_number_of_blocks() diff --git a/integration_tests/conflux/utils.py b/integration_tests/conflux/utils.py new file mode 100644 index 0000000000..33bb36f584 --- /dev/null +++ b/integration_tests/conflux/utils.py @@ -0,0 +1,623 @@ +import re + +import sha3 as _sha3 +from py_ecc.secp256k1.secp256k1 import privtopub, ecdsa_raw_sign, ecdsa_raw_recover +import rlp +from rlp.sedes import big_endian_int, BigEndianInt, Binary +from eth_utils.hexadecimal import ( + encode_hex as encode_hex_0x, + decode_hex +) +from eth_utils.encoding import int_to_big_endian, big_endian_to_int +from rlp.utils import ALL_BYTES +import random +import coincurve + + +def sha3_256(x): return _sha3.keccak_256(x).digest() + + +class Memoize: + def __init__(self, fn): + self.fn = fn + self.memo = {} + + def __call__(self, *args): + if args not in self.memo: + self.memo[args] = self.fn(*args) + return self.memo[args] + + +TT256 = 2 ** 256 +TT256M1 = 2 ** 256 - 1 +TT255 = 2 ** 255 +SECP256K1P = 2 ** 256 - 4294968273 + + +def is_numeric(x): return isinstance(x, int) + + +def is_string(x): return isinstance(x, bytes) + + +def to_string(value): + if isinstance(value, bytes): + return value + if isinstance(value, str): + return bytes(value, 'utf-8') + if isinstance(value, int): + return bytes(str(value), 'utf-8') + + +def int_to_bytes(value): + if isinstance(value, bytes): + return value + return int_to_big_endian(value) + + +def to_string_for_regexp(value): + return str(to_string(value), 'utf-8') + + +unicode = str + + +def bytearray_to_bytestr(value): + return bytes(value) + + +def encode_int32(v): + return v.to_bytes(32, byteorder='big') + + +def bytes_to_int(value): + return int.from_bytes(value, byteorder='big') + + +def str_to_bytes(value): + if isinstance(value, bytearray): + value = bytes(value) + if isinstance(value, bytes): + return value + return bytes(value, 'utf-8') + + +def ascii_chr(n): + return ALL_BYTES[n] + + +def encode_hex(n): + if isinstance(n, str): + return encode_hex(n.encode('ascii')) + return encode_hex_0x(n)[2:] + + +def ecrecover_to_pub(rawhash, v, r, s): + if coincurve and hasattr(coincurve, "PublicKey"): + try: + pk = coincurve.PublicKey.from_signature_and_message( + 
zpad(bytearray_to_bytestr(int_to_32bytearray(r)), 32) + zpad(
+                    bytearray_to_bytestr(int_to_32bytearray(s)), 32) +
+                ascii_chr(v - 27),
+                rawhash,
+                hasher=None,
+            )
+            pub = pk.format(compressed=False)[1:]
+            x, y = pk.point()
+        except BaseException:
+            x, y = 0, 0
+            pub = b"\x00" * 64
+    else:
+        result = ecdsa_raw_recover(rawhash, (v, r, s))
+        if result:
+            x, y = result
+            pub = encode_int32(x) + encode_int32(y)
+        else:
+            raise ValueError('Invalid VRS')
+    assert len(pub) == 64
+    return pub, x, y
+
+
+def ecsign(rawhash, key):
+    if coincurve and hasattr(coincurve, 'PrivateKey'):
+        pk = coincurve.PrivateKey(key)
+        signature = pk.sign_recoverable(rawhash, hasher=None)
+        v = safe_ord(signature[64]) + 27
+        r = big_endian_to_int(signature[0:32])
+        s = big_endian_to_int(signature[32:64])
+    else:
+        v, r, s = ecdsa_raw_sign(rawhash, key)
+    return v, r, s
+
+
+def ec_random_keys():
+    # randint's upper bound is inclusive; 2 ** 256 itself would not fit in
+    # 32 bytes, and zero is not a valid private key
+    priv_key = random.randint(1, 2 ** 256 - 1).to_bytes(32, "big")
+    pub_key = privtopub(priv_key)
+    return priv_key, pub_key
+
+
+def convert_to_nodeid(signature, challenge):
+    r = big_endian_to_int(signature[:32])
+    s = big_endian_to_int(signature[32:64])
+    v = big_endian_to_int(signature[64:]) + 27
+    signed = int_to_bytes(challenge)
+    h_signed = sha3_256(signed)
+    return ecrecover_to_pub(h_signed, v, r, s)
+
+
+def get_nodeid(node):
+    challenge = random.randint(0, 2**32-1)
+    signature = node.test_getNodeId(list(int_to_bytes(challenge)))
+    return convert_to_nodeid(signature, challenge)
+
+
+def mk_contract_address(sender, nonce):
+    return sha3(rlp.encode([normalize_address(sender), nonce]))[12:]
+
+
+def mk_metropolis_contract_address(sender, initcode):
+    return sha3(normalize_address(sender) + initcode)[12:]
+
+
+def safe_ord(value):
+    if isinstance(value, int):
+        return value
+    else:
+        return ord(value)
+
+
+# decorator
+
+
+def debug(label):
+    def deb(f):
+        def inner(*args, **kwargs):
+            i = random.randrange(1000000)
+            print(label, i, 'start', args)
+            x = f(*args, **kwargs)
+            print(label, i, 'end', x)
+            return x
+
+        return inner
+
+    return deb
+
+
+def flatten(li):
+    o = []
+    for l in li:
+        o.extend(l)
+    return o
+
+
+def bytearray_to_int(arr):
+    o = 0
+    for a in arr:
+        o = (o << 8) + a
+    return o
+
+
+def int_to_32bytearray(i):
+    o = [0] * 32
+    for x in range(32):
+        o[31 - x] = i & 0xff
+        i >>= 8
+    return o
+
+
+# sha3_count = [0]
+
+
+def sha3(seed):
+    return sha3_256(to_string(seed))
+
+
+assert encode_hex(sha3(b'')) == 'c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470'
+assert encode_hex(sha3(b'\x00' * 256)) == 'd397b3b043d87fcd6fad1291ff0bfd16401c274896d8c63a923727f077b8e0b5'
+
+
+@Memoize
+def priv_to_addr(k):
+    k = normalize_key(k)
+    x, y = privtopub(k)
+    addr = bytearray(sha3(encode_int32(x) + encode_int32(y))[12:])
+    addr[0] &= 0x0f
+    addr[0] |= 0x10
+    return bytes(addr)
+
+
+def priv_to_pub(k):
+    k = normalize_key(k)
+    x, y = privtopub(k)
+    return bytes(encode_int32(x) + encode_int32(y))
+
+
+def pub_to_addr(k):
+    x = big_endian_to_int(decode_hex(k[2:34]))
+    y = big_endian_to_int(decode_hex(k[34:66]))
+    # sha3 returns immutable bytes; use a bytearray so the first byte can be
+    # rewritten, as in priv_to_addr above
+    addr = bytearray(sha3(encode_int32(x) + encode_int32(y))[12:])
+    addr[0] &= 0x0f
+    addr[0] |= 0x10
+    return bytes(addr)
+
+def checksum_encode(addr):  # Takes a 20-byte binary address as input
+    addr = normalize_address(addr)
+    o = ''
+    v = big_endian_to_int(sha3(encode_hex(addr)))
+    for i, c in enumerate(encode_hex(addr)):
+        if c in '0123456789':
+            o += c
+        else:
+            o += c.upper() if (v & (2 ** (255 - 4 * i))) else c.lower()
+    return '0x' + o
+
+
+def check_checksum(addr):
+    return checksum_encode(normalize_address(addr)) == addr
+
+
+def normalize_address(x, allow_blank=False):
+    if is_numeric(x):
+        return int_to_addr(x)
+    if allow_blank and x in {'', b''}:
+        return b''
+    if len(x) in (42, 50) and x[:2] in {'0x', b'0x'}:
+        x = x[2:]
+    if len(x) in (40, 48):
+        x = decode_hex(x)
+    if len(x) == 24:
+        assert len(x) == 24 and sha3(x[:20])[:4] == x[-4:]
+        x = x[:20]
+    if len(x) != 20:
+        raise Exception("Invalid address format: %r" % x)
+    return x
+
+
+def normalize_key(key):
+    if is_numeric(key):
+        o = encode_int32(key)
+    elif len(key) == 32:
+        o = key
+    elif len(key) == 64:
+        o = decode_hex(key)
+    elif len(key) == 66 and key[:2] == '0x':
+        o = decode_hex(key[2:])
+    else:
+        raise Exception("Invalid key format: %r" % key)
+    if o == b'\x00' * 32:
+        raise Exception("Zero privkey invalid")
+    return o
+
+
+def zpad(x, l):
+    """ Left zero pad value `x` at least to length `l`.
+
+    >>> zpad(b'', 1)
+    b'\x00'
+    >>> zpad(b'\xca\xfe', 4)
+    b'\x00\x00\xca\xfe'
+    >>> zpad(b'\xff', 1)
+    b'\xff'
+    >>> zpad(b'\xca\xfe', 2)
+    b'\xca\xfe'
+    """
+    return b'\x00' * max(0, l - len(x)) + x
+
+
+def rzpad(value, total_length):
+    """ Right zero pad `value` at least to length `total_length`.
+
+    >>> rzpad(b'', 1)
+    b'\x00'
+    >>> rzpad(b'\xca\xfe', 4)
+    b'\xca\xfe\x00\x00'
+    >>> rzpad(b'\xff', 1)
+    b'\xff'
+    >>> rzpad(b'\xca\xfe', 2)
+    b'\xca\xfe'
+    """
+    return value + b'\x00' * max(0, total_length - len(value))
+
+
+def int_to_addr(x):
+    o = [b''] * 20
+    for i in range(20):
+        o[19 - i] = ascii_chr(x & 0xff)
+        x >>= 8
+    return b''.join(o)
+
+
+def coerce_addr_to_bin(x):
+    if is_numeric(x):
+        return encode_hex(zpad(big_endian_int.serialize(x), 20))
+    elif len(x) == 40 or len(x) == 0:
+        return decode_hex(x)
+    else:
+        return zpad(x, 20)[-20:]
+
+
+def coerce_addr_to_hex(x):
+    if is_numeric(x):
+        return encode_hex(zpad(big_endian_int.serialize(x), 20))
+    elif len(x) == 40 or len(x) == 0:
+        return x
+    else:
+        return encode_hex(zpad(x, 20)[-20:])
+
+
+def coerce_to_int(x):
+    if is_numeric(x):
+        return x
+    elif len(x) == 40:
+        return big_endian_to_int(decode_hex(x))
+    else:
+        return big_endian_to_int(x)
+
+
+def coerce_to_bytes(x):
+    if is_numeric(x):
+        return big_endian_int.serialize(x)
+    elif len(x) == 40:
+        return decode_hex(x)
+    else:
+        return x
+
+
+def parse_int_or_hex(s):
+    if is_numeric(s):
+        return s
+    elif s[:2] in (b'0x', '0x'):
+        s = to_string(s)
+        tail = (b'0' if len(s) % 2 else b'') + s[2:]
+        return big_endian_to_int(decode_hex(tail))
+    else:
+        return int(s)
+
+
+def ceil32(x):
+    return x if x % 32 == 0 else x + 32 - (x % 32)
+
+
+def to_signed(i):
+    return i if i < TT255 else i - TT256
+
+
+def sha3rlp(x):
+    return sha3(rlp.encode(x))
+
+
+# Format encoders/decoders for bin, addr, int
+
+
+def decode_bin(v):
+    """decodes a bytearray from serialization"""
+    if not is_string(v):
+        raise Exception("Value must be binary, not RLP array")
+    return v
+
+
+def decode_addr(v):
+    """decodes an address from serialization"""
+    if len(v) not in [0, 20]:
+        raise Exception("Serialized addresses must be empty or 20 bytes long!")
+    return encode_hex(v)
+
+
+def decode_int(v):
+    """decodes an integer from serialization"""
+    if len(v) > 0 and (v[0] == b'\x00' or v[0] == 0):
+        raise Exception("No leading zero bytes allowed for integers")
+    return big_endian_to_int(v)
+
+
+def decode_int256(v):
+    return big_endian_to_int(v)
+
+
+def encode_bin(v):
+    """encodes a bytearray into serialization"""
+    return v
+
+
+def encode_root(v):
+    """encodes a trie root into serialization"""
+    return v
+
+
+def encode_int(v):
+    """encodes an integer into serialization"""
+    if not is_numeric(v) or v < 0 or v >= TT256:
+        raise Exception("Integer invalid or out of range: %r" % v)
+    return int_to_big_endian(v)
+
+
+def encode_int256(v):
+    return zpad(int_to_big_endian(v), 256)
+
+
+def scan_bin(v):
+    if v[:2] in ('0x', b'0x'):
+        return decode_hex(v[2:])
+    else:
+        return decode_hex(v)
+
+
+def scan_int(v):
+    if v[:2] in ('0x', b'0x'):
+        return big_endian_to_int(decode_hex(v[2:]))
+    else:
+        return int(v)
+
+
+# Decoding from RLP serialization
+decoders = {
+    "bin": decode_bin,
+    "addr": decode_addr,
+    "int": decode_int,
+    "int256b": decode_int256,
+}
+
+# Encoding to RLP serialization
+encoders = {
+    "bin": encode_bin,
+    "int": encode_int,
+    "trie_root": encode_root,
+    "int256b": encode_int256,
+}
+
+# Encoding to printable format
+printers = {
+    "bin": lambda v: '0x' + encode_hex(v),
+    "addr": lambda v: v,
+    "int": lambda v: to_string(v),
+    "trie_root": lambda v: encode_hex(v),
+    "int256b": lambda x: encode_hex(zpad(encode_int256(x), 256))
+}
+
+# Decoding from printable format
+scanners = {
+    "bin": scan_bin,
+    "addr": lambda x: x[2:] if x[:2] in (b'0x', '0x') else x,
+    "int": scan_int,
+    "trie_root": lambda x: scan_bin(x),
+    "int256b": lambda x: big_endian_to_int(decode_hex(x))
+}
+
+
+def int_to_hex(x):
+    o = encode_hex(encode_int(x))
+    # o is a str, so compare its first character with '0' when stripping a
+    # leading zero nibble
+    return '0x' + (o[1:] if (len(o) > 0 and o[0] == '0') else o)
+
+
+def remove_0x_head(s):
+    return s[2:] if s[:2] in (b'0x', '0x') else s
+
+
+def parse_as_bin(s):
+    return decode_hex(s[2:] if s[:2] == '0x' else s)
+
+
+def parse_as_int(s):
+    return s if is_numeric(s) else int(
+        '0' + s[2:], 16) if s[:2] == '0x' else int(s)
+
+
+def print_func_call(ignore_first_arg=False, max_call_number=100):
+    """ utility function to facilitate debugging; it prints the input args
+    before the function call, and prints the return value after the call
+
+    usage:
+
+        @print_func_call
+        def some_func_to_be_debugged():
+            pass
+
+    :param ignore_first_arg: whether to print the first arg or not;
+        useful for ignoring the `self` parameter of an object method call
+    """
+    from functools import wraps
+
+    def display(x):
+        x = to_string(x)
+        try:
+            x.decode('ascii')
+        except BaseException:
+            return 'NON_PRINTABLE'
+        return x
+
+    local = {'call_number': 0}
+
+    def inner(f):
+
+        @wraps(f)
+        def wrapper(*args, **kwargs):
+            local['call_number'] += 1
+            tmp_args = args[1:] if ignore_first_arg and len(args) else args
+            this_call_number = local['call_number']
+            print(('{0}#{1} args: {2}, {3}'.format(
+                f.__name__,
+                this_call_number,
+                ', '.join([display(x) for x in tmp_args]),
+                ', '.join(display(key) + '=' + to_string(value)
+                          for key, value in kwargs.items())
+            )))
+            res = f(*args, **kwargs)
+            print(('{0}#{1} return: {2}'.format(
+                f.__name__,
+                this_call_number,
+                display(res))))
+
+            # stop runaway logging once the configured limit is exceeded
+            if local['call_number'] > max_call_number:
+                raise Exception("Reached max call number!")
+            return res
+
+        return wrapper
+
+    return inner
+
+
+def dump_state(trie):
+    res = ''
+    for k, v in list(trie.to_dict().items()):
+        res += '%r:%r\n' % (encode_hex(k), encode_hex(v))
+    return res
+
+
+class Denoms():
+
+    def __init__(self):
+        self.wei = 1
+        self.babbage = 10 ** 3
+        self.ada = 10 ** 3
+        self.kwei = 10 ** 3
+        self.lovelace = 10 ** 6
+        self.mwei = 10 ** 6
+        self.shannon = 10 ** 9
+        self.gwei = 10 ** 9
+        self.szabo = 10 ** 12
+        self.finney = 10 ** 15
+        self.mether = 10 ** 15
+        self.ether = 10 ** 18
+        self.turing = 2 ** 256 - 1
+
+
+denoms = Denoms()
+
+address = Binary.fixed_length(20, allow_empty=True)
+int20 = BigEndianInt(20)
+int32 = BigEndianInt(32)
+int256 = BigEndianInt(256)
+hash32 = Binary.fixed_length(32)
+hash20 = Binary.fixed_length(20)
+trie_root = Binary.fixed_length(32, allow_empty=True)
+
+
+class bcolors:
+    HEADER = '\033[95m'
+    OKBLUE = '\033[94m'
+    OKGREEN = '\033[92m'
+    WARNING = '\033[91m'
+    FAIL = '\033[91m'
+    ENDC = '\033[0m'
+    BOLD = '\033[1m'
+    UNDERLINE = '\033[4m'
+
+def to_snake(s):
+    return re.sub(r'(?<!^)(?=[A-Z])', '_', s).lower()
diff --git a/integration_tests/readme.md b/integration_tests/readme.md
new file mode 100644
--- /dev/null
+++ b/integration_tests/readme.md
+# Integration Tests
+
+## Run Tests
+
+### Prerequisite
+
+> The Conflux node binary must be built first; refer to [README.md](../README.md) for more details.
+
+### Python Environment Setup
+
+It is recommended to run the tests in a virtual environment, created with either `venv` or `conda`.
+
+Use `venv` to create a virtual environment:
+
+```bash
+python3 -m venv .venv
+source .venv/bin/activate
+```
+
+Use `conda` to create a virtual environment:
+
+```bash
+conda create -n conflux_test python=3.10
+conda activate conflux_test
+```
+
+### Python Dependencies
+
+Use `dev-support/dep_pip3.sh` to install the Python dependencies.
+
+```bash
+./dev-support/dep_pip3.sh
+```
+
+### Run Tests
+
+#### Command Line
+
+Run tests with `pytest`:
+
+```bash
+pytest integration_tests/tests -vv -n logical --dist loadscope
+```
+
+> `-vv` shows verbose output.
+>
+> `-n logical` runs tests in parallel on all logical cores; replace `logical` with a number such as `2` to use that many worker processes.
+>
+> `--dist loadscope` controls how tests are distributed: tests are grouped by their scope (functions within the same module, or methods within the same test class), and each group is assigned to a single worker.
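+>
+> To debug a single failing test, it is often easier to run it serially, e.g. `pytest integration_tests/tests/my_test.py -vv -s` (without `-n`, pytest runs in a single process; `-s` disables output capture). The filename here is only a placeholder.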
+
+Use pytest options to filter tests:
+
+```bash
+# Run tests whose names match a keyword expression
+pytest integration_tests/tests -k test_name
+
+# Run tests in a specific file
+pytest integration_tests/tests/test_file.py
+
+# Run a specific test in a file
+pytest integration_tests/tests/test_file.py::test_name
+```
+
+#### VSCode
+
+Put the configuration below in `.vscode/settings.json`:
+
+```json
+{
+    "python.testing.pytestArgs": [
+        "integration_tests/tests",
+        "-vv",
+        "-n", "logical",
+        "--dist", "loadscope"
+    ],
+    "python.testing.unittestEnabled": false,
+    "python.testing.pytestEnabled": true
+}
+```
+
+Then the tests show up in the VSCode Test Explorer, and you can run a test by clicking its name.
+
+## Add New Tests
+
+### Fixture Configuration
+
+Pytest fixtures are set up before tests run. You can find fixtures in the test files as well as in `*/conftest.py`.
+
+> Refer to the [pytest documentation](https://docs.pytest.org/en/latest/how-to/fixtures.html) for more details.
+
+In the [global conftest.py](./tests/conftest.py), the `framework_class` and `network` fixtures are defined for the test framework setup.
+
+```python
+
+@pytest.fixture(scope="module")
+def framework_class() -> Type[ConfluxTestFramework]:
+    class DefaultFramework(ConfluxTestFramework):
+        def set_test_params(self):
+            self.num_nodes = 1
+            self.conf_parameters = {
+                "executive_trace": "true",
+                "public_rpc_apis": "\"cfx,debug,test,pubsub,trace\"",
+                # Disable 1559 for RPC tests temporarily
+                "cip1559_transition_height": str(99999999),
+            }
+
+        def setup_network(self):
+            self.setup_nodes()
+            self.rpc = RpcClient(self.nodes[0])
+
+    return DefaultFramework
+
+@pytest.fixture(scope="module")
+def network(framework_class: Type[ConfluxTestFramework], port_min: int, request: pytest.FixtureRequest):
+    try:
+        framework = framework_class(port_min)
+    except Exception as e:
+        pytest.fail(f"Failed to setup framework: {e}")
+    yield framework
+    framework.teardown(request)
+```
+
+In your tests, you can use the `network` fixture directly for the test framework setup.
+
+You can override the `framework_class` fixture in a test file to customize the test framework setup. See [cip137_test.py](./tests/cip137_test.py) for an example.
+
+#### Common Fixtures
+
+Basic Fixtures:
+
+1. **`framework_class`**: The test framework class used to configure test parameters and start the test network. If a custom test framework (e.g., with specific parameters) is needed, this fixture can be overridden.
+2. **`network`**: An instance of the `framework_class`.
+
+Core Space Fixtures:
+
+1. **`cw3`**: An instance of `python-conflux-sdk`.
+2. **`core_accounts`**: Core Space accounts with a predefined CFX balance, ready for sending transactions.
+3. **`client`**: An instance of [`RpcClient`](./conflux/rpc.py) that wraps Core Space RPC interfaces for easier usage.
+
+eSpace Fixtures:
+
+1. **`ew3`**: An instance of `web3.py`.
+2. **`evm_accounts`**: eSpace accounts with a predefined CFX balance, ready for sending transactions.
+
+### Add Tests
+
+Create a new test file in the `tests` directory. The filename must include `test`, for example, `my_test.py`. Then, add test functions in the file with names that include `test`.
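+
+For a test that actually drives the chain, request the fixtures described above as arguments. A minimal sketch (assuming the module-scoped `client` fixture from the global `conftest.py` is available to the test module; `get_block_count` and `generate_block` are the `RpcClient` helpers used elsewhere in this framework):
+
+```python
+def test_generate_block(client):
+    # client wraps the Core Space RPC of the first node
+    before = client.get_block_count()
+    client.generate_block()
+    # mining one block should increase the node's block count
+    assert client.get_block_count() > before
+```
+
+At its simplest, though, a test is just a plain function: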
+ +```python +def test_hello(): + assert True +``` + +## FAQs diff --git a/integration_tests/test_framework/__init__.py b/integration_tests/test_framework/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/integration_tests/test_framework/authproxy.py b/integration_tests/test_framework/authproxy.py new file mode 100644 index 0000000000..4b64c7a330 --- /dev/null +++ b/integration_tests/test_framework/authproxy.py @@ -0,0 +1,185 @@ +# Copyright (c) 2011 Jeff Garzik +# +# Previous copyright, from python-jsonrpc/jsonrpc/proxy.py: +# +# Copyright (c) 2007 Jan-Klaas Kollhof +# +# This file is part of jsonrpc. +# +# jsonrpc is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation; either version 2.1 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this software; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +"""HTTP proxy for opening RPC connection to conflux. + +AuthServiceProxy has the following improvements over python-jsonrpc's +ServiceProxy class: + +- HTTP connections persist for the life of the AuthServiceProxy object + (if server supports HTTP/1.1) +- sends protocol 'version', per JSON-RPC 2.0 +- sends proper, incrementing 'id' +- sends Basic HTTP authentication headers +- parses all JSON numbers that look like floats as Decimal +- uses standard Python json lib +""" + +import base64 +import decimal +import http.client +import json +import logging +import socket +import time +import urllib.parse +from threading import Lock + +HTTP_TIMEOUT = 30 +USER_AGENT = "AuthServiceProxy/0.1" + +log = logging.getLogger("ConfluxRPC") + +class JSONRPCException(Exception): + def __init__(self, rpc_error): + try: + errmsg = '%(message)s (%(code)i)' % rpc_error + except (KeyError, TypeError): + errmsg = '' + super().__init__(errmsg) + self.error = rpc_error + + +def EncodeDecimal(o): + if isinstance(o, decimal.Decimal): + return str(o) + raise TypeError(repr(o) + " is not JSON serializable") + +class AuthServiceProxy(): + __id_count = 0 + + # ensure_ascii: escape unicode as \uXXXX, passed to json.dumps + def __init__(self, service_url, service_name=None, lock=None, timeout=HTTP_TIMEOUT, connection=None, ensure_ascii=True): + if lock is None: + self.lock = Lock() + else: + self.lock = lock + self.__service_url = service_url + self._service_name = service_name + self.ensure_ascii = ensure_ascii # can be toggled on the fly by tests + self.__url = urllib.parse.urlparse(service_url) + port = 80 if self.__url.port is None else self.__url.port + user = None if self.__url.username is None else self.__url.username.encode('utf8') + passwd = None if self.__url.password is None else self.__url.password.encode('utf8') + + if connection is not None: + # Callables re-use the connection of the original proxy + self.__conn = connection + elif self.__url.scheme == 'https': + self.__conn = http.client.HTTPSConnection(self.__url.hostname, port, timeout=timeout) + else: + self.__conn = http.client.HTTPConnection(self.__url.hostname, port, timeout=timeout) + + def __getattr__(self, 
name): + if name.startswith('__') and name.endswith('__'): + # Python internal stuff + raise AttributeError + if self._service_name is not None: + name = "%s.%s" % (self._service_name, name) + return AuthServiceProxy(self.__service_url, name, lock=self.lock, connection=self.__conn) + + def _request(self, method, path, postdata): + ''' + Do a HTTP request, with retry if we get disconnected (e.g. due to a timeout). + This is a workaround for https://bugs.python.org/issue3566 which is fixed in Python 3.5. + ''' + headers = {'Content-type': 'application/json', "Connection": "keep-alive"} + try: + self.__conn.request(method, path, postdata, headers) + return self._get_response() + except http.client.BadStatusLine as e: + if e.line == "''": # if connection was closed, try again + self.__conn.close() + self.__conn.request(method, path, postdata, headers) + return self._get_response() + else: + raise + except (BrokenPipeError, ConnectionResetError): + # Python 3.5+ raises BrokenPipeError instead of BadStatusLine when the connection was reset + # ConnectionResetError happens on FreeBSD with Python 3.4 + self.__conn.close() + self.__conn.request(method, path, postdata, headers) + return self._get_response() + + def get_request(self, *args, **argsn): + AuthServiceProxy.__id_count += 1 + + log.debug("-%s-> %s %s" % (AuthServiceProxy.__id_count, self._service_name, + json.dumps(args, default=EncodeDecimal, ensure_ascii=self.ensure_ascii))) + if argsn: + raise ValueError('json rpc 2 only supports array arguments') + + return {'jsonrpc': '2.0', + 'method': self._service_name, + 'params': args, + 'id': AuthServiceProxy.__id_count} + + def __call__(self, *args, **argsn): + postdata = json.dumps(self.get_request(*args, **argsn), default=EncodeDecimal, ensure_ascii=self.ensure_ascii) + self.lock.acquire() + try: + response = self._request('POST', self.__url.path, postdata.encode('utf-8')) + finally: + self.lock.release() + if ('error' in response) and (response['error'] is not None): + raise JSONRPCException(response['error']) + elif 'result' not in response: + raise JSONRPCException({ + 'code': -343, 'message': 'missing JSON-RPC result'}) + else: + return response['result'] + + def batch(self, rpc_call_list): + postdata = json.dumps(list(rpc_call_list), default=EncodeDecimal, ensure_ascii=self.ensure_ascii) + log.debug("--> " + postdata) + return self._request('POST', self.__url.path, postdata.encode('utf-8')) + + def _get_response(self): + req_start_time = time.time() + try: + http_response = self.__conn.getresponse() + except socket.timeout: + raise JSONRPCException({ + 'code': -344, + 'message': '%r RPC took longer than %f seconds. Consider ' + 'using larger timeout for calls that take ' + 'longer to return.' 
% (self._service_name, + self.__conn.timeout)}) + if http_response is None: + raise JSONRPCException({ + 'code': -342, 'message': 'missing HTTP response from server'}) + + content_type = http_response.getheader('Content-Type') + if content_type != 'application/json; charset=utf-8': + raise JSONRPCException({ + 'code': -342, 'message': 'non-JSON HTTP response with \'%i %s\' from server' % (http_response.status, http_response.reason)}) + + responsedata = http_response.read().decode('utf8') + response = json.loads(responsedata, parse_float=decimal.Decimal) + elapsed = time.time() - req_start_time + if "error" in response and response["error"] is None: + log.debug("<-%s- [%.6f] %s" % (response["id"], elapsed, json.dumps(response["result"], default=EncodeDecimal, ensure_ascii=self.ensure_ascii))) + else: + log.debug("<-- [%.6f] %s" % (elapsed, responsedata)) + return response + + def __truediv__(self, relative_uri): + return AuthServiceProxy("{}/{}".format(self.__service_url, relative_uri), self._service_name, connection=self.__conn) diff --git a/integration_tests/test_framework/block_gen_thread.py b/integration_tests/test_framework/block_gen_thread.py new file mode 100644 index 0000000000..8589b483d3 --- /dev/null +++ b/integration_tests/test_framework/block_gen_thread.py @@ -0,0 +1,76 @@ +from integration_tests.conflux.rpc import RpcClient +import random +import time +import threading + +class BlockGenThread(threading.Thread): + def __init__(self, nodes, log, num_txs=1000, interval_fixed=None, interval_base=1): + threading.Thread.__init__(self, daemon=True) + self.nodes = nodes + self.clients = [] + for node in nodes: + self.clients.append(RpcClient(node)) + self.log = log + self.num_txs = num_txs + self.interval_fixed = interval_fixed + self.interval_base = interval_base + + self.local_random = random.Random() + self.local_random.seed(random.random()) + self.stopped = False + + def run(self): + while not self.stopped: + try: + if self.interval_fixed is None: + time.sleep(self.local_random.random() * self.interval_base) + else: + time.sleep(self.interval_fixed) + + r = self.local_random.randint(0, len(self.nodes) - 1) + self.log.debug("choose %d to generate block", r) + h = self.clients[r].generate_block(self.num_txs) + + self.log.debug("%s generate block %s", r, h) + except Exception as e: + self.log.info("Node[%d] fails to generate blocks", r) + self.log.info(e) + + def stop(self): + self.stopped = True + + +class PoWGenerateThread(threading.Thread): + def __init__(self, name, node, generation_period_ms, log, report_progress_blocks=None, fixed_period=False): + threading.Thread.__init__(self, daemon=True) + self.name = name + self.node = node + self.generation_period_ms = generation_period_ms + self.log = log + self.report_progress_blocks = report_progress_blocks + self.fixed_period = fixed_period + + def generate_block(self): + self.node.test_generateEmptyBlocks(1) + + def run(self): + # generate blocks + i = 0 + period_start_time = time.time() + while True: + i += 1 + if self.report_progress_blocks is not None: + if i % self.report_progress_blocks == 0: + period_elapsed = time.time() - period_start_time + self.log.info("[%s]: %d blocks generated in %f seconds", self.name, self.report_progress_blocks, period_elapsed) + period_start_time = time.time() + + if self.fixed_period: + wait_sec = self.generation_period_ms / 1000 + else: + wait_sec = random.expovariate(1000 / self.generation_period_ms) + start = time.time() + self.generate_block() + elapsed = time.time() - start + if elapsed < 
wait_sec: + time.sleep(wait_sec - elapsed) \ No newline at end of file diff --git a/integration_tests/test_framework/blocktools.py b/integration_tests/test_framework/blocktools.py new file mode 100644 index 0000000000..5abf788e36 --- /dev/null +++ b/integration_tests/test_framework/blocktools.py @@ -0,0 +1,148 @@ +#!/usr/bin/env python3 +import rlp +from eth_utils import decode_hex +from rlp.sedes import CountableList + +from integration_tests.conflux import utils, trie +from integration_tests.conflux.config import DEFAULT_PY_TEST_CHAIN_ID, default_config +from integration_tests.conflux.messages import BlockHeader, Block, Transactions, Account +from integration_tests.conflux.transactions import Transaction, UnsignedTransaction +from integration_tests.conflux.utils import * +from integration_tests.conflux.rpc import RpcClient +from trie import HexaryTrie +import time +import jsonrpcclient + +TEST_DIFFICULTY = 4 +HASH_MAX = 1 << 256 + + +def create_block(parent_hash=default_config["GENESIS_PREVHASH"], height=0, timestamp=None, difficulty=TEST_DIFFICULTY, + gas_limit=default_config["GENESIS_GAS_LIMIT"], referee_hashes=[], author=default_config["GENESIS_COINBASE"], + deferred_state_root=default_config["GENESIS_STATE_ROOT"], deferred_receipts_root=trie.EMPTY_EPOCH_RECEIPT_ROOT_BY_NUMBER_OF_BLOCKS[0], + deferred_logs_bloom_hash=default_config["GENESIS_LOGS_BLOOM_HASH"], adaptive=0, transaction_root=None, + transactions=None, pos_reference=[default_config["POS_GENESIS_BLOCK"]]): + if timestamp is None: + timestamp = int(time.time()) + if transaction_root is None: + # So far we can not compute the transaction root in python, + # therefore we don't support filling in transactions without providing transaction root. + assert transactions is None + transaction_root = trie.NULL_ROOT + transactions=[] + nonce = 0 + while True: + header = BlockHeader(parent_hash=parent_hash, height=height, difficulty=difficulty, timestamp=timestamp, + author=author, transactions_root=transaction_root, gas_limit=gas_limit, + referee_hashes=referee_hashes, nonce=nonce, deferred_state_root=deferred_state_root, + deferred_receipts_root=deferred_receipts_root, + deferred_logs_bloom_hash=deferred_logs_bloom_hash, adaptive=adaptive, + pos_reference=pos_reference) + if header.pow_decimal() * difficulty < HASH_MAX: + break + nonce += 1 + block = Block(block_header=header, transactions=transactions) + return block + + +def create_block_with_nonce( + parent_hash=default_config["GENESIS_PREVHASH"], + height=0, + timestamp=None, + difficulty=TEST_DIFFICULTY, + transactions=[], + gas_limit=default_config["GENESIS_GAS_LIMIT"], + referee_hashes=[], + author=default_config["GENESIS_COINBASE"], + deferred_state_root=default_config["GENESIS_STATE_ROOT"], + deferred_receipts_root=trie.EMPTY_EPOCH_RECEIPT_ROOT_BY_NUMBER_OF_BLOCKS[0], + deferred_logs_bloom_hash=default_config["GENESIS_LOGS_BLOOM_HASH"], + adaptive=0, + nonce=0): + if timestamp is None: + timestamp = int(time.time()) + tx_root = utils.sha3(rlp.encode(Transactions(transactions))) + header = BlockHeader( + parent_hash=parent_hash, + height=height, + difficulty=difficulty, + timestamp=timestamp, + author=author, + transactions_root=tx_root, + gas_limit=gas_limit, + referee_hashes=referee_hashes, + nonce=nonce, + deferred_state_root=deferred_state_root, + deferred_receipts_root=deferred_receipts_root, + deferred_logs_bloom_hash=deferred_logs_bloom_hash, + adaptive=adaptive) + block = Block(block_header=header, transactions=transactions) + return block + + +def 
create_chain_of_blocks(parent_hash, parent_height, count): + chain = [] + for i in range(count): + b = create_block(parent_hash, parent_height + 1) + chain.append(b) + parent_hash = b.hash + parent_height += 1 + return chain + + + +def create_transaction(nonce=0, gas_price=1, gas=21000, value=0, receiver=default_config['GENESIS_COINBASE'], + data=b'', pri_key=default_config["GENESIS_PRI_KEY"], storage_limit=0, epoch_height = 0, chain_id = DEFAULT_PY_TEST_CHAIN_ID, node=None): + transaction = UnsignedTransaction(nonce, gas_price, gas, receiver, value, data, storage_limit, epoch_height, chain_id) + return transaction.sign(pri_key) + + +def wait_for_initial_nonce_for_privkey(node, key, timeout=10): + key = normalize_key(key) + addr = priv_to_addr(key) + return wait_for_initial_nonce_for_address(node, addr, timeout) + + +def wait_for_initial_nonce_for_address(node, addr, timeout=10): + return 0 + addr = encode_hex_0x(addr) + if addr == encode_hex_0x(priv_to_addr(default_config["GENESIS_PRI_KEY"])).lower(): + return 0 + nonce = 0 + start = time.time() + last_exception = None + while nonce == 0: + if time.time() - start > timeout: + raise AssertionError("Wait for initial nonce for address {} timeout after {} seconds, last exception is {}" + .format(addr, timeout, last_exception)) + try: + nonce = RpcClient(node).get_nonce(addr) + except jsonrpcclient.exceptions.ReceivedErrorResponseError as e: + # It's possible that + last_exception = e + pass + return nonce + + +# Wait until that all accounts have stable start nonce. +# FIXME: 10 seconds is just an empirical value. We need confirmation for this. +def wait_for_account_stable(): + time.sleep(10) + + +def make_genesis(): +# txs = [] +# for i in range(num_txs): +# sp = decode_hex("46b9e861b63d3509c88b7817275a30d22d62c8cd8fa6486ddee35ef0d8e0495f") +# addr = privtoaddr(sp) +# tx = create_transaction(0, 10**15, 200, 10**9, addr) +# signed_tx = tx.sign(sp) +# txs.append(signed_tx) +# sp = default_config["GENESIS_PRI_KEY"] +# addr = privtoaddr(sp) +# state_trie = HexaryTrie(db={}) +# state_trie[addr] = rlp.encode(Account(balance=10**9, nonce=0, storage_root=b'\x00' * 32, code_hash=trie.BLANK_ROOT)) + genesis = create_block(difficulty=0, author=default_config["GENESIS_AUTHOR"], timestamp=0, + deferred_receipts_root=default_config["GENESIS_RECEIPTS_ROOT"], + transaction_root=default_config["GENESIS_TRANSACTION_ROOT"], pos_reference=[]) + return genesis diff --git a/integration_tests/test_framework/contracts.py b/integration_tests/test_framework/contracts.py new file mode 100644 index 0000000000..5e9620e61a --- /dev/null +++ b/integration_tests/test_framework/contracts.py @@ -0,0 +1,281 @@ +from os.path import dirname, join +from pathlib import Path +import json +from dataclasses import dataclass + +from typing import Literal, Dict +import types + +from eth_utils.abi import get_abi_output_types +from web3 import Web3 +from web3.contract.contract import ContractFunction, Contract, ContractConstructor +from integration_tests.conflux.address import b32_address_to_hex +from integration_tests.conflux.config import default_config +from integration_tests.conflux.utils import priv_to_addr +from integration_tests.test_framework.blocktools import encode_hex_0x +from integration_tests.test_framework.test_framework import ConfluxTestFramework, RpcClient, start_p2p_connection +from integration_tests.test_framework.util import * +from eth_utils.hexadecimal import decode_hex + + +BASE = int(1e18) +ZERO_ADDRESS = f"0x{'0'*40}" + +InternalContractName = 
Literal["AdminControl", "SponsorWhitelistControl", + "Staking", "ConfluxContext", "PoSRegister", "CrossSpaceCall", "ParamsControl"] + +INTERNAL_CONTRACT_MAP: Dict[InternalContractName, str] = { + "AdminControl": "0x0888000000000000000000000000000000000000", + "SponsorWhitelistControl": "0x0888000000000000000000000000000000000001", + "Staking": "0x0888000000000000000000000000000000000002", + "ConfluxContext": "0x0888000000000000000000000000000000000004", + "PoSRegister": "0x0888000000000000000000000000000000000005", + "CrossSpaceCall": "0x0888000000000000000000000000000000000006", + "ParamsControl": "0x0888000000000000000000000000000000000007", +} + + +def _load_contract_metadata(name: str): + path = Path(join(dirname(__file__), "..", "test_contracts", "artifacts")) + try: + found_file = next(path.rglob(f"{name}.json")) + return json.loads(open(found_file, "r").read()) + except StopIteration: + raise Exception(f"Cannot found contract {name}'s metadata") + + +def cfx_contract(name: str, framework: ConfluxTestFramework = None) -> Contract: + metadata = _load_contract_metadata(name) + w3 = Web3() + contract = w3.eth.contract( + abi=metadata["abi"], bytecode=metadata["bytecode"]) + + contract.framework = framework + _enact_contract(contract) + return contract + + +def cfx_internal_contract(name: InternalContractName, framework: ConfluxTestFramework = None) -> Contract: + contract_addr = INTERNAL_CONTRACT_MAP[name] + return cfx_contract(name, framework).at(contract_addr) + + +def _add_address(self: Contract, address: str) -> Contract: + w3 = Web3() + new_contract = w3.eth.contract( + abi=self.abi, bytecode=self.bytecode, address=Web3.to_checksum_address(address)) + + new_contract.framework = self.framework + _enact_contract(new_contract) + return new_contract + + +def _deploy_contract(self: Contract, transact_args = None, *args, **kwargs) -> Contract: + if not hasattr(self, "framework"): + raise Exception("Contract does not hold Conflux framework") + + if transact_args is None: + transact_args = {} + + receipt = _cfx_transact(self.constructor( + *args, **kwargs), framework=self.framework, **transact_args) + return _add_address(self, receipt["contractCreated"]) + + +def _deploy_create2_contract(self: Contract, seed, *args, **kwargs) -> Contract: + if not hasattr(self, "framework"): + raise Exception("Contract does not hold Conflux framework") + + if not hasattr(self.framework, "create2factory"): + raise Exception("Create2Factory is not deployed") + + deployCode = _cfx_data(self.constructor(*args, **kwargs)) + receipt = self.framework.create2factory.functions.callCreate2( + seed, deployCode).cfx_transact() + + trace = self.framework.rpc.trace_transaction(receipt["transactionHash"]) + deploy_item = [t for t in trace if t["type"] == "create_result"][0] + created_address = b32_address_to_hex(deploy_item["action"]["addr"]) + + return _add_address(self, created_address) + + +def _enact_contract(contract: Contract): + framework = contract.framework + + contract.at = types.MethodType(_add_address, contract) + contract.deploy = types.MethodType(_deploy_contract, contract) + contract.deploy2 = types.MethodType(_deploy_create2_contract, contract) + + for _, obj in contract.functions.__dict__.items(): + if isinstance(obj, ContractFunction): + obj.framework = framework + + +def _get_framework(fn: ContractFunction) -> ConfluxTestFramework: + if hasattr(fn, "framework") and isinstance(fn.framework, ConfluxTestFramework): + pass + else: + raise Exception( + f"Not bind test framework when making call for 
{fn.function_identifier}") + + return fn.framework + + +def _cfx_transact(self: ContractFunction, value=None, decimals: int = 18, gas=None, storage_limit=None, priv_key=None, err_msg = None, framework=None): + if framework is None: + framework = _get_framework(self) + + tx = self.build_transaction( + {"gas": 3000000, "gasPrice": 1, "chainId": 1}) + data = bytes.fromhex(tx["data"][2:]) + + if value is not None: + value = int(value * (10**decimals)) + else: + value = 0 + + if storage_limit is None: + if len(tx["to"]) == 0: + storage_limit = 30000 + else: + storage_limit = 1024 + + if gas is None: + if len(tx["to"]) == 0: + gas = 10_000_000 + else: + gas = 3_000_000 + + if len(tx["to"]) == 0: + receiver = None + else: + receiver = tx["to"] + + if gas is None: + if len(data) > 0: + gas = 3000000 + else: + gas = 21000 + + tx = framework.client.new_contract_tx( + receiver=receiver, value=value, data_hex=tx["data"], priv_key=priv_key, gas=gas, storage_limit=storage_limit) + framework.client.send_tx(tx, True) + framework.wait_for_tx([tx], err_msg is None) + receipt = framework.client.get_transaction_receipt(tx.hash_hex()) + if err_msg is not None: + assert_equal(receipt["txExecErrorMsg"], err_msg) + # self.log.info(receipt) + return receipt + + + +def _cfx_call(self: ContractFunction, framework=None, sender=None, raw_output=False): + if framework is None: + framework = _get_framework(self) + + tx = self.build_transaction( + {"gas": 3000000, "gasPrice": 1, "chainId": 1}) + result = framework.client.call(tx["to"], tx["data"], sender=sender) + + if not raw_output: + output_types = get_abi_output_types(self.abi) + ans = self.w3.codec.decode(output_types, decode_hex(result)) + if len(ans) == 0: + return + elif len(ans) == 1: + return ans[0] + else: + return ans + else: + return result + + +def _cfx_data(self: ContractFunction): + tx = self.build_transaction( + {"gas": 3000000, "gasPrice": 1, "chainId": 1}) + return tx["data"] + + +setattr(ContractFunction, 'cfx_transact', _cfx_transact) +setattr(ContractFunction, 'cfx_call', _cfx_call) +setattr(ContractFunction, 'data', _cfx_data) + +setattr(ContractConstructor, 'cfx_transact', _cfx_transact) +setattr(ContractConstructor, 'cfx_call', _cfx_call) +setattr(ContractConstructor, 'data', _cfx_data) + + +@dataclass +class Account: + address: str + key: str + +class ConfluxTestFrameworkForContract(ConfluxTestFramework): + def __init__(self): + super().__init__() + + def set_test_params(self): + self.num_nodes = 1 + self.conf_parameters["executive_trace"] = "true" + + def before_test(self): + if "executive_trace" not in self.conf_parameters or not bool(self.conf_parameters["executive_trace"]): + raise AssertionError( + "Trace should be enabled for contract toolkit") + super().before_test() + self.rpc = self.nodes[0].rpc + self.client = RpcClient(self.nodes[0]) + ip = self.nodes[0].ip + port = self.nodes[0].ethrpcport + self.w3 = Web3(Web3.HTTPProvider(f'http://{ip}:{port}/')) + start_p2p_connection(self.nodes) + self.deploy_create2() + + self.genesis_key = default_config["GENESIS_PRI_KEY"] + self.genesis_addr = Web3.to_checksum_address(encode_hex_0x(priv_to_addr(self.genesis_key))) + self.genesis_key2 = default_config["GENESIS_PRI_KEY_2"] + self.genesis_addr2 = Web3.to_checksum_address(encode_hex_0x(priv_to_addr(self.genesis_key2))) + + def cfx_contract(self, name): + return cfx_contract(name, self) + + def internal_contract(self, name: InternalContractName): + return cfx_internal_contract(name, self) + + def cfx_transfer(self, receiver, value=None, 
gas_price=1, priv_key=None, decimals: int = 18, nonce = None, execute: bool = True): + if value is not None: + value = int(value * (10**decimals)) + else: + value = 0 + + tx = self.client.new_tx( + receiver=receiver, gas_price=gas_price, priv_key=priv_key, value=value, nonce=nonce) + self.client.send_tx(tx, execute) + if execute: + self.wait_for_tx([tx], True) + receipt = self.client.get_transaction_receipt(tx.hash_hex()) + # self.log.info(receipt) + return receipt + else: + return tx.hash_hex() + + def initialize_accounts(self, number = 10, value = 100) -> List[Account]: + def initialize_new_account() -> (str, bytes): + (address, priv) = self.client.rand_account() + if value > 0: + self.cfx_transfer(address, value = value) + return Account(address, priv) + + return [initialize_new_account() for _ in range(number)] + + @property + def adminControl(self): + return self.internal_contract("AdminControl") + + @property + def sponsorControl(self): + return self.internal_contract("SponsorWhitelistControl") + + def deploy_create2(self): + self.create2factory: Contract = self.cfx_contract("Create2Factory").deploy() diff --git a/integration_tests/test_framework/coverage.py b/integration_tests/test_framework/coverage.py new file mode 100644 index 0000000000..7705dd3e4d --- /dev/null +++ b/integration_tests/test_framework/coverage.py @@ -0,0 +1,109 @@ +#!/usr/bin/env python3 +# Copyright (c) 2015-2018 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Utilities for doing coverage analysis on the RPC interface. + +Provides a way to track which RPC commands are exercised during +testing. +""" + +import os + + +REFERENCE_FILENAME = 'rpc_interface.txt' + + +class AuthServiceProxyWrapper(): + """ + An object that wraps AuthServiceProxy to record specific RPC calls. + + """ + def __init__(self, auth_service_proxy_instance, coverage_logfile=None): + """ + Kwargs: + auth_service_proxy_instance (AuthServiceProxy): the instance + being wrapped. + coverage_logfile (str): if specified, write each service_name + out to a file when called. + + """ + self.auth_service_proxy_instance = auth_service_proxy_instance + self.coverage_logfile = coverage_logfile + + def __getattr__(self, name): + return_val = getattr(self.auth_service_proxy_instance, name) + if not isinstance(return_val, type(self.auth_service_proxy_instance)): + # If proxy getattr returned an unwrapped value, do the same here. + return return_val + return AuthServiceProxyWrapper(return_val, self.coverage_logfile) + + def __call__(self, *args, **kwargs): + """ + Delegates to AuthServiceProxy, then writes the particular RPC method + called to a file. + + """ + return_val = self.auth_service_proxy_instance.__call__(*args, **kwargs) + self._log_call() + return return_val + + def _log_call(self): + rpc_method = self.auth_service_proxy_instance._service_name + + if self.coverage_logfile: + with open(self.coverage_logfile, 'a+', encoding='utf8') as f: + f.write("%s\n" % rpc_method) + + def __truediv__(self, relative_uri): + return AuthServiceProxyWrapper(self.auth_service_proxy_instance / relative_uri, + self.coverage_logfile) + + def get_request(self, *args, **kwargs): + self._log_call() + return self.auth_service_proxy_instance.get_request(*args, **kwargs) + +def get_filename(dirname, n_node): + """ + Get a filename unique to the test process ID and node. + + This file will contain a list of RPC commands covered. 
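+    The embedded process ID keeps coverage files from concurrent test runs
+    (e.g. under pytest-xdist) from overwriting each other.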
+ """ + pid = str(os.getpid()) + return os.path.join( + dirname, "coverage.pid%s.node%s.txt" % (pid, str(n_node))) + + +def write_all_rpc_commands(dirname, node): + """ + Write out a list of all RPC functions available in `bitcoin-cli` for + coverage comparison. This will only happen once per coverage + directory. + + Args: + dirname (str): temporary test dir + node (AuthServiceProxy): client + + Returns: + bool. if the RPC interface file was written. + + """ + filename = os.path.join(dirname, REFERENCE_FILENAME) + + if os.path.isfile(filename): + return False + + help_output = node.help().split('\n') + commands = set() + + for line in help_output: + line = line.strip() + + # Ignore blanks and headers + if line and not line.startswith('='): + commands.add("%s\n" % line.split()[0]) + + with open(filename, 'w', encoding='utf8') as f: + f.writelines(list(commands)) + + return True diff --git a/integration_tests/test_framework/mininode.py b/integration_tests/test_framework/mininode.py new file mode 100644 index 0000000000..d5b4b9ad40 --- /dev/null +++ b/integration_tests/test_framework/mininode.py @@ -0,0 +1,494 @@ +#!/usr/bin/env python3 +"""Conflux P2P network half-a-node. + +`P2PConnection: A low-level connection object to a node's P2P interface +P2PInterface: A high-level interface object for communicating to a node over P2P +""" +import time +from eth_utils import decode_hex + +from integration_tests.conflux import utils +from integration_tests.conflux.config import DEFAULT_PY_TEST_CHAIN_ID +from integration_tests.conflux.messages import * +import asyncio +from collections import defaultdict +from io import BytesIO +import rlp +from rlp.sedes import big_endian_int, CountableList, boolean +import logging +import socket +import struct +import sys +import threading + +from integration_tests.conflux.transactions import Transaction +from integration_tests.conflux.utils import hash32, hash20, sha3, int_to_bytes, sha3_256, ecrecover_to_pub, ec_random_keys, ecsign, \ + bytes_to_int, encode_int32, int_to_hex, int_to_32bytearray, zpad, rzpad +from integration_tests.test_framework.blocktools import make_genesis +from integration_tests.test_framework.util import wait_until, get_ip_address + +logger = logging.getLogger("TestFramework.mininode") + + +class P2PConnection(asyncio.Protocol): + """A low-level connection object to a node's P2P interface. + + This class is responsible for: + + - opening and closing the TCP connection to the node + - reading bytes from and writing bytes to the socket + - deserializing and serializing the P2P message header + - logging messages as they are sent and received + + This class contains no logic for handling the P2P message payloads. 
It must be
+    sub-classed and the on_message() callback overridden."""
+
+    protocol: bytes
+
+    def __init__(self):
+        self.chain_id = None
+        self._transport = None
+
+    def set_chain_id(self, chain_id):
+        self.chain_id = chain_id
+
+    @property
+    def is_connected(self):
+        return self._transport is not None
+
+    def peer_connect(self, dstaddr, dstport):
+        assert not self.is_connected
+        self.dstaddr = dstaddr
+        self.dstport = dstport
+        self.recvbuf = b""
+        self.had_hello = False
+
+        logger.debug('Connecting to Conflux Node: %s:%d' %
+                     (self.dstaddr, self.dstport))
+
+        loop = NetworkThread.network_event_loop
+        conn_gen_unsafe = loop.create_connection(lambda: self, host=self.dstaddr, port=self.dstport)
+        conn_gen = lambda: loop.call_soon_threadsafe(loop.create_task, conn_gen_unsafe)
+        return conn_gen
+
+    def peer_disconnect(self):
+        NetworkThread.network_event_loop.call_soon_threadsafe(lambda: self._transport and self._transport.abort())
+
+    # Connection and disconnection methods
+
+    def connection_made(self, transport):
+        """asyncio callback when a connection is opened."""
+        assert not self._transport
+        logger.debug("Connected & Listening: %s:%d" %
+                     (self.dstaddr, self.dstport))
+        self._transport = transport
+        self.on_open()
+
+    def on_open(self):
+        pass
+
+    def connection_lost(self, exc):
+        """asyncio callback when a connection is closed."""
+        if exc:
+            logger.warning("Connection lost to {}:{} due to {}".format(self.dstaddr, self.dstport, exc))
+        else:
+            logger.debug("Closed connection to: %s:%d" % (self.dstaddr, self.dstport))
+        self._transport = None
+        self.recvbuf = b""
+        self.on_close()
+
+    def on_close(self):
+        pass
+
+    # Socket read methods
+
+    def data_received(self, buf):
+        """asyncio callback when data is read from the socket."""
+        if len(buf) > 0:
+            self.recvbuf += buf
+            self._on_data()
+
+    def _on_data(self):
+        """Try to read P2P packets from the recv buffer.
+
+        Reads complete length-prefixed connection packets, gives the
+        handshake a chance to consume them first, then dispatches session
+        packets to the hello/disconnect/protocol callbacks."""
+        while True:
+            packet = self.read_connection_packet()
+            if packet is None:
+                return
+            if self.on_handshake(packet):
+                continue
+            packet_id, protocol, payload = self.read_session_packet(packet)
+            if packet_id == PACKET_HELLO:
+                self.on_hello(payload)
+            elif packet_id == PACKET_DISCONNECT:
+                disconnect = rlp.decode(payload, Disconnect)
+                self.on_disconnect(disconnect)
+            else:
+                assert packet_id == PACKET_PROTOCOL
+                self.on_protocol_packet(protocol, payload)
+
+    def read_connection_packet(self):
+        if len(self.recvbuf) < 3:
+            return None
+
+        # The first 3 bytes carry the little-endian packet length.
+        packet_size = struct.unpack("<L", self.recvbuf[:3] + b'\x00')[0]
+        if len(self.recvbuf) < 3 + packet_size:
+            return None
+
+        packet = self.recvbuf[3:3 + packet_size]
+        self.recvbuf = self.recvbuf[3 + packet_size:]
+        if packet_size > 3:
+            packet = packet[-3:] + packet[:-3]
+
+        return packet
+
+    def assemble_connection_packet(self, data):
+        data_len = struct.pack("<L", len(data))[:3]
+        if len(data) > 3:
+            return data_len + data[3:] + data[:3]
+        else:
+            return data_len + data
+
+    def read_session_packet(self, packet):
+        if packet[-2] == 0:
+            return (packet[-1], None, packet[:-2])
+        else:
+            return (packet[-1], packet[-5:-2], packet[:-5])
+
+    def assemble_session_packet(self, packet_id, protocol, payload):
+        packet_id = struct.pack("<B", packet_id)
+        if protocol is None:
+            return payload + b'\x00' + packet_id
+        else:
+            return payload + protocol + b'\x01' + packet_id
+
+    def read_protocol_msg(self, msg):
+        # Returns (packet type id, RLP-encoded payload).
+        return (msg[0], msg[1:])
+
+    def assemble_protocol_msg(self, msg):
+        return struct.pack("<B", get_msg_id(msg)) + rlp.encode(msg)
+
+    def on_handshake(self, payload) -> bool:
+        return False
+
+    def on_hello(self, payload):
+        self.had_hello = True
+
+    def on_disconnect(self, disconnect):
+        self.on_close()
+
+    def on_protocol_packet(self, protocol, payload):
+        """Callback for processing a protocol-specific P2P payload. Must be overridden by derived class."""
+        raise NotImplementedError
+
+    def send_packet(self, packet_id, payload):
+        """Send a P2P message over the socket.
+ + This method takes a P2P payload, builds the P2P header and adds + the message to the send buffer to be sent over the socket.""" + self._log_message("send", packet_id) + buf = self.assemble_session_packet(packet_id, None, payload) + + self.send_data(buf) + + + def send_data(self, data): + if not self.is_connected: + raise IOError('Not connected, no pushbuf') + + buf = self.assemble_connection_packet(data) + + NetworkThread.network_event_loop.call_soon_threadsafe(lambda: self._transport and self._transport.write(buf)) + + def send_protocol_packet(self, payload): + """Send packet of protocols""" + buf = self.assemble_session_packet(PACKET_PROTOCOL, self.protocol, payload) + self.send_data(buf) + + def send_protocol_msg(self, msg): + """Send packet of protocols""" + payload = self.assemble_protocol_msg(msg) + self.send_protocol_packet(payload) + + # Class utility methods + + def _log_message(self, direction, msg): + """Logs a message being sent or received over the connection.""" + if direction == "send": + log_message = "Send message to " + elif direction == "receive": + log_message = "Received message from " + log_message += "%s:%d: %s" % (self.dstaddr, + self.dstport, repr(msg)[:500]) + if len(log_message) > 500: + log_message += "... (msg truncated)" + logger.debug(log_message) + + +class P2PInterface(P2PConnection): + """A high-level P2P interface class for communicating with a Conflux node. + + This class provides high-level callbacks for processing P2P message + payloads, as well as convenience methods for interacting with the + node over P2P. + + Individual testcases should subclass this and override the on_* methods + if they want to alter message handling behaviour.""" + + def __init__(self, genesis: str, remote=False): + super().__init__() + + # Track number of messages of each type received and the most recent + # message of each type + self.message_count = defaultdict(int) + self.protocol_message_count = defaultdict(int) + self.last_message = {} + self.last_protocol_message = {} + + # Default protocol version + self.protocol = b'cfx' + self.protocol_version = 3 + # Store genesis_hash + self.genesis = decode_hex(genesis) + self.best_block_hash = self.genesis + self.blocks = {self.genesis: self.genesis} + self.peer_pubkey = None + self.priv_key, self.pub_key = ec_random_keys() + x, y = self.pub_key + self.key = "0x" + utils.encode_hex(bytes(int_to_32bytearray(x))) + utils.encode_hex(bytes(int_to_32bytearray(y))) + self.had_status = False + self.on_packet_func = {} + self.remote = remote + + def peer_connect(self, *args, **kwargs): + return super().peer_connect(*args, **kwargs) + + def wait_for_status(self, timeout=60): + wait_until(lambda: self.had_status, timeout=timeout, lock=mininode_lock) + + def set_callback(self, msgid, func): + self.on_packet_func[msgid] = func + + def reset_callback(self, msgid): + del self.on_packet_func[msgid] + + # Message receiving methods + + def send_status(self): + status = Status( + ChainIdParams(self.chain_id), + self.genesis, 0, 0, [self.best_block_hash]) + self.send_protocol_msg(status) + + def on_protocol_packet(self, protocol, payload): + """Receive message and dispatch message to appropriate callback. + + We keep a count of how many of each message type has been received + and the most recent message of each type.""" + with mininode_lock: + try: + assert(protocol == self.protocol) # Possible to be false? 
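+                # read_protocol_msg() splits the raw payload into a packet
+                # type id and an RLP-encoded body; the per-type counters kept
+                # below let tests wait for a particular message kind to arrive.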
+ packet_type, payload = self.read_protocol_msg(payload) + self.protocol_message_count[packet_type] += 1 + msg = None + msg_class = get_msg_class(packet_type) + logger.debug("%s %s", packet_type, rlp.decode(payload)) + if msg_class is not None: + msg = rlp.decode(payload, msg_class) + if packet_type == STATUS_V3: + self._log_message("receive", "STATUS, terminal_hashes:{}" + .format([utils.encode_hex(i) for i in msg.terminal_block_hashes])) + self.had_status = True + elif packet_type == GET_BLOCK_HEADERS: + self._log_message("receive", "GET_BLOCK_HEADERS of {}".format(msg.hashes)) + elif packet_type == GET_BLOCK_HEADER_CHAIN: + self._log_message("receive", "GET_BLOCK_HEADER_CHAIN of {} {}".format(msg.hash, msg.max_blocks)) + elif packet_type == GET_BLOCK_BODIES: + hashes = msg.hashes + self._log_message("receive", "GET_BLOCK_BODIES of {} blocks".format(len(hashes))) + elif packet_type == GET_BLOCK_HEADERS_RESPONSE: + self._log_message("receive", "BLOCK_HEADERS of {} headers".format(len(msg.headers))) + elif packet_type == GET_BLOCK_BODIES_RESPONSE: + self._log_message("receive", "BLOCK_BODIES of {} blocks".format(len(msg))) + elif packet_type == NEW_BLOCK: + self._log_message("receive", "NEW_BLOCK, hash:{}".format(msg.block.block_header.hash)) + elif packet_type == GET_BLOCK_HASHES: + self._log_message("receive", "GET_BLOCK_HASHES, hash:{}, max_blocks:{}" + .format(msg.hash, msg.max_blocks)) + elif packet_type == GET_BLOCK_HASHES_RESPONSE: + self._log_message("receive", "BLOCK_HASHES, {} hashes".format(len(msg.hashes))) + elif packet_type == GET_TERMINAL_BLOCK_HASHES: + self._log_message("receive", "GET_TERMINAL_BLOCK_HASHES") + elif packet_type == TRANSACTIONS: + self._log_message("receive", "TRANSACTIONS, {} transactions".format(len(msg.transactions))) + elif packet_type == GET_TERMINAL_BLOCK_HASHES_RESPONSE: + self._log_message("receive", "TERMINAL_BLOCK_HASHES, {} hashes".format(len(msg.hashes))) + elif packet_type == NEW_BLOCK_HASHES: + self._log_message("receive", "NEW_BLOCK_HASHES, {} hashes".format(len(msg.block_hashes))) + elif packet_type == GET_BLOCKS_RESPONSE: + self._log_message("receive", "BLOCKS, {} blocks".format(len(msg.blocks))) + elif packet_type == GET_CMPCT_BLOCKS_RESPONSE: + self._log_message("receive", "GET_CMPCT_BLOCKS_RESPONSE, {} blocks".format(len(msg.blocks))) + elif packet_type == GET_BLOCK_TXN_RESPONSE: + self._log_message("receive", "GET_BLOCK_TXN_RESPONSE, block:{}".format(len(msg.block_hash))) + elif packet_type == GET_BLOCKS: + self._log_message("receive", "GET_BLOCKS, {} hashes".format(len(msg.hashes))) + self.on_get_blocks(msg) + elif packet_type == GET_CMPCT_BLOCKS: + self._log_message("receive", "GET_CMPCT_BLOCKS, {} hashes".format(len(msg.hashes))) + self.on_get_compact_blocks(msg) + elif packet_type == GET_BLOCK_TXN: + self._log_message("receive", "GET_BLOCK_TXN, hash={}".format(len(msg.block_hash))) + self.on_get_blocktxn(msg) + elif packet_type == GET_BLOCK_HASHES_BY_EPOCH: + self._log_message("receive", "GET_BLOCK_HASHES_BY_EPOCH, epochs: {}".format(msg.epochs)) + self.on_get_block_hashes_by_epoch(msg) + else: + self._log_message("receive", "Unknown packet {}".format(packet_type)) + return + if packet_type in self.on_packet_func and msg is not None: + self.on_packet_func[packet_type](self, msg) + except: + raise + + def on_hello(self, payload): + hello = rlp.decode(payload, Hello) + + capabilities = [] + for c in hello.capabilities: + capabilities.append((c.protocol, c.version)) + self._log_message( + "receive", "Hello, 
capabilities:{}".format(capabilities)) + ip = [127, 0, 0, 1] + if self.remote: + ip = get_ip_address() + endpoint = NodeEndpoint(address=bytes(ip), tcp_port=32325, udp_port=32325) + # FIXME: Use a valid pos_public_key. + hello = Hello(DEFAULT_PY_TEST_CHAIN_ID, [Capability(self.protocol, self.protocol_version)], endpoint, + decode_hex('ac4a9103a323cf3a0d64712de2cbacf6df5d4c2cad7458aa612696f60a6de0a0958da59c7736b71cf24139b1be94be1503efefa083263438fd07edd1e03246683ff58da8bdde286c321032765258d0c34f')) + + self.send_packet(PACKET_HELLO, rlp.encode(hello, Hello)) + self.had_hello = True + self.send_status() + + # Callback methods. Can be overridden by subclasses in individual test + # cases to provide custom message handling behaviour. + + def on_open(self): + self.handshake = Handshake(self) + self.handshake.write_auth() + + def on_close(self): pass + + def on_handshake(self, payload) -> bool: + if self.handshake.state == "ReadingAck": + self.handshake.read_ack(payload) + return True + + assert self.handshake.state == "StartSession" + + return False + + def on_get_blocks(self, msg): + resp = Blocks(reqid=msg.reqid, blocks=[]) + self.send_protocol_msg(resp) + + def on_get_compact_blocks(self, msg): + resp = GetCompactBlocksResponse(reqid=msg.reqid, compact_blocks=[], blocks=[]) + self.send_protocol_msg(resp) + + def on_get_blocktxn(self, msg): + resp = GetBlockTxnResponse(reqid=msg.reqid, block_hash=b'\x00'*32, block_txn=[]) + self.send_protocol_msg(resp) + + def on_get_block_hashes_by_epoch(self, msg): + resp = BlockHashes(reqid=msg.reqid, hashes=[]) + self.send_protocol_msg(resp) + + +# One lock for synchronizing all data access between the networking thread (see +# NetworkThread below) and the thread running the test logic. For simplicity, +# P2PConnection acquires this lock whenever delivering a message to a P2PInterface, +# and whenever adding anything to the send buffer (in send_message()). This +# lock should be acquired in the thread running the test logic to synchronize +# access to any data shared with the P2PInterface or P2PConnection. +mininode_lock = threading.RLock() + +class DefaultNode(P2PInterface): + def __init__(self, genesis: str, remote = False): + super().__init__(genesis, remote) + +class NetworkThread(threading.Thread): + network_event_loop: asyncio.AbstractEventLoop = None # type: ignore + + def __init__(self): + super().__init__(name="NetworkThread") + # There is only one event loop and no more than one thread must be created + assert not self.network_event_loop + + NetworkThread.network_event_loop = asyncio.new_event_loop() + + def run(self): + """Start the network thread.""" + self.network_event_loop.run_forever() + + def close(self, timeout=10): + """Close the connections and network event loop.""" + self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop) + wait_until(lambda: not self.network_event_loop.is_running(), timeout=timeout) + self.network_event_loop.close() + self.join(timeout) + NetworkThread.network_event_loop = None # type: ignore + + +def start_p2p_connection(nodes: list, remote=False): + if len(nodes) == 0: + return + p2p_connections = [] + # TODO(lpl): Figure out why pos slows down node starting. 
+ time.sleep(1) + genesis = nodes[0].cfx_getBlockByEpochNumber("0x0", False)["hash"] + + for node in nodes: + conn = DefaultNode(genesis, remote) + p2p_connections.append(conn) + node.add_p2p_connection(conn) + + for p2p in p2p_connections: + p2p.wait_for_status() + + return p2p_connections + +class Handshake: + def __init__(self, peer: P2PInterface): + self.peer = peer + self.state = "New" + + def write_auth(self): + node_id = utils.decode_hex(self.peer.key) + self.peer.send_data(node_id) + self.state = "ReadingAck" + + def read_ack(self, remote_node_id: bytes): + assert len(remote_node_id) == 64, "invalid node id length {}".format(len(remote_node_id)) + self.peer.peer_key = utils.encode_hex(remote_node_id) + self.state = "StartSession" diff --git a/integration_tests/test_framework/simple_rpc_proxy.py b/integration_tests/test_framework/simple_rpc_proxy.py new file mode 100644 index 0000000000..3fbe37cb06 --- /dev/null +++ b/integration_tests/test_framework/simple_rpc_proxy.py @@ -0,0 +1,67 @@ +import time +from typing import Any + +import jsonrpcclient.client +from jsonrpcclient.requests import Request +from jsonrpcclient.exceptions import ReceivedErrorResponseError + +jsonrpcclient.client.request_log.propagate = False +jsonrpcclient.client.response_log.propagate = False + +class SimpleRpcProxy: + def __init__(self, url, timeout, node): + self.url = url + self.timeout = timeout + self.node = node + from jsonrpcclient.clients.http_client import HTTPClient + self.client = HTTPClient(url) + + def __getattr__(self, name): + return RpcCaller(self.client, name, self.timeout, self.node) + + +class RpcCaller: + def __init__(self, client, method, timeout, node): + self.client = client + self.method = method + self.timeout = timeout + self.node = node + + def __call__(self, *args, **argsn) -> Any: + if argsn: + raise ValueError('json rpc 2 only supports array arguments') + + request = Request(self.method, *args) + try: + response = self.client.send(request, timeout=self.timeout) + return response.data.result + except Exception as e: + node = self.node + if node is not None and node.auto_recovery: + # wait to ensure that the process has completely exited + retry = 10 + return_code = None + while return_code is None and retry > 0: + return_code = node.process.poll() + time.sleep(0.5) + retry -= 1 + # TODO Parameterize return_code + # -11 means segfault, which may be triggered if rocksdb is not properly dropped. + # 100 is our random db crash exit code. 
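+                # Only these two "expected crash" exit codes trigger auto
+                # recovery: the node is restarted, waited back into sync, and
+                # the original request is replayed once.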
+ if return_code in [-11, 100]: + print(node.index, "recover from exit code", return_code, "during calling", + self.method, "exception is", e) + # TODO Handle extra_args + node.start(stdout=node.stdout, stderr=node.stderr) + node.wait_for_rpc_connection() + node.wait_for_nodeid() + node.wait_for_recovery("NormalSyncPhase", node.recovery_timeout) + response = self.client.send(request, timeout=self.timeout) + return response.data.result + else: + print(node.index, "exit with code", return_code, "during calling", self.method, "exception is", e) + raise e + else: + if isinstance(e, ReceivedErrorResponseError): + print(f"rpc exception method {self.method} code {e.response.code}, message: {e.response.message}, data: {e.response.data}") + raise e diff --git a/integration_tests/test_framework/smart_contract_bench_base.py b/integration_tests/test_framework/smart_contract_bench_base.py new file mode 100644 index 0000000000..e1069fd639 --- /dev/null +++ b/integration_tests/test_framework/smart_contract_bench_base.py @@ -0,0 +1,109 @@ +#!/usr/bin/env python3 +from integration_tests.test_framework.blocktools import create_transaction, wait_for_initial_nonce_for_address +from eth_utils.hexadecimal import decode_hex +from integration_tests.test_framework.block_gen_thread import BlockGenThread +from integration_tests.test_framework.util import * +from integration_tests.test_framework.mininode import * +from integration_tests.test_framework.test_framework import ConfluxTestFramework +from integration_tests.conflux.transactions import CONTRACT_DEFAULT_GAS +from integration_tests.conflux.utils import ec_random_keys, priv_to_addr, encode_hex_0x + + +class SmartContractBenchBase(ConfluxTestFramework): + REQUEST_BASE = { + 'gas': CONTRACT_DEFAULT_GAS, + 'gasPrice': 1, + 'chainId': 1, + } + + def add_options(self, parser): + parser.add_argument( + "--iter", + dest="iter", + default=1, + type=int, + help= + "The number of iterations the benchmark will be executed." + ) + + def set_test_params(self): + self.num_nodes = 1 + + def setup_network(self): + self.setup_nodes() + sync_blocks(self.nodes) + + def setup_contract(self): + pass + + def generate_transactions(self, i): + pass + + def run_test(self): + start_p2p_connection(self.nodes) + block_gen_thread = BlockGenThread(self.nodes, self.log) + block_gen_thread.start() + + self.setup_contract() + for i in range(self.options.iter): + self.generate_transactions(i) + + def __init__(self): + super().__init__() + self.nonce_map = {} + self.default_account_key = default_config["GENESIS_PRI_KEY"] + self.default_account_address = priv_to_addr(self.default_account_key) + + def get_nonce(self, sender): + if sender not in self.nonce_map: + self.nonce_map[sender] = wait_for_initial_nonce_for_address(self.nodes[0], sender) + else: + self.nonce_map[sender] += 1 + return self.nonce_map[sender] + + def call_contract_function(self, contract, name, args, sender_key, contract_addr=None, wait=False, + check_status=False, storage_limit=0): + # If contract address is empty, call the constructor. 
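+        # The web3-style dict produced by build_transaction() below is
+        # remapped to the keyword names create_transaction() expects
+        # (gasPrice -> gas_price, to -> receiver, etc.) before sending.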
+ if contract_addr: + func = getattr(contract.functions, name) + else: + func = getattr(contract, name) + attributes = { + 'nonce': self.get_nonce(priv_to_addr(sender_key)), + ** SmartContractBenchBase.REQUEST_BASE + } + if contract_addr: + attributes['receiver'] = decode_hex(contract_addr) + attributes['to'] = contract_addr + else: + attributes['receiver'] = b'' + tx_data = func(*args).build_transaction(attributes) + tx_data['data'] = decode_hex(tx_data['data']) + tx_data['pri_key'] = sender_key + tx_data['gas_price'] = tx_data['gasPrice'] + tx_data['storage_limit'] = storage_limit + tx_data.pop('gasPrice', None) + tx_data.pop('chainId', None) + tx_data.pop('to', None) + transaction = create_transaction(**tx_data) + self._send_transaction(transaction, wait, check_status) + return transaction + + def new_address_and_transfer(self, count=1, amount=int(1e22), wait=False, check_status=False): + results = [] + for _ in range(count): + pri_key, pub_key = ec_random_keys() + transaction = self.transfer(self.default_account_key, priv_to_addr(pri_key), amount, wait, check_status) + results.append([pri_key, transaction]) + return results + + def transfer(self, sender_key, receiver, amount, wait=False, check_status=False): + nonce = self.get_nonce(priv_to_addr(sender_key)) + transaction = create_transaction(nonce, 1, 21000, amount, receiver, pri_key=sender_key) + self._send_transaction(transaction, wait, check_status) + return transaction + + def _send_transaction(self, transaction, wait, check_status): + self.nodes[0].p2p.send_protocol_msg(Transactions(transactions=[transaction])) + if wait: + self.wait_for_tx([transaction], check_status) diff --git a/integration_tests/test_framework/test_framework.py b/integration_tests/test_framework/test_framework.py new file mode 100644 index 0000000000..5ae9dd306a --- /dev/null +++ b/integration_tests/test_framework/test_framework.py @@ -0,0 +1,733 @@ +#!/usr/bin/env python3 +# Copyright (c) 2014-2018 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Base class for RPC testing.""" +import pytest +from typing import List, Literal, Union, Any, cast, Type +from integration_tests.conflux.config import DEFAULT_PY_TEST_CHAIN_ID +from integration_tests.conflux.messages import Transactions +from integration_tests.conflux.rpc import RpcClient, default_config +from enum import Enum +from http.client import CannotSendRequest +import logging +import argparse +import os +import pdb +import shutil +import sys +import tempfile +import time +import random +from functools import cached_property +from conflux_web3 import Web3 as CWeb3 +from conflux_web3.middleware.base import ConfluxWeb3Middleware +from conflux_web3._utils.rpc_abi import ( + RPC +) +from conflux_web3.contract import ConfluxContract +from web3 import Web3 +from web3.middleware import Web3Middleware +from web3.middleware.signing import SignAndSendRawMiddlewareBuilder +from web3.types import RPCEndpoint +from cfx_account import Account as CoreAccount +from eth_account import Account + +from .authproxy import JSONRPCException +from . 
import coverage +from .mininode import start_p2p_connection, NetworkThread +from .test_node import TestNode +from .util import ( + CONFLUX_RPC_WAIT_TIMEOUT, + MAX_NODES, + PortMin, + assert_equal, + check_json_precision, + checktx, + connect_nodes, + connect_sample_nodes, + disconnect_nodes, + get_datadir_path, + initialize_datadir, + initialize_tg_config, + p2p_port, + set_node_times, + sync_blocks, + sync_mempools, + wait_until, + assert_tx_exec_error, + load_contract_metadata, + InternalContractName, +) +from .block_gen_thread import BlockGenThread + +class TestStatus(Enum): + PASSED = 1 + FAILED = 2 + SKIPPED = 3 + + +TEST_EXIT_PASSED = 0 +TEST_EXIT_FAILED = 1 +TEST_EXIT_SKIPPED = 77 + +Web3NotSetupError = ValueError("w3 is not initialized, please call self.setup_w3() first") + + +class ConfluxTestFramework: + """Base class for a bitcoin test script. + + Individual bitcoin test scripts should subclass this class and override the set_test_params() and run_test() methods. + + Individual tests can also override the following methods to customize the test setup: + + - add_options() + - setup_chain() + - setup_network() + - setup_nodes() + + The __init__() and main() methods should not be overridden. + + This class also contains various public and private helper methods.""" + + _cw3: Union[CWeb3, None] = None + _ew3: Union[Web3, None] = None + num_nodes: int + rpc: RpcClient + + def _get_parser(self) -> argparse.ArgumentParser: + parser = argparse.ArgumentParser(usage="%(prog)s [options]") + parser.add_argument( + "--nocleanup", + dest="nocleanup", + default=False, + action="store_true", + help="Leave bitcoinds and test.* datadir on exit or error") + parser.add_argument( + "--noshutdown", + dest="noshutdown", + default=False, + action="store_true", + help="Don't stop bitcoinds after the test execution") + parser.add_argument( + "--cachedir", + dest="cachedir", + default=os.path.abspath( + os.path.dirname(os.path.realpath(__file__)) + "/../../cache"), + help= + "Directory for caching pregenerated datadirs (default: %(default)s)" + ) + parser.add_argument( + "--tmpdir", dest="tmpdir", help="Root directory for datadirs") + parser.add_argument( + "-l", + "--loglevel", + dest="loglevel", + default="INFO", + help= + "log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory." 
+ ) + parser.add_argument( + "--tracerpc", + dest="trace_rpc", + default=False, + action="store_true", + help="Print out all RPC calls as they are made") + parser.add_argument( + "--portseed", + dest="port_seed", + default=os.getpid(), + type=int, + help= + "The seed to use for assigning port numbers (default: current process id)" + ) + parser.add_argument( + "--coveragedir", + dest="coveragedir", + help="Write tested RPC commands into this directory") + parser.add_argument( + "--pdbonfailure", + dest="pdbonfailure", + default=False, + action="store_true", + help="Attach a python debugger if test fails") + parser.add_argument( + "--usecli", + dest="usecli", + default=False, + action="store_true", + help="use bitcoin-cli instead of RPC for all commands") + parser.add_argument( + "--randomseed", + dest="random_seed", + type=int, + help="Set a random seed") + parser.add_argument( + "--metrics-report-interval-ms", + dest="metrics_report_interval_ms", + default=0, + type=int) + + parser.add_argument( + "--conflux-binary", + dest="conflux", + default=os.path.join( + os.path.dirname(os.path.realpath(__file__)), + "../../target/release/conflux"), + type=str) + return parser + + def __init__(self, port_min: int, additional_secrets: int=0): + """Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method""" + arg_parser = self._get_parser() + self.core_secrets: list[str] = [default_config["GENESIS_PRI_KEY"].hex()] # type: ignore + self.evm_secrets: list[str] = [default_config["GENESIS_PRI_KEY_2"].hex()] # type: ignore + self._add_genesis_secrets(additional_secrets) + self.port_min = port_min + self.setup_clean_chain = True + self.nodes: list[TestNode] = [] + self.mocktime = 0 + self.rpc_timewait = CONFLUX_RPC_WAIT_TIMEOUT + self.supports_cli = False + self.bind_to_localhost_only = True + self.conf_parameters = {} + self.pos_parameters = {"round_time_ms": 1000} + # The key is file name, and the value is a string as file content. 
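+        # Illustrative (hypothetical file name): {"genesis_secrets.txt": "..."}
+        # would be materialized in each node's datadir next to conflux.conf.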
+ self.extra_conf_files = {} + self.set_test_params() + self.predicates = {} + self.snapshot = {} + + assert hasattr( + self, + "num_nodes"), "Test must set self.num_nodes in set_test_params()" + + self.add_options(arg_parser) + self.options, _ = arg_parser.parse_known_args() + + PortMin.n = port_min # This line sets the port range for the test nodes + + check_json_precision() + + self.options.cachedir = os.path.abspath(self.options.cachedir) + + # Set up temp directory and start logging + if self.options.tmpdir: + self.options.tmpdir = os.path.abspath(self.options.tmpdir) + os.makedirs(self.options.tmpdir, exist_ok=True) + else: + self.options.tmpdir = os.getenv( + "CONFLUX_TESTS_LOG_DIR", + default=tempfile.mkdtemp(prefix="conflux_test_")) + + self._start_logging() + + self.log.debug('Setting up network thread') + self.network_thread = NetworkThread() + self.network_thread.start() + + if self.options.random_seed is not None: + random.seed(self.options.random_seed) + + self.after_options_parsed() + + if self.options.usecli and not self.supports_cli: + raise SkipTest( + "--usecli specified but test does not support using CLI") + self.setup_chain() + self.setup_network() + self.before_test() + + def teardown(self, request: pytest.FixtureRequest): + success = TestStatus.PASSED + if request.session.testsfailed > 0: + success = TestStatus.FAILED + self.log.exception(f"{request.session.testsfailed} tests failed") + + self.log.debug('Closing down network thread') + self.network_thread.close() + + if not self.options.noshutdown: + self.log.info("Stopping nodes") + if self.nodes: + self.stop_nodes() + else: + for node in self.nodes: + node.cleanup_on_exit = False + self.log.info( + "Note: bitcoinds were not stopped and may still be running") + + if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED: + self.log.info("Cleaning up {} on exit".format(self.options.tmpdir)) + cleanup_tree_on_exit = True + else: + self.log.warning("Not cleaning up dir %s" % self.options.tmpdir) + cleanup_tree_on_exit = False + + if success == TestStatus.PASSED: + self.log.info("Tests successful") + exit_code = TEST_EXIT_PASSED + else: + self.log.error( + "Test failed. Test logging available at %s/test_framework.log", + self.options.tmpdir) + self.log.error("Hint: Call {} '{}' to consolidate all logs".format( + os.path.normpath( + os.path.dirname(os.path.realpath(__file__)) + + "/../combine_logs.py"), self.options.tmpdir)) + handlers = self.log.handlers[:] + for handler in handlers: + self.log.removeHandler(handler) + handler.close() + logging.shutdown() + if cleanup_tree_on_exit: + shutil.rmtree(self.options.tmpdir) + + + def _add_genesis_secrets( + self, + additional_secrets: int, + space: Union[List[Literal["evm", "core"]], Literal["evm", "core"]]=["evm", "core"] + ): + """ + Add random secrets to `self.core_secrets` and `self.evm_secrets`. + When node starts, `self.core_secrets` and `self.evm_secrets` will be used + to generate genesis account for both EVM and Core + each with 10000 CFX (10^21 drip). + + The generated accounts can be used from `self.core_accounts` or `self.evm_accounts`. + """ + for _ in range(additional_secrets): + if "evm" in space or "evm" == space: + self.evm_secrets.append(Account.create().key.hex()) + if "core" in space or "core" == space: + self.core_secrets.append(Account.create().key.hex()) + + @cached_property + def client(self) -> RpcClient: + """Get the RPC client, using the first node. 
+        The RPC client wraps the first node's JSON-RPC interface.
+
+        Returns:
+            RpcClient: used to send RPC requests to the node.
+            For example, self.client.cfx_getBalance(...) or self.client.eth_getBalance(...).
+            Note that the parameters are usually not formatted.
+            Certain methods also provide formatted parameters, for example, self.client.epoch_number().
+            Please check the source code for more details.
+        """
+        return RpcClient(self.nodes[0])
+
+    @property
+    def cw3(self) -> CWeb3:
+        """Get the Conflux Web3 instance, initialized by self.setup_w3().
+
+        Raises:
+            Web3NotSetupError: If the Web3 instance is not initialized.
+
+        Returns:
+            CWeb3: The Conflux Web3 instance.
+        """
+        if self._cw3 is None:
+            raise Web3NotSetupError
+        return self._cw3
+
+    @property
+    def ew3(self) -> Web3:
+        """Get the EVM Web3 instance, initialized by self.setup_w3().
+
+        Raises:
+            Web3NotSetupError: If the Web3 instance is not initialized.
+
+        Returns:
+            Web3: The EVM Web3 instance.
+        """
+        if self._ew3 is None:
+            raise Web3NotSetupError
+        return self._ew3
+
+    @property
+    def cfx(self):
+        if self._cw3 is None:
+            raise Web3NotSetupError
+        return self._cw3.cfx
+
+    @property
+    def eth(self):
+        if self._ew3 is None:
+            raise Web3NotSetupError
+        return self._ew3.eth
+
+    @property
+    def core_accounts(self):
+        """
+        Get the core space genesis accounts.
+        More accounts can be added via `self._add_genesis_secrets(additional_secrets_count)`.
+        """
+        return [CoreAccount.from_key(key, network_id=DEFAULT_PY_TEST_CHAIN_ID) for key in self.core_secrets]
+
+    @property
+    def evm_accounts(self):
+        """
+        Get the eSpace genesis accounts.
+        More accounts can be added via `self._add_genesis_secrets(additional_secrets_count)`.
+        """
+        return [Account.from_key(key) for key in self.evm_secrets]
+
+    # Methods to override in subclass test scripts.
+    def set_test_params(self):
+        """Tests must override this method to change default values for number of nodes, topology, etc."""
+        raise NotImplementedError
+
+    def add_options(self, parser):
+        """Override this method to add command-line options to the test"""
+        pass
+
+    def after_options_parsed(self):
+        if self.options.metrics_report_interval_ms > 0:
+            self.conf_parameters["metrics_enabled"] = "true"
+            self.conf_parameters["metrics_report_interval_ms"] = str(self.options.metrics_report_interval_ms)
+
+    def setup_chain(self):
+        """Override this method to customize blockchain setup"""
+        self.log.info("Initializing test directory " + self.options.tmpdir)
+        self._initialize_chain_clean()
+
+    def setup_network(self):
+        """Override this method to customize test network topology"""
+        self.setup_nodes()
+
+        # Connect the nodes as a "chain". This allows us
+        # to split the network between nodes 1 and 2 to get
+        # two halves that can work on competing chains.
+        for i in range(self.num_nodes - 1):
+            connect_nodes(self.nodes, i, i + 1)
+        sync_blocks(self.nodes)
+
+    def setup_nodes(self, genesis_nodes=None, binary=None, is_consortium=True):
+        """Override this method to customize test node setup"""
+        self.add_nodes(self.num_nodes, genesis_nodes=genesis_nodes, binary=binary, is_consortium=is_consortium)
+        self.start_nodes()
+
+    def setup_w3(self):
+        """Set up cw3 and ew3 for Conflux and EVM.
+        This method should be called before any test.
+        Use self.cw3 and self.ew3 to access the web3 instances.
+        Use self.cw3.cfx and self.ew3.eth to access the Conflux and EVM RPC clients.
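+
+        A minimal usage sketch (illustrative):
+
+            self.setup_w3()
+            balance = self.cfx.get_balance(self.core_accounts[0].address)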
+ """ + client = RpcClient(self.nodes[0]) + log = self.log + self._cw3 = CWeb3(CWeb3.HTTPProvider(f'http://{self.nodes[0].ip}:{self.nodes[0].rpcport}/')) + self._ew3 = Web3(Web3.HTTPProvider(f'http://{self.nodes[0].ip}:{self.nodes[0].ethrpcportv2}/')) + self.cw3.wallet.add_accounts(self.core_accounts) + self.cw3.cfx.default_account = self.core_accounts[0].address + + self.ew3.middleware_onion.add(SignAndSendRawMiddlewareBuilder.build(self.evm_secrets)) # type: ignore + self.eth.default_account = self.evm_accounts[0].address + + class TestNodeMiddleware(ConfluxWeb3Middleware): + def request_processor(self, method: RPCEndpoint, params: Any) -> Any: + if method == RPC.cfx_sendRawTransaction or method == RPC.cfx_sendTransaction or method == "eth_sendRawTransaction" or method == "eth_sendTransaction": + client.node.wait_for_phase(["NormalSyncPhase"]) + + if method == RPC.cfx_maxPriorityFeePerGas or method == "eth_maxPriorityFeePerGas": + if client.epoch_number() == 0: + # enable cfx_maxPriorityFeePerGas + # or Error(Epoch number larger than the current pivot chain tip) would be raised + client.generate_blocks_to_state(num_txs=1) + return super().request_processor(method, params) + + def response_processor(self, method: RPCEndpoint, response: Any): + if method == RPC.cfx_getTransactionReceipt or method == "eth_getTransactionReceipt": + if "result" in response and response["result"] is None: + log.debug("Auto generate 5 blocks because did not get tx receipt") + client.generate_blocks_to_state(num_txs=1) # why num_txs=1? + return response + + self.cw3.middleware_onion.add(TestNodeMiddleware) + self.ew3.middleware_onion.add(TestNodeMiddleware) + + def add_nodes(self, num_nodes, genesis_nodes=None, rpchost=None, binary=None, auto_recovery=False, + recovery_timeout=30, is_consortium=True): + """Instantiate TestNode objects""" + if binary is None: + binary = [self.options.conflux] * num_nodes + assert_equal(len(binary), num_nodes) + if genesis_nodes is None: + genesis_nodes = num_nodes + if is_consortium: + initialize_tg_config(self.options.tmpdir, num_nodes, genesis_nodes, DEFAULT_PY_TEST_CHAIN_ID, + start_index=len(self.nodes), pos_round_time_ms=self.pos_parameters["round_time_ms"]) + for i in range(num_nodes): + node_index = len(self.nodes) + self.nodes.append( + TestNode( + node_index, + get_datadir_path(self.options.tmpdir, node_index), + rpchost=rpchost, + rpc_timeout=self.rpc_timewait, + confluxd=binary[i], + auto_recovery=auto_recovery, + recovery_timeout=recovery_timeout + )) + + def add_remote_nodes(self, num_nodes, ip, user, rpchost=None, binary=None, no_pssh=True): + """Instantiate TestNode objects""" + if binary is None: + binary = [self.options.conflux] * num_nodes + assert_equal(len(binary), num_nodes) + for i in range(num_nodes): + self.nodes.append( + TestNode( + i, + get_datadir_path(self.options.tmpdir, i), + rpchost=rpchost, + ip=ip, + user=user, + rpc_timeout=self.rpc_timewait, + confluxd=binary[i], + remote=True, + no_pssh=no_pssh, + )) + + def start_node(self, i, extra_args=None, phase_to_wait=["NormalSyncPhase"], wait_time=30, *args, **kwargs): + """Start a bitcoind""" + + node = self.nodes[i] + + node.start(extra_args, *args, **kwargs) + node.wait_for_rpc_connection() + node.wait_for_nodeid() + # try: + # node.test_posStart() + # except Exception as e: + # print(e) + if phase_to_wait is not None: + node.wait_for_recovery(phase_to_wait, wait_time) + + if self.options.coveragedir is not None: + coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc) + + def 
start_nodes(self, extra_args=None, *args, **kwargs): + """Start multiple bitcoinds""" + + try: + for i, node in enumerate(self.nodes): + node.start(extra_args, *args, **kwargs) + for node in self.nodes: + node.wait_for_rpc_connection() + node.wait_for_nodeid() + node.wait_for_recovery(["NormalSyncPhase"], 10) + except: + # If one node failed to start, stop the others + self.stop_nodes() + raise + + if self.options.coveragedir is not None: + for node in self.nodes: + coverage.write_all_rpc_commands(self.options.coveragedir, + node.rpc) + + def stop_node(self, i, expected_stderr='', kill=False, wait=True, clean=False): + """Stop a bitcoind test node""" + self.nodes[i].stop_node(expected_stderr, kill, wait) + if clean: + self.nodes[i].clean_data() + + def stop_nodes(self): + """Stop multiple bitcoind test nodes""" + for node in self.nodes: + # Issue RPC to stop nodes + node.stop_node() + + def wait_for_node_exit(self, i, timeout): + self.nodes[i].process.wait(timeout) + + def maybe_restart_node(self, i, stop_probability, clean_probability, wait_time=300): + if random.random() <= stop_probability: + self.log.info("stop %s", i) + clean_data = True if random.random() <= clean_probability else False + self.stop_node(i, clean=clean_data) + self.start_node(i, wait_time=wait_time, phase_to_wait=("NormalSyncPhase")) + + # Private helper methods. These should not be accessed by the subclass test scripts. + + def _start_logging(self): + # Add logger and logging handlers + self.log = logging.getLogger('TestFramework') + self.log.setLevel(logging.DEBUG) + # Create file handler to log all messages + fh = logging.FileHandler( + self.options.tmpdir + '/test_framework.log', encoding='utf-8') + fh.setLevel(logging.DEBUG) + # Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel. + ch = logging.StreamHandler(sys.stdout) + # User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int + ll = int(self.options.loglevel) if self.options.loglevel.isdigit( + ) else self.options.loglevel.upper() + ch.setLevel(ll) + # Format logs the same as bitcoind's debug.log with microprecision (so log files can be concatenated and sorted) + formatter = logging.Formatter( + fmt= + '%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s', + datefmt='%Y-%m-%dT%H:%M:%S') + formatter.converter = time.gmtime + fh.setFormatter(formatter) + ch.setFormatter(formatter) + # add the handlers to the logger + self.log.addHandler(fh) + self.log.addHandler(ch) + + if self.options.trace_rpc: + rpc_logger = logging.getLogger("ConfluxRPC") + rpc_logger.setLevel(logging.DEBUG) + rpc_handler = logging.StreamHandler(sys.stdout) + rpc_handler.setLevel(logging.DEBUG) + rpc_logger.addHandler(rpc_handler) + + def _initialize_chain_clean(self): + """Initialize empty blockchain for use by the test. + + Create an empty blockchain and num_nodes wallets. 
+ Useful if a test case wants complete control over initialization.""" + + for i in range(self.num_nodes): + initialize_datadir(self.options.tmpdir, i, self.port_min, self.conf_parameters, + self.extra_conf_files, self.core_secrets, self.evm_secrets) + + def before_test(self): + self.setup_w3() + + # wait for core space tx + def wait_for_tx(self, all_txs, check_status=False): + for tx in all_txs: + self.log.debug("Wait for tx to confirm %s", tx.hash_hex()) + for i in range(3): + try: + retry = True + while retry: + try: + wait_until(lambda: checktx(self.nodes[0], tx.hash_hex()), timeout=20) + retry = False + except CannotSendRequest: + time.sleep(0.01) + break + except AssertionError as _: + self.nodes[0].p2p.send_protocol_msg(Transactions(transactions=[tx])) + if i == 2: + raise AssertionError("Tx {} not confirmed after 30 seconds".format(tx.hash_hex())) + # After having optimistic execution, get_receipts may get receipts with not deferred block, these extra blocks + # ensure that later get_balance can get correct executed balance for all transactions + client = RpcClient(self.nodes[0]) + for _ in range(5): + client.generate_block() + receipts = [client.get_transaction_receipt(tx.hash_hex()) for tx in all_txs] + self.log.debug("Receipts received: {}".format(receipts)) + if check_status: + for i in receipts: + if int(i["outcomeStatus"], 0) != 0: + raise AssertionError("Receipt states the execution failes: {}".format(i)) + return receipts + + def start_block_gen(self): + BlockGenThread(self.nodes, self.log).start() + + def cfx_contract(self, name) -> Type[ConfluxContract]: + metadata = load_contract_metadata(name) + return self.cfx.contract( + abi=metadata["abi"], bytecode=metadata["bytecode"]) + + def internal_contract(self, name: InternalContractName): + return self.cfx.contract(name=name, with_deployment_info=True) + + def deploy_contract(self, name, transact_args = {}) -> ConfluxContract: + tx_hash = self.cfx_contract(name).constructor().transact(transact_args) + receipt = tx_hash.executed(timeout=30) + return self.cfx_contract(name)(cast(str, receipt["contractCreated"])) + +class SkipTest(Exception): + """This exception is raised to skip a test""" + + def __init__(self, message): + self.message = message + + +class DefaultConfluxTestFramework(ConfluxTestFramework): + def set_test_params(self): + self.num_nodes = 8 + + def setup_network(self): + self.log.info("setup nodes ...") + self.setup_nodes() + self.log.info("connect peers ...") + connect_sample_nodes(self.nodes, self.log) + self.log.info("sync up with blocks among nodes ...") + sync_blocks(self.nodes) + self.log.info("start P2P connection ...") + start_p2p_connection(self.nodes) + + +class OptionHelper: + def to_argument_str(arg_name): + return "--" + str(arg_name).replace("_", "-") + + def parsed_options_to_args(parsed_arg: dict): + args = [] + for arg_name, value in parsed_arg.items(): + if type(value) is not bool: + args.append(OptionHelper.to_argument_str(arg_name)) + args.append(str(value)) + elif value: + # FIXME: This only allows setting boolean to True. + args.append(OptionHelper.to_argument_str(arg_name)) + return args + + """ + arg_definition is a key-value pair of arg_name and its default value. + When the default value is set to None, argparse.SUPPRESS is passed to + argument parser, which means that in the absence of this argument, + the value is unset, and in this case we assign the type to str. + + arg_filter is either None or a set of arg_names to add. 
By setting + arg_filter, A class may use a subset of arg_definition of another + class, without changing default value. + """ + + def add_options( + parser: argparse.ArgumentParser, + arg_definition: dict, + arg_filter: Union[None, set, dict] = None): + for arg_name, default_value in arg_definition.items(): + if arg_filter is None or arg_name in arg_filter: + try: + if default_value is None: + parser.add_argument( + OptionHelper.to_argument_str(arg_name), + dest=arg_name, + default=SUPPRESS, + type=str + ) + elif type(default_value) is bool: + parser.add_argument( + OptionHelper.to_argument_str(arg_name), + dest=arg_name, + action='store_false' if default_value else 'store_true', + ) + else: + parser.add_argument( + OptionHelper.to_argument_str(arg_name), + dest=arg_name, + default=default_value, + type=type(default_value) + ) + except argparse.ArgumentError as e: + print(f"Ignored argparse error: {e}") + + def conflux_options_to_config(parsed_args: dict, arg_filter: Union[None, set, dict] = None) -> dict: + conflux_config = {} + for arg_name, value in parsed_args.items(): + if arg_filter is None or arg_name in arg_filter: + if type(value) is bool: + conflux_config[arg_name] = "true" if value else "false" + else: + conflux_config[arg_name] = repr(value) + return conflux_config diff --git a/integration_tests/test_framework/test_node.py b/integration_tests/test_framework/test_node.py new file mode 100644 index 0000000000..895befcb44 --- /dev/null +++ b/integration_tests/test_framework/test_node.py @@ -0,0 +1,410 @@ +#!/usr/bin/env python3 +"""Class for conflux node under test""" + +import decimal +import errno +from enum import Enum +import http.client +import json +import logging +import os +import re +import subprocess +import tempfile +import shutil + +import requests +import time +import urllib.parse + +import eth_utils + +from integration_tests.conflux.utils import get_nodeid, sha3, encode_int32 +from integration_tests.conflux.config import DEFAULT_PY_TEST_CHAIN_ID +from .authproxy import JSONRPCException +from .util import * + + +class FailedToStartError(Exception): + """Raised when a node fails to start correctly.""" + + +class ErrorMatch(Enum): + FULL_TEXT = 1 + FULL_REGEX = 2 + PARTIAL_REGEX = 3 + + +class TestNode: + def __init__(self, index, datadir, rpchost, confluxd, rpc_timeout=None, remote=False, ip=None, user=None, + rpcport=None, auto_recovery=False, recovery_timeout=30, chain_id=DEFAULT_PY_TEST_CHAIN_ID, + no_pssh=True): + self.chain_id = chain_id + self.index = index + self.datadir = datadir + self.stdout_dir = os.path.join(self.datadir, "stdout") + self.stderr_dir = os.path.join(self.datadir, "stderr") + self.log = os.path.join(self.datadir, "node" + str(index) + ".log") + self.remote = remote + self.no_pssh = no_pssh + self.rpchost = rpchost + self.auto_recovery = auto_recovery + self.recovery_timeout = recovery_timeout + if remote: + self.ip = ip + self.user = user + self.rpcport = rpcport if rpcport is not None else remote_rpc_port(self.index) + else: + self.ip = "127.0.0.1" + self.rpcport = rpc_port(self.index) + self.ethwsport = evm_rpc_ws_port(self.index) + self.pubsubport = pubsub_port(self.index) + self.ethrpcport = evm_rpc_port(self.index) + self.ethrpcportv2 = evm_rpc_port_v2(self.index) + self.port = str(p2p_port(index)) + if self.rpchost is None: + self.rpchost = ip # + ":" + str(rpc_port(index)) + self.rpc_timeout = CONFLUX_RPC_WAIT_TIMEOUT if rpc_timeout is None else rpc_timeout + self.binary = confluxd + self.args = [ + self.binary, "--config", + 
os.path.join(self.datadir, "conflux.conf") + ] + + self.running = False + self.process = None + self.rpc_connected = False + self.rpc: SimpleRpcProxy = None # type: ignore + self.ethrpc: SimpleRpcProxy = None + self.ethrpc_connected = False + self.log = logging.getLogger('TestFramework.node%d' % index) + self.cleanup_on_exit = True + # self.key = "0x" + "0"*125+"{:03d}".format(self.index); + self.p2ps = [] + if os.path.exists(os.path.join(self.datadir, "pow_sk")): + self.pow_sk = open(os.path.join(self.datadir, "pow_sk"), "rb").read() + else: + self.pow_sk = None + + def _node_msg(self, msg: str) -> str: + """Return a modified msg that identifies this node by its index as a debugging aid.""" + return "[node %d] %s" % (self.index, msg) + + def _raise_assertion_error(self, msg: str): + """Raise an AssertionError with msg modified to identify this node.""" + raise AssertionError(self._node_msg(msg)) + + def __del__(self): + # Ensure that we don't leave any bitcoind processes lying around after + # the test ends + if self.process and self.cleanup_on_exit: + # Should only happen on test failure + # Avoid using logger, as that may have already been shutdown when + # this destructor is called. + print(self._node_msg("Cleaning up leftover process")) + self.process.terminate() + if self.remote == True: + cli_kill = "ssh {}@{} killall conflux".format( + self.user, self.ip) + print(self.ip, self.index, subprocess.Popen( + cli_kill, shell=True).wait()) + + def __getattr__(self, name): + """Dispatches any unrecognised messages to the RPC connection.""" + assert self.rpc_connected and self.rpc is not None, self._node_msg( + "Error: no RPC connection") + if name.startswith("eth_") or name.startswith("parity_"): + return getattr(self.ethrpc, name) + else: + return getattr(self.rpc, name) + + def best_block_hash(self) -> str: + return self.cfx_getBestBlockHash() + + def start(self, extra_args=None, *, stdout=None, stderr=None, **kwargs): + # Add a new stdout and stderr file each time conflux is started + if stderr is None: + stderr = tempfile.NamedTemporaryFile( + dir=self.stderr_dir, + suffix="_" + str(self.index) + "_" + self.ip, + delete=False) + if stdout is None: + stdout = tempfile.NamedTemporaryFile( + dir=self.stdout_dir, + suffix="_" + str(self.index) + "_" + self.ip, + delete=False) + self.stderr = stderr + self.stdout = stdout + if extra_args is not None: + self.args += extra_args + if "--public-address" not in self.args: + self.args += ["--public-address", "{}".format(self.ip)] + + # Delete any existing cookie file -- if such a file exists (eg due to + # unclean shutdown), it will get overwritten anyway by bitcoind, and + # potentially interfere with our attempt to authenticate + delete_cookie_file(self.datadir) + my_env = os.environ.copy() + my_env["RUST_BACKTRACE"] = "1" + if self.remote: + # If no_pssh is False, we have started the conflux nodes before this, so + # we can just skip the start here. 
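+            # With no_pssh, the node is provisioned over plain ssh/scp:
+            # create the datadir, sync the config, kill any stale conflux
+            # process, then launch the binary remotely (commands below).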
+ if self.no_pssh: + ssh_args = '-o "StrictHostKeyChecking no"' + cli_mkdir = "ssh {} {}@{} mkdir -p {};".format( + ssh_args, self.user, self.ip, self.datadir + ) + cli_conf = "scp {3} -r {0} {1}@{2}:`dirname {0}`;".format( + self.datadir, self.user, self.ip, ssh_args + ) + cli_kill = "ssh {}@{} killall -9 conflux;".format(self.user, self.ip) + cli_exe = 'ssh {} {}@{} "{} > ~/stdout"'.format( + ssh_args, + self.user, + self.ip, + "cd {} && export RUST_BACKTRACE=full && cgexec -g net_cls:limit{} ".format(self.datadir, self.index+1) + + " ".join(self.args), + ) + print(cli_mkdir + cli_kill + cli_conf + cli_exe) + self.process = subprocess.Popen( + cli_mkdir + cli_kill + cli_conf + cli_exe, + stdout=stdout, + stderr=stderr, + cwd=self.datadir, + shell=True, + **kwargs, + ) + else: + self.process = subprocess.Popen( + self.args, stdout=stdout, stderr=stderr, cwd=self.datadir, env=my_env, **kwargs) + + self.running = True + self.log.debug("conflux started, waiting for RPC to come up") + + def wait_for_rpc_connection(self): + """Sets up an RPC connection to the conflux process. Returns False if unable to connect.""" + # Poll at a rate of four times per second + poll_per_s = 4 + for _ in range(poll_per_s * self.rpc_timeout): + if not self.remote and self.process.poll() is not None: + raise FailedToStartError( + self._node_msg( + 'conflux exited with status {} during initialization'. + format(self.process.returncode))) + try: + self.rpc = get_simple_rpc_proxy( + rpc_url(self.index, self.rpchost, self.rpcport), + node=self, + timeout=self.rpc_timeout) + self.rpc.cfx_getBestBlockHash() + # If the call to get_best_block_hash() succeeds then the RPC connection is up + self.rpc_connected = True + self.url = self.rpc.url + self.log.debug("RPC successfully started") + # setup ethrpc + self.ethrpc = get_simple_rpc_proxy( + rpc_url(self.index, self.rpchost, self.ethrpcport), + node=self, + timeout=self.rpc_timeout) + self.ethrpc_connected = True + return + except requests.exceptions.ConnectionError as e: + # TODO check if it's ECONNREFUSED` + pass + except IOError as e: + if e.errno != errno.ECONNREFUSED: # Port not yet open? + raise # unknown IO error + except JSONRPCException as e: # Initialization phase + if e.error['code'] != -28: # RPC in warmup? + raise # unknown JSON RPC exception + except ValueError as e: # cookie file not found and no rpcuser or rpcassword. 
bitcoind still starting + if "No RPC credentials" not in str(e): + raise + except jsonrpcclient.exceptions.ReceivedNon2xxResponseError as e: + if e.code != 500: + raise + time.sleep(1.0 / poll_per_s) + self._raise_assertion_error("failed to get RPC proxy: index = {}, ip = {}, rpchost = {}, p2pport={}, rpcport = {}, rpc_url = {}".format( + self.index, self.ip, self.rpchost, self.port, self.rpcport, rpc_url(self.index, self.rpchost, self.rpcport) + )) + + def wait_for_recovery(self, phase_to_wait, wait_time): + self.wait_for_phase(phase_to_wait, wait_time=wait_time) + + def wait_for_phase(self, phases, wait_time=10): + sleep_time = 0.1 + retry = 0 + max_retry = wait_time / sleep_time + + while self.debug_currentSyncPhase() not in phases and retry <= max_retry: + time.sleep(0.1) + retry += 1 + + if retry > max_retry: + current_phase = self.debug_currentSyncPhase() + raise AssertionError(f"Node did not reach any of {phases} after {wait_time} seconds, current phase is {current_phase}") + + def wait_for_nodeid(self): + pubkey, x, y = get_nodeid(self) + self.key = eth_utils.encode_hex(pubkey) + addr_tmp = bytearray(sha3(encode_int32(x) + encode_int32(y))[12:]) + addr_tmp[0] &= 0x0f + addr_tmp[0] |= 0x10 + self.addr = addr_tmp + self.log.debug("Get node {} nodeid {}".format(self.index, self.key)) + + def clean_data(self): + shutil.rmtree(os.path.join(self.datadir, "blockchain_data/blockchain_db")) + shutil.rmtree(os.path.join(self.datadir, "blockchain_data/storage_db")) + shutil.rmtree(os.path.join(self.datadir, "pos_db"), ignore_errors=True) + self.log.info("Cleanup data for node %d", self.index) + + def stop_node(self, expected_stderr='', kill=False, wait=True): + """Stop the node.""" + if not self.running: + return + self.log.debug("Stopping node") + try: + if kill: + self.process.kill() + else: + self.process.terminate() + except http.client.CannotSendRequest: + self.log.exception("Unable to stop node.") + + if wait: + self.wait_until_stopped() + # Check that stderr is as expected + self.stderr.seek(0) + stderr = self.stderr.read().decode('utf-8').strip() + # TODO: Check how to avoid `pthread lock: Invalid argument`. + if stderr != expected_stderr and stderr != "pthread lock: Invalid argument" and "pthread_mutex_lock" not in stderr: + if self.return_code is None: + self.log.info("Process is still running") + else: + self.log.info("Process has terminated with code {}".format(self.return_code)) + raise AssertionError("Unexpected stderr {} != {} from {}:{} index={}".format( + stderr, expected_stderr, self.ip, self.port, self.index)) + + self.stdout.close() + self.stderr.close() + + del self.p2ps[:] + + def is_node_stopped(self): + """Checks whether the node has stopped. + + Returns True if the node has stopped. False otherwise. + This method is responsible for freeing resources (self.process).""" + if not self.running: + return True + return_code = self.process.poll() + if return_code is None: + return False + + # process has stopped. Assert that it didn't return an error code. 
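+        # The assertion below is left commented out, likely because recovery
+        # tests stop nodes forcibly and a non-zero exit code is then expected.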
+ # assert return_code == 0, self._node_msg( + # "Node returned non-zero exit code (%d) when stopping" % + # return_code) + self.running = False + self.process = None + self.rpc_connected = False + self.rpc = None + self.log.debug("Node stopped") + self.return_code = return_code + return True + + def wait_until_stopped(self, timeout=CONFLUX_GRACEFUL_SHUTDOWN_TIMEOUT): + wait_until(self.is_node_stopped, timeout=timeout) + + def assert_start_raises_init_error(self, + extra_args=None, + expected_msg=None, + match=ErrorMatch.FULL_TEXT, + *args, + **kwargs): + """Attempt to start the node and expect it to raise an error. + + extra_args: extra arguments to pass through to bitcoind + expected_msg: regex that stderr should match when bitcoind fails + + Will throw if bitcoind starts without an error. + Will throw if an expected_msg is provided and it does not match bitcoind's stdout.""" + with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \ + tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout: + try: + self.start( + extra_args, + stdout=log_stdout, + stderr=log_stderr, + *args, + **kwargs) + self.wait_for_rpc_connection() + self.stop_node() + self.wait_until_stopped() + except FailedToStartError as e: + self.log.debug('bitcoind failed to start: %s', e) + self.running = False + self.process = None + # Check stderr for expected message + if expected_msg is not None: + log_stderr.seek(0) + stderr = log_stderr.read().decode('utf-8').strip() + if match == ErrorMatch.PARTIAL_REGEX: + if re.search( + expected_msg, stderr, + flags=re.MULTILINE) is None: + self._raise_assertion_error( + 'Expected message "{}" does not partially match stderr:\n"{}"'. + format(expected_msg, stderr)) + elif match == ErrorMatch.FULL_REGEX: + if re.fullmatch(expected_msg, stderr) is None: + self._raise_assertion_error( + 'Expected message "{}" does not fully match stderr:\n"{}"'. + format(expected_msg, stderr)) + elif match == ErrorMatch.FULL_TEXT: + if expected_msg != stderr: + self._raise_assertion_error( + 'Expected message "{}" does not fully match stderr:\n"{}"'. + format(expected_msg, stderr)) + else: + if expected_msg is None: + assert_msg = "bitcoind should have exited with an error" + else: + assert_msg = "bitcoind should have exited with expected error " + expected_msg + self._raise_assertion_error(assert_msg) + + def add_p2p_connection(self, p2p_conn, *args, **kwargs): + """Add a p2p connection to the node. 
+ + This method adds the p2p connection to the self.p2ps list and also + returns the connection to the caller.""" + if 'dstport' not in kwargs: + kwargs['dstport'] = int(self.port) + if 'dstaddr' not in kwargs: + kwargs['dstaddr'] = self.ip + + p2p_conn.set_chain_id(self.chain_id) + + # if self.ip is not None: + # kwargs['dstaddr'] = self.ip + # print(args, kwargs) + p2p_conn.peer_connect(*args, **kwargs)() + self.p2ps.append(p2p_conn) + + return p2p_conn + + @property + def p2p(self): + """Return the first p2p connection + + Convenience property - most tests only use a single p2p connection to each + node, so this saves having to write node.p2ps[0] many times.""" + assert self.p2ps, "No p2p connection" + return self.p2ps[0] + + def disconnect_p2ps(self): + """Close all p2p connections to the node.""" + for p in self.p2ps: + p.peer_disconnect() + del self.p2ps[:] diff --git a/integration_tests/test_framework/util/__init__.py b/integration_tests/test_framework/util/__init__.py new file mode 100644 index 0000000000..c53875d4f6 --- /dev/null +++ b/integration_tests/test_framework/util/__init__.py @@ -0,0 +1,955 @@ +#!/usr/bin/env python3 + +import integration_tests.conflux.config +from decimal import Decimal, ROUND_DOWN +import inspect +import json +import logging +import os +import random +import re +from subprocess import CalledProcessError, check_output +import time +from typing import Optional, Callable, List, TYPE_CHECKING, cast, Tuple, Union, Literal +import socket +import threading +import jsonrpcclient.exceptions +import solcx +import conflux_web3 # should be imported before web3 +import web3 +from cfx_account import Account as CfxAccount +from cfx_account.signers.local import LocalAccount as CfxLocalAccount +from sys import platform +import yaml +import shutil +import math +from os.path import dirname, join +from pathlib import Path + +from integration_tests.test_framework.simple_rpc_proxy import SimpleRpcProxy +from .. import coverage +from ..authproxy import AuthServiceProxy, JSONRPCException +if TYPE_CHECKING: + from conflux.rpc import RpcClient + +solcx.set_solc_version('v0.5.17') + +CONFLUX_RPC_WAIT_TIMEOUT = 60 +CONFLUX_GRACEFUL_SHUTDOWN_TIMEOUT = 1220 + +logger = logging.getLogger("TestFramework.utils") + +# Assert functions +################## + + +def assert_fee_amount(fee, tx_size, fee_per_kB): + """Assert the fee was in range""" + target_fee = round(tx_size * fee_per_kB / 1000, 8) + if fee < target_fee: + raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)" % + (str(fee), str(target_fee))) + # allow the wallet's estimation to be at most 2 bytes off + if fee > (tx_size + 2) * fee_per_kB / 1000: + raise AssertionError("Fee of %s BTC too high! 
(Should be %s BTC)" % + (str(fee), str(target_fee))) + + +def assert_storage_occupied(receipt, addr, expected): + if receipt["storageCoveredBySponsor"]: + assert_equal(receipt["to"], addr.lower()) + else: + assert_equal(receipt["from"], addr.lower()) + assert_equal(receipt["storageCollateralized"], expected) + + +def assert_storage_released(receipt, addr, expected): + assert_equal(receipt["storageReleased"].get(addr.lower(), 0), expected) + + +def assert_equal(thing1, thing2, *args): + if thing1 != thing2 or any(thing1 != arg for arg in args): + raise AssertionError("not(%s)" % " == ".join( + str(arg) for arg in (thing1, thing2) + args)) + + +def assert_ne(thing1, thing2): + if thing1 == thing2: + raise AssertionError("not(%s)" % " != ".join([thing1, thing2])) + + +def assert_greater_than(thing1, thing2): + if thing1 <= thing2: + raise AssertionError("%s <= %s" % (str(thing1), str(thing2))) + + +def assert_greater_than_or_equal(thing1, thing2): + if thing1 < thing2: + raise AssertionError("%s < %s" % (str(thing1), str(thing2))) + + +def assert_raises(exc, fun, *args, **kwds): + assert_raises_message(exc, None, fun, *args, **kwds) + + +def assert_raises_message(exc, message, fun, *args, **kwds): + try: + fun(*args, **kwds) + except JSONRPCException: + raise AssertionError( + "Use assert_raises_rpc_error() to test RPC failures") + except exc as e: + if message is not None and message not in e.error['message']: + raise AssertionError("Expected substring not found:" + + e.error['message']) + except Exception as e: + raise AssertionError("Unexpected exception raised: " + + type(e).__name__) + else: + raise AssertionError("No exception raised") + + +def assert_raises_process_error(returncode, output, fun, *args, **kwds): + """Execute a process and asserts the process return code and output. + + Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError + and verifies that the return code and output are as expected. Throws AssertionError if + no CalledProcessError was raised or if the return code and output are not as expected. + + Args: + returncode (int): the process return code. + output (string): [a substring of] the process output. + fun (function): the function to call. This should execute a process. + args*: positional arguments for the function. + kwds**: named arguments for the function. + """ + try: + fun(*args, **kwds) + except CalledProcessError as e: + if returncode != e.returncode: + raise AssertionError("Unexpected returncode %i" % e.returncode) + if output not in e.output: + raise AssertionError("Expected substring not found:" + e.output) + else: + raise AssertionError("No exception raised") + + +def assert_raises_rpc_error(code: Optional[int], message: Optional[str], fun: Callable, *args, err_data_: Optional[str]=None, **kwds): + """Run an RPC and verify that a specific JSONRPC exception code and message is raised. + + Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException + and verifies that the error code and message are as expected. Throws AssertionError if + no JSONRPCException was raised or if the error code/message are not as expected. + + Args: + code (int), optional: the error code returned by the RPC call (defined + in src/rpc/protocol.h). Set to None if checking the error code is not required. + message (string), optional: [a substring of] the error string returned by the + RPC call. Set to None if checking the error string is not required. + fun (function): the function to call. This should be the name of an RPC. 
+ args*: positional arguments for the function. + kwds**: named arguments for the function. + """ + assert try_rpc(code, message, fun, err_data_, *args, **kwds), "No exception raised" + + +def try_rpc(code: Optional[int], message: Optional[str], fun: Callable, err_data_: Optional[str]=None, *args, **kwds): + """Tries to run an rpc command. + + Test against error code and message if the rpc fails. + Returns whether a JSONRPCException was raised.""" + try: + fun(*args, **kwds) + except jsonrpcclient.exceptions.ReceivedErrorResponseError as e: + error = e.response + # JSONRPCException was thrown as expected. Check the code and message values are correct. + if (code is not None) and (code != error.code): + raise AssertionError( + "Unexpected JSONRPC error code %i" % error.code) + if (message is not None) and (message not in cast(str, error.message)): + raise AssertionError(f"Expected substring not found: {error.message}") + if (err_data_ is not None): + if not getattr(error, "data", None) or (err_data_ not in cast(str, error.data)): + raise AssertionError(f"Expected substring not found: {error.data}") + return True + except Exception as e: + raise AssertionError("Unexpected exception raised: " + + type(e).__name__) + else: + return False + + +def assert_is_hex_string(string): + try: + if string != "0x": + int(string, 16) + except Exception as e: + raise AssertionError( + "Couldn't interpret %r as hexadecimal; raised: %s" % (string, e)) + +def assert_is_hash_string(string, length=64): + if not isinstance(string, str): + raise AssertionError("Expected a string, got type %r" % type(string)) + + if string.startswith("0x"): + string = string[2:] + + if length and len(string) != length: + raise AssertionError( + "String of length %d expected; got %d" % (length, len(string))) + + if not re.match('[abcdef0-9]+$', string): + raise AssertionError( + "String %r contains invalid characters for a hash." % string) + + +def assert_array_result(object_array, + to_match, + expected, + should_not_find=False): + """ + Pass in array of JSON objects, a dictionary with key/value pairs + to match against, and another dictionary with expected key/value + pairs. 
+ If the should_not_find flag is true, to_match should not be found + in object_array + """ + if should_not_find: + assert_equal(expected, {}) + num_matched = 0 + for item in object_array: + all_match = True + for key, value in to_match.items(): + if item[key] != value: + all_match = False + if not all_match: + continue + elif should_not_find: + num_matched = num_matched + 1 + for key, value in expected.items(): + if item[key] != value: + raise AssertionError( + "%s : expected %s=%s" % (str(item), str(key), str(value))) + num_matched = num_matched + 1 + if num_matched == 0 and not should_not_find: + raise AssertionError("No objects matched %s" % (str(to_match))) + if num_matched > 0 and should_not_find: + raise AssertionError("Objects were found %s" % (str(to_match))) + + +# Utility functions +################### + + +def check_json_precision(): + """Make sure json library being used does not lose precision converting BTC values""" + n = Decimal("20000000.00000003") + satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8) + if satoshis != 2000000000000003: + raise RuntimeError("JSON encode/decode loses precision") + +def satoshi_round(amount): + return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN) + + +def wait_until(predicate, + *, + attempts=float('inf'), + timeout=float('inf'), + lock=None): + if attempts == float('inf') and timeout == float('inf'): + timeout = 60 + attempt = 0 + time_end = time.time() + timeout + + while attempt < attempts and time.time() < time_end: + if lock: + with lock: + if predicate(): + return + else: + if predicate(): + return + attempt += 1 + time.sleep(0.5) + + # Print the cause of the timeout + predicate_source = inspect.getsourcelines(predicate) + logger.error("wait_until() failed. Predicate: {}".format(predicate_source)) + if attempt >= attempts: + raise AssertionError("Predicate {} not true after {} attempts".format( + predicate_source, attempts)) + elif time.time() >= time_end: + raise AssertionError("Predicate {} not true after {} seconds".format( + predicate_source, timeout)) + raise RuntimeError('Unreachable') + + +# Node functions +################ + +def initialize_tg_config(dirname, nodes, genesis_nodes, chain_id, initial_seed="0"*64, start_index=None, pkfile=None, pos_round_time_ms=1000): + tg_config_gen = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../../target/release/pos-genesis-tool") + try: + if pkfile is None: + check_output([tg_config_gen, "random", "--num-validator={}".format(nodes), + "--num-genesis-validator={}".format(genesis_nodes), "--chain-id={}".format(chain_id), + "--initial-seed={}".format(initial_seed)], cwd=dirname) + else: + print([tg_config_gen, "frompub", pkfile], dirname) + check_output([tg_config_gen, "frompub", "--initial-seed={}".format(initial_seed), pkfile], cwd=dirname) + except CalledProcessError as e: + print(e.output) + if start_index is None: + start_index = 0 + for n in range(start_index, start_index + nodes): + set_node_pos_config(dirname, n, pos_round_time_ms=pos_round_time_ms) + + +def set_node_pos_config(dirname, n, setup_keys=True, pos_round_time_ms=1000, hardcoded_epoch_committee=None): + waypoint_path = os.path.join(dirname, 'waypoint_config') + genesis_path = os.path.join(dirname, 'genesis_file') + waypoint = open(waypoint_path, 'r').readlines()[0].strip() + private_keys_dir = os.path.join(dirname, "private_keys") + datadir = get_datadir_path(dirname, n) + if not os.path.isdir(datadir): + os.makedirs(datadir) + net_config_dir = os.path.join(datadir, 
'blockchain_data', 'net_config')
+    os.makedirs(net_config_dir, exist_ok=True)
+    os.makedirs(os.path.join(datadir, 'pos_db'), exist_ok=True)
+    validator_config = {}
+    validator_config['base'] = {
+        'data_dir': os.path.join(datadir, 'pos_db'),
+        'role': 'validator',
+        'waypoint': {
+            'from_config': waypoint,
+        }
+    }
+    validator_config['execution'] = {
+        'genesis_file_location': genesis_path,
+    }
+    validator_config['storage'] = {
+        'dir': os.path.join(datadir, 'pos_db', 'db'),
+    }
+    validator_config['consensus'] = {
+        'safety_rules': {
+            'service': {
+                'type': "local",
+            }
+        },
+        'round_initial_timeout_ms': pos_round_time_ms,
+    }
+    if hardcoded_epoch_committee is not None:
+        validator_config['consensus']['hardcoded_epoch_committee'] = hardcoded_epoch_committee
+    validator_config['logger'] = {
+        'level': "TRACE",
+        'file': os.path.join(datadir, "pos.log")
+    }
+    validator_config['mempool'] = {
+        "shared_mempool_tick_interval_ms": 200,
+    }
+    with open(os.path.join(datadir, 'validator_full_node.yaml'), 'w') as f:
+        f.write(yaml.dump(validator_config, default_flow_style=False))
+    if setup_keys:
+        shutil.copyfile(os.path.join(private_keys_dir, str(n)), os.path.join(net_config_dir, 'pos_key'))
+        shutil.copyfile(os.path.join(private_keys_dir, "pow_sk" + str(n)), os.path.join(datadir, 'pow_sk'))
+
+
+def _will_create_genesis_secret_file(conf_parameters, core_secrets: list[str], evm_secrets: list[str]):
+    if conf_parameters.get("genesis_secrets") is None:
+        return len(core_secrets) > 1 or len(evm_secrets) > 1  # the initial genesis secret is already set
+    if conf_parameters.get("genesis_secrets") and (len(core_secrets) > 0 or len(evm_secrets) > 0):
+        # use the module logger: `warnings` is not imported in this module
+        logger.warning("genesis_secrets is set and extra secrets are provided; the extra secrets will be ignored.")
+    return False
+
+def initialize_datadir(dirname, n, port_min, conf_parameters, extra_files: dict = {}, core_secrets: list[str] = [], evm_secrets: list[str] = []):
+    datadir = get_datadir_path(dirname, n)
+    if not os.path.isdir(datadir):
+        os.makedirs(datadir)
+
+    if _will_create_genesis_secret_file(conf_parameters, core_secrets, evm_secrets):
+        genesis_file_path = os.path.join(datadir, "genesis_secrets.txt")
+        with open(genesis_file_path, 'w') as f:
+            for secret in core_secrets:
+                f.write(secret + "\n")
+        conf_parameters.update({"genesis_secrets": f"\"{genesis_file_path}\""})
+        genesis_evm_file_path = os.path.join(datadir, "genesis_evm_secrets.txt")
+        with open(genesis_evm_file_path, 'w') as f:
+            for secret in evm_secrets:
+                f.write(secret + "\n")
+        conf_parameters.update({"genesis_evm_secrets": f"\"{genesis_evm_file_path}\""})
+
+    with open(
+            os.path.join(datadir, "conflux.conf"), 'w', encoding='utf8') as f:
+        local_conf = {
+            "tcp_port": str(p2p_port(n)),
+            "jsonrpc_local_http_port": str(rpc_port(n)),
+            "jsonrpc_ws_port": str(pubsub_port(n)),
+            "jsonrpc_http_port": str(remote_rpc_port(n)),
+            "jsonrpc_http_eth_port": str(evm_rpc_port(n)),
+            "jsonrpc_ws_eth_port": str(evm_rpc_ws_port(n)),
+            "jsonrpc_http_eth_port_v2": str(evm_rpc_port_v2(n)),  # the async espace rpc port
+            "pos_config_path": "\'{}\'".format(os.path.join(datadir, "validator_full_node.yaml")),
+            "pos_initial_nodes_path": "\'{}\'".format(os.path.join(dirname, "initial_nodes.json")),
+            "pos_private_key_path": "'{}'".format(os.path.join(datadir, "blockchain_data", "net_config", "pos_key"))
+        }
+        local_conf.update(integration_tests.conflux.config.small_local_test_conf)
+        local_conf.update(conf_parameters)
+        for k in local_conf:
+            f.write("{}={}\n".format(k, local_conf[k]))
+
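+    # Illustrative layout (a sketch, not asserted anywhere): assuming the default
+    # port seed so that PortMin.n == 11000, and with MAX_NODES == 25, node 0's
+    # conflux.conf starts with
+    #     tcp_port=11000
+    #     jsonrpc_local_http_port=11025
+    #     jsonrpc_ws_port=11027
+    # before small_local_test_conf and the per-test conf_parameters are appended.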
os.makedirs(os.path.join(datadir, 'stderr'), exist_ok=True) + os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True) + for file_name, content in extra_files.items(): + with open(os.path.join(datadir, file_name), 'w', encoding='utf8') as f: + f.write(content) + return datadir + + +def get_datadir_path(dirname, n): + return os.path.join(dirname, "node" + str(n)) + + +def append_config(datadir, options): + with open( + os.path.join(datadir, "bitcoin.conf"), 'a', encoding='utf8') as f: + for option in options: + f.write(option + "\n") + + +def get_auth_cookie(datadir): + user = None + password = None + if os.path.isfile(os.path.join(datadir, "bitcoin.conf")): + with open( + os.path.join(datadir, "bitcoin.conf"), 'r', + encoding='utf8') as f: + for line in f: + if line.startswith("rpcuser="): + assert user is None # Ensure that there is only one rpcuser line + user = line.split("=")[1].strip("\n") + if line.startswith("rpcpassword="): + assert password is None # Ensure that there is only one rpcpassword line + password = line.split("=")[1].strip("\n") + if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")): + with open( + os.path.join(datadir, "regtest", ".cookie"), 'r', + encoding="ascii") as f: + userpass = f.read() + split_userpass = userpass.split(':') + user = split_userpass[0] + password = split_userpass[1] + if user is None or password is None: + raise ValueError("No RPC credentials") + return user, password + + +# If a cookie file exists in the given datadir, delete it. +def delete_cookie_file(datadir): + if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")): + logger.debug("Deleting leftover cookie file") + os.remove(os.path.join(datadir, "regtest", ".cookie")) + + +def get_bip9_status(node, key): + info = node.getblockchaininfo() + return info['bip9_softforks'][key] + + +def set_node_times(nodes, t): + for node in nodes: + node.setmocktime(t) + + +def disconnect_nodes(nodes, from_connection, node_num): + try: + nodes[from_connection].test_removeNode(nodes[node_num].key, get_peer_addr(nodes[node_num])) + nodes[node_num].test_removeNode(nodes[from_connection].key, get_peer_addr(nodes[from_connection])) + except JSONRPCException as e: + # If this node is disconnected between calculating the peer id + # and issuing the disconnect, don't worry about it. + # This avoids a race condition if we're mass-disconnecting peers. + if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED + raise + + # wait to disconnect + wait_until(lambda: [peer for peer in nodes[from_connection].test_getPeerInfo() if peer["nodeid"] == nodes[node_num].key] == [], timeout=5) + wait_until(lambda: [peer for peer in nodes[node_num].test_getPeerInfo() if peer["nodeid"] == nodes[from_connection].key] == [], timeout=5) + + +def check_handshake(from_connection, target_node_id): + """ + Check whether node 'from_connection' has already + added node 'target_node_id' into its peer set. 
+ """ + peers = from_connection.test_getPeerInfo() + for peer in peers: + if peer["nodeid"] == target_node_id and len(peer['protocols']) > 0: + return True + return False + + +def get_peer_addr(connection): + return "{}:{}".format(connection.ip, connection.port) + + +def connect_nodes(nodes, a, node_num, timeout=60): + """ + Let node[a] connect to node[node_num] + """ + from_connection = nodes[a] + to_connection = nodes[node_num] + key = nodes[node_num].key + peer_addr = get_peer_addr(to_connection) + from_connection.test_addNode(key, peer_addr) + # poll until hello handshake complete to avoid race conditions + # with transaction relaying + wait_until(lambda: check_handshake(from_connection, to_connection.key), timeout=timeout) + + +def sync_blocks(rpc_connections, *, sync_count=True, sync_state=True, wait=1, timeout=60): + """ + Wait until everybody has the same tip. + + sync_blocks needs to be called with an rpc_connections set that has least + one node already synced to the latest, stable tip, otherwise there's a + chance it might return before all nodes are stably synced. + """ + stop_time = time.time() + timeout + while time.time() <= stop_time: + best_hash = [x.best_block_hash() for x in rpc_connections] + best_executed = [x.cfx_epochNumber("latest_state") if sync_state else 0 for x in rpc_connections] + block_count = [x.test_getBlockCount() for x in rpc_connections] + if best_hash.count(best_hash[0]) == len(rpc_connections) \ + and (not sync_state or best_executed.count(best_executed[0]) == len(rpc_connections)) \ + and (not sync_count or block_count.count(block_count[0]) == len(rpc_connections)): + return + time.sleep(wait) + raise AssertionError("Block sync timed out:{}".format("".join( + "\n {!r}".format(b) for b in best_hash + best_executed + block_count))) + + +def sync_mempools(rpc_connections, *, wait=1, timeout=60, + flush_scheduler=True): + """ + Wait until everybody has the same transactions in their memory + pools + """ + stop_time = time.time() + timeout + while time.time() <= stop_time: + pool = [set(r.getrawmempool()) for r in rpc_connections] + if pool.count(pool[0]) == len(rpc_connections): + if flush_scheduler: + for r in rpc_connections: + r.syncwithvalidationinterfacequeue() + return + time.sleep(wait) + raise AssertionError("Mempool sync timed out:{}".format("".join( + "\n {!r}".format(m) for m in pool))) + + +def wait_for_block_count(node, count, timeout=10): + wait_until(lambda: node.test_getBlockCount() >= count, timeout=timeout) + + +class WaitHandler: + def __init__(self, node, msgid, func=None): + self.keep_wait = True + self.node = node + self.msgid = msgid + + def on_message(obj, msg): + if func is not None: + func(obj, msg) + self.keep_wait = False + node.set_callback(msgid, on_message) + + def wait(self, timeout=10): + wait_until(lambda: not self.keep_wait, timeout=timeout) + self.node.reset_callback(self.msgid) + + +# RPC/P2P connection constants and functions +############################################ + +# The maximum number of nodes a single test can spawn +MAX_NODES = 25 +# The number of ports to "reserve" for p2p and rpc, each +PORT_RANGE = 100 + + +class PortMin: + # Must be initialized with a unique integer for each process + n: int = None + + +def get_rpc_proxy(url, node_number, timeout=CONFLUX_RPC_WAIT_TIMEOUT, coveragedir=None): + """ + Args: + url (str): URL of the RPC server to call + node_number (int): the node number (or id) that this calls to + + Kwargs: + timeout (int): HTTP timeout in seconds + + Returns: + AuthServiceProxy. 
convenience object for making RPC calls. + + """ + proxy_kwargs = {} + if timeout is not None: + proxy_kwargs['timeout'] = timeout + + proxy = AuthServiceProxy(url, **proxy_kwargs) + proxy.url = url # store URL on proxy for info + + coverage_logfile = coverage.get_filename( + coveragedir, node_number) if coveragedir else None + + return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile) + + +def get_simple_rpc_proxy(url, node=None, timeout=CONFLUX_RPC_WAIT_TIMEOUT): + return SimpleRpcProxy(url, timeout, node) + + +def p2p_port(n): + assert (n <= MAX_NODES) + return PortMin.n + n + +def rpc_port(n): + return PortMin.n + MAX_NODES + n*6 + +def remote_rpc_port(n): + return rpc_port(n) + 1 + +def pubsub_port(n): + return rpc_port(n) + 2 + +def evm_rpc_port(n): + return rpc_port(n) + 3 + +def evm_rpc_ws_port(n): + return rpc_port(n) + 4 + +def evm_rpc_port_v2(n): + return rpc_port(n) + 5 + +def rpc_url(i, rpchost=None, rpcport=None): + if rpchost is None: + # Do not use localhost because our test environment doesn't support + # IPv6 however the python http library assumes that. + rpchost = "127.0.0.1" + if rpcport is None: + rpcport = rpc_port(i) + return "http://%s:%d" % (rpchost, int(rpcport)) + + +def pubsub_url(i, evm=False, pubsubhost=None, pubsubport=None): + if pubsubhost is None: + # Do not use localhost because our test environment doesn't support + # IPv6 however the python http library assumes that. + pubsubhost = "127.0.0.1" + if pubsubport is None: + if evm: + pubsubport = evm_rpc_ws_port(i) + else: + pubsubport = pubsub_port(i) + return "ws://%s:%d" % (pubsubhost, int(pubsubport)) + + +def get_ip_address(): + return [int(i) for i in socket.gethostbyname(socket.gethostname()).split('.')] + + +def checktx(node, tx_hash): + return node.cfx_getTransactionReceipt(tx_hash) is not None + + +def connect_sample_nodes(nodes, log, sample=3, latency_min=0, latency_max=300, timeout=30): + """ + Establish connections among nodes with each node having 'sample' outgoing peers. + It first lets all the nodes link as a loop, then randomly pick 'sample-1' + outgoing peers for each node. + """ + peer = [[] for _ in nodes] + latencies = [{} for _ in nodes] + threads = [] + num_nodes = len(nodes) + sample = min(num_nodes - 1, sample) + + for i in range(num_nodes): + # make sure all nodes are reachable + next = (i + 1) % num_nodes + peer[i].append(next) + lat = random.randint(latency_min, latency_max) + latencies[i][next] = lat + latencies[next][i] = lat + + for _ in range(sample - 1): + while True: + p = random.randint(0, num_nodes - 1) + if p not in peer[i] and not p == i: + peer[i].append(p) + lat = random.randint(latency_min, latency_max) + latencies[i][p] = lat + latencies[p][i] = lat + break + + for i in range(num_nodes): + t = ConnectThread(nodes, i, peer[i], latencies, log, min_peers=sample) + t.start() + threads.append(t) + + for t in threads: + t.join(timeout) + assert not t.is_alive(), "Node[{}] connect to other nodes timeout in {} seconds".format(t.a, timeout) + assert not t.failed, "connect_sample_nodes failed." 
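+
+# Illustrative usage (a sketch, not invoked by the framework itself): a test's
+# setup_network might build a randomly wired 5-node mesh like
+#
+#     self.setup_nodes()
+#     connect_sample_nodes(self.nodes, self.log, sample=3, latency_max=100)
+#     sync_blocks(self.nodes)
+#
+# The ring edge added first (node i -> node i+1) keeps the graph connected even
+# if the random sampling picks an unlucky set of extra peers.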
+ + +def assert_blocks_valid(nodes, blocks): + for node in nodes: + for block in blocks: + r = node.test_getBlockStatus(block) + assert_equal(r[0], 0) # block status is valid + assert_equal(r[1], True) # state_valid is True + + +class ConnectThread(threading.Thread): + def __init__(self, nodes, a, peers, latencies, log, min_peers=3, daemon=True): + threading.Thread.__init__(self, daemon=daemon) + self.nodes = nodes + self.a = a + self.peers = peers + self.latencies = latencies + self.log = log + self.min_peers = min_peers + self.failed = False + + def run(self): + try: + while True: + for i in range(len(self.peers)): + p = self.peers[i] + connect_nodes(self.nodes, self.a, p) + for p in self.latencies[self.a]: + self.nodes[self.a].test_addLatency(self.nodes[p].key, self.latencies[self.a][p]) + if len(self.nodes[self.a].test_getPeerInfo()) >= self.min_peers: + break + else: + time.sleep(1) + except Exception as e: + node = self.nodes[self.a] + self.log.error("Node " + str(self.a) + " fails to be connected to " + str(self.peers) + ", ip={}, index={}".format(node.ip, node.index)) + self.log.error(e) + self.failed = True + + +def get_contract_instance(contract_dict=None, + source=None, + contract_name=None, + address=None, + abi_file=None, + bytecode_file=None): + w3 = web3.Web3() + contract = None + if source and contract_name: + output = solcx.compile_files([source]) + if platform == "win32": + source = os.path.abspath(source).replace("\\","/") + contract_dict = output[f"{source}:{contract_name}"] + if "bin" in contract_dict: + contract_dict["bytecode"] = contract_dict.pop("bin") + elif "code" in contract_dict: + contract_dict["bytecode"] = contract_dict.pop("code") + if contract_dict: + contract = w3.eth.contract( + abi=contract_dict['abi'], bytecode=contract_dict['bytecode'], address=address) + elif abi_file: + with open(abi_file, 'r') as abi_file: + abi = json.loads(abi_file.read()) + if address: + contract = w3.eth.contract(abi=abi, address=address) + elif bytecode_file: + bytecode = None + if bytecode_file: + with open(bytecode_file, 'r') as bytecode_file: + bytecode = bytecode_file.read() + contract = w3.eth.contract(abi=abi, bytecode=bytecode) + else: + raise ValueError("The bytecode or the address must be provided") + return contract + +# This is a util function to test rpc with block object +def do_rpc_call_test_with_block_object(client: "RpcClient", txs: List, rpc_call: Callable, expected_result_lambda: Callable[..., bool], params: List=[]): + parent_hash = client.block_by_epoch("latest_mined")['hash'] + + # generate epoch of 2 block with transactions in each block + # NOTE: we need `C` to ensure that the top fork is heavier + + # --- --- --- + # .- | A | <--- | C | <--- | D | <--- ... + # --- | --- --- --- + # ... <--- | P | <-* . + # --- | --- . + # .- | B | <.................. 
+ # --- + + # all block except for block D is empty + + block_a = client.generate_custom_block(parent_hash = parent_hash, referee = [], txs = []) + block_b = client.generate_custom_block(parent_hash = parent_hash, referee = [], txs = []) + block_c = client.generate_custom_block(parent_hash = block_a, referee = [], txs = []) + block_d = client.generate_custom_block(parent_hash = block_c, referee = [block_b], txs = txs) + + parent_hash = block_d + + # current block_d is not executed + assert_raises_rpc_error(-32602, None, rpc_call, *params, { + "blockHash": block_d + }, err_data_="is not executed") + + # cannot find this block + assert_raises_rpc_error(-32602, "Invalid parameters: epoch parameter", rpc_call, *params, { + "blockHash": "0x{:064x}".format(int(block_d, 16) + 1) + }, err_data_="block's epoch number is not found") + + for _ in range(5): + block = client.generate_custom_block(parent_hash = parent_hash, referee = [], txs = []) + parent_hash = block + + assert_raises_rpc_error(-32602, "Invalid parameters: epoch parameter", rpc_call, *params, { + "blockHash": block_b + }) + assert_raises_rpc_error(-32602, "Invalid parameters: epoch parameter", rpc_call, *params, { + "blockHash": block_b, + "requirePivot": True + }) + + result1 = rpc_call(*params, { + "blockHash": block_d + }) + + result2 = rpc_call(*params, { + "blockHash": block_b, + "requirePivot": False + }) + + assert(expected_result_lambda(result1)) + assert_equal(result2, result1) + +# acct should have cfx +# create a chain of blocks with specified transfer tx with specified num and gas +# return the last block's hash and acct nonce +def generate_blocks_for_base_fee_manipulation(rpc: "RpcClient", acct: Union[CfxLocalAccount, str], block_count=10, tx_per_block=4, gas_per_tx=13500000,initial_parent_hash:str = None) -> Tuple[str, int]: + if isinstance(acct, str): + acct = CfxAccount.from_key(acct) + starting_nonce: int = rpc.get_nonce(acct.hex_address) + + if initial_parent_hash is None: + initial_parent_hash = cast(str, rpc.block_by_epoch("latest_mined")["hash"]) + + block_pointer = initial_parent_hash + for block_count in range(block_count): + block_pointer, starting_nonce = generate_single_block_for_base_fee_manipulation(rpc, acct, tx_per_block=tx_per_block, gas_per_tx=gas_per_tx,parent_hash=block_pointer, starting_nonce=starting_nonce) + + return block_pointer, starting_nonce + block_count * tx_per_block + +def generate_single_block_for_base_fee_manipulation(rpc: "RpcClient", acct: CfxLocalAccount, referee:list[str] =[], tx_per_block=4, gas_per_tx=13500000,parent_hash:str = None, starting_nonce: int = None) -> Tuple[str, int]: + if starting_nonce is None: + starting_nonce = cast(int, rpc.get_nonce(acct.hex_address)) + + if parent_hash is None: + parent_hash = cast(str, rpc.block_by_epoch("latest_mined")["hash"]) + + new_block = rpc.generate_custom_block( + txs=[ + rpc.new_tx( + priv_key=acct.key, + receiver=acct.address, + gas=gas_per_tx, + nonce=starting_nonce + i , + gas_price=rpc.base_fee_per_gas()*2 # give enough gas price to make the tx valid + ) + for i in range(tx_per_block) + ], + parent_hash=parent_hash, + referee=referee, + ) + return new_block, starting_nonce + tx_per_block + +# for transactions in either pivot/non-pivot block +# checks priority fee is calculated as expeted +def assert_correct_fee_computation_for_core_tx(rpc: "RpcClient", tx_hash: str, burnt_ratio=0.5): + def get_gas_charged(rpc: "RpcClient", tx_hash: str) -> int: + gas_limit = int(rpc.get_tx(tx_hash)["gas"], 16) + gas_used = 
int(rpc.get_transaction_receipt(tx_hash)["gasUsed"], 16) + return max(int(3/4*gas_limit), gas_used) + + receipt = rpc.get_transaction_receipt(tx_hash) + # The transaction is not executed + if receipt is None: + return + + tx_data = rpc.get_tx(tx_hash) + tx_type = int(tx_data["type"], 16) + if tx_type == 2: + # original tx fields + max_fee_per_gas = int(tx_data["maxFeePerGas"], 16) + max_priority_fee_per_gas = int(tx_data["maxPriorityFeePerGas"], 16) + else: + max_fee_per_gas = int(tx_data["gasPrice"], 16) + max_priority_fee_per_gas = int(tx_data["gasPrice"], 16) + + effective_gas_price = int(receipt["effectiveGasPrice"], 16) + transaction_epoch = int(receipt["epochNumber"],16) + is_in_pivot_block = rpc.block_by_epoch(transaction_epoch)["hash"] == receipt["blockHash"] + base_fee_per_gas = rpc.base_fee_per_gas(transaction_epoch) + burnt_fee_per_gas = math.ceil(base_fee_per_gas * burnt_ratio) + gas_fee = int(receipt["gasFee"], 16) + burnt_gas_fee = int(receipt["burntGasFee"], 16) + gas_charged = get_gas_charged(rpc, tx_hash) + + # check gas fee computation + # print("effective gas price: ", effective_gas_price) + # print("gas charged: ", get_gas_charged(rpc, tx_hash)) + # print("gas fee", gas_fee) + + # check gas fee and burnt gas fee computation + if receipt["outcomeStatus"] == "0x1": # tx fails because of not enough cash + assert "NotEnoughCash" in receipt["txExecErrorMsg"] + # all gas is charged + assert_equal(rpc.get_balance(tx_data["from"], receipt["epochNumber"]), 0) + # gas fee less than effective gas price + assert gas_fee < effective_gas_price*gas_charged + else: + assert_equal(gas_fee, effective_gas_price*gas_charged) + # check burnt fee computation + assert_equal(burnt_gas_fee, burnt_fee_per_gas*gas_charged) + + # if max_fee_per_gas >= base_fee_per_gas, it shall follow the computation, regardless of transaction in pivot block or not + if max_fee_per_gas >= base_fee_per_gas: + priority_fee_per_gas = effective_gas_price - base_fee_per_gas + # check priority fee computation + assert_equal(priority_fee_per_gas, min(max_priority_fee_per_gas, max_fee_per_gas - base_fee_per_gas)) + else: + # max fee per gas should be greater than burnt fee per gas + assert is_in_pivot_block == False, "Transaction should be in non-pivot block" + assert max_fee_per_gas >= burnt_fee_per_gas + +def assert_tx_exec_error(client: "RpcClient", tx_hash: str, err_msg: str): + client.wait_for_receipt(tx_hash) + receipt = client.get_transaction_receipt(tx_hash) + assert_equal(receipt["txExecErrorMsg"], err_msg) + + +InternalContractName = Literal["AdminControl", "SponsorWhitelistControl", + "Staking", "ConfluxContext", "PoSRegister", "CrossSpaceCall", "ParamsControl"] + +def load_contract_metadata(name: str): + path = Path(join(dirname(__file__), "..", "..", "..", "tests", "test_contracts", "artifacts")) + try: + found_file = next(path.rglob(f"{name}.json")) + return json.loads(open(found_file, "r").read()) + except StopIteration: + raise Exception(f"Cannot found contract {name}'s metadata") + diff --git a/integration_tests/test_framework/util/epoch.py b/integration_tests/test_framework/util/epoch.py new file mode 100644 index 0000000000..2ec9c82b88 --- /dev/null +++ b/integration_tests/test_framework/util/epoch.py @@ -0,0 +1,48 @@ +from dataclasses import dataclass +import sys + + +@dataclass +class EpochErrorInfo: + """ + A dataclass to encapsulate information about epoch-related errors. + epoch (any): The epoch value that caused the error. + error_code (int): The error code. 
+ error_msg (callable): A function that takes the type and args as arguments and returns the error message. + """ + + epoch: any + error_code: int + error_msg: str + + +epoch_invalid_epoch_type_error = EpochErrorInfo( + epoch=1, + error_code=-32602, + error_msg="Invalid params: invalid type: integer `1`, expected an epoch number or 'latest_mined', 'latest_state', 'latest_checkpoint', 'latest_finalized', 'latest_confirmed' or 'earliest'.", +) + + +epoch_epoch_number_too_large_error = EpochErrorInfo( + epoch=hex(sys.maxsize), + error_code=-32602, + error_msg="Invalid params: expected a numbers with less than largest epoch number.", +) + +epoch_empty_epoch_string_error = EpochErrorInfo( + epoch="0x", + error_code=-32602, + error_msg="Invalid params: Invalid epoch number: cannot parse integer from empty string.", +) + +epoch_invalid_digit_epoch_error = EpochErrorInfo( + epoch="0xZZZ", + error_code=-32602, + error_msg="Invalid params: Invalid epoch number: invalid digit found in string.", +) + +epoch_missing_hex_prefix_error = EpochErrorInfo( + epoch="-1", + error_code=-32602, + error_msg="Invalid params: Invalid epoch number: missing 0x prefix.", +) diff --git a/integration_tests/tests/__init__.py b/integration_tests/tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/integration_tests/tests/cip137_test.py b/integration_tests/tests/cip137_test.py new file mode 100644 index 0000000000..9f979d7b82 --- /dev/null +++ b/integration_tests/tests/cip137_test.py @@ -0,0 +1,226 @@ +import pytest +from typing import Tuple +from integration_tests.conflux.rpc import RpcClient, default_config +from integration_tests.test_framework.util import ( + assert_equal, +) + +from cfx_account import Account as CfxAccount +from cfx_account.signers.local import LocalAccount as CfxLocalAccount + +from integration_tests.test_framework.test_framework import ConfluxTestFramework +from integration_tests.test_framework.util import ( + generate_blocks_for_base_fee_manipulation, + generate_single_block_for_base_fee_manipulation, + assert_correct_fee_computation_for_core_tx, +) + +MIN_NATIVE_BASE_PRICE = 10000 +BURNT_RATIO = 0.5 + + +@pytest.fixture(scope="module") +def framework_class(): + class CIP137TestEnv(ConfluxTestFramework): + def set_test_params(self): + self.num_nodes = 1 + self.conf_parameters["min_native_base_price"] = MIN_NATIVE_BASE_PRICE + self.conf_parameters["next_hardfork_transition_height"] = 1 + self.conf_parameters["next_hardfork_transition_number"] = 1 + + def setup_network(self): + self.add_nodes(self.num_nodes) + self.start_node(0, ["--archive"]) + self.rpc = RpcClient(self.nodes[0]) + + return CIP137TestEnv + + +# We need to ensure that the tx in B block +# B and ending block will be in the same epoch +# --- --- --- --- --- --- +# .- | A | <--- | C | <--- | D | <--- | E | <--- | F | <--- | G | ... +# --- | --- --- --- --- --- --- +# ... <--- | P | <-* . +# --- | --- . +# .- | B | <................................................... +# --- +# ensures txs to be included in B block and the ending block (e.g. 
F) base gas price is greater than the specified target_minimum_base_fee (not guaranteed to be the first block) +# returns the ending block hash +def construct_non_pivot_block( + network: ConfluxTestFramework, + acct: CfxLocalAccount, + txs: list, + starting_block_hash: str = None, + epoch_delta: int = 6, # 1.125^6 -> 2.027 which would make the initial tx invalid +) -> Tuple[str, str]: + + if epoch_delta <= 0: + raise ValueError("epoch_delta must be positive") + + if starting_block_hash is None: + starting_block_hash = network.rpc.block_by_epoch("latest_mined")["hash"] + + # create the non-pivot block + non_pivot_block = network.rpc.generate_custom_block( + parent_hash=starting_block_hash, txs=txs, referee=[] + ) + ending_but_two_block, account_next_nonce = ( + generate_blocks_for_base_fee_manipulation( + network.rpc, acct, epoch_delta - 1, initial_parent_hash=starting_block_hash + ) + ) + ending_block, _ = generate_single_block_for_base_fee_manipulation( + network.rpc, + acct, + [non_pivot_block], + parent_hash=ending_but_two_block, + starting_nonce=account_next_nonce, + ) + return non_pivot_block, ending_block + + +def init_acct_with_cfx( + network: ConfluxTestFramework, drip: int = 10**21 +) -> CfxLocalAccount: + network.rpc.send_tx( + network.rpc.new_tx( + receiver=(acct := CfxAccount.create()).address, + value=drip, + gas_price=max( + network.rpc.base_fee_per_gas() * 2, MIN_NATIVE_BASE_PRICE + ), # avoid genisis zero gas price + ), + True, + ) + return acct + + +def get_gas_charged(network: ConfluxTestFramework, tx_hash: str) -> int: + gas_limit = int(network.rpc.get_tx(tx_hash)["gas"], 16) + gas_used = int(network.rpc.get_transaction_receipt(tx_hash)["gasUsed"], 16) + return max(int(3 / 4 * gas_limit), gas_used) + + +def test_cip137(network: ConfluxTestFramework): + acct1 = init_acct_with_cfx(network) + acct2 = init_acct_with_cfx(network) + + block_p = network.rpc.block_by_epoch("latest_mined")["hash"] + + gas_price_level_1 = MIN_NATIVE_BASE_PRICE + gas_price_level_1_5 = int(MIN_NATIVE_BASE_PRICE * 1.5) + gas_price_level_2 = network.rpc.base_fee_per_gas() * 10 + + acct1_txs = [ + network.rpc.new_typed_tx( + receiver=network.rpc.rand_addr(), + priv_key=acct1.key, + nonce=0, + max_fee_per_gas=gas_price_level_2, + ), # expected to succeed + network.rpc.new_typed_tx( + receiver=network.rpc.rand_addr(), + priv_key=acct1.key, + nonce=1, + max_fee_per_gas=gas_price_level_1_5, + ), # expected to succeed with max fee less than epoch base gas fee + network.rpc.new_tx( + receiver=network.rpc.rand_addr(), + priv_key=acct1.key, + nonce=2, + gas_price=gas_price_level_1, + ), # expected to be ignored and can be resend later + network.rpc.new_tx( + receiver=network.rpc.rand_addr(), + priv_key=acct1.key, + nonce=3, + gas_price=gas_price_level_2, + ), # expected to be ignored + ] + + acct2_txs = [ + network.rpc.new_tx( + receiver=network.rpc.rand_addr(), + priv_key=acct2.key, + nonce=0, + gas_price=gas_price_level_2, + ), # expected to succeed + network.rpc.new_tx( + receiver=network.rpc.rand_addr(), + priv_key=acct2.key, + nonce=1, + gas_price=gas_price_level_2, + ), # expected to succeed + network.rpc.new_tx( + receiver=network.rpc.rand_addr(), + priv_key=acct2.key, + nonce=2, + gas_price=gas_price_level_2, + ), # expected to succeed + ] + + block_b, block_f = construct_non_pivot_block( + network, + CfxAccount.from_key(default_config["GENESIS_PRI_KEY"]), + [*acct1_txs, *acct2_txs], + starting_block_hash=block_p, + epoch_delta=6, # 1.125^6 -> 2.03 + ) + + network.log.info(f"current base fee per gas: 
{network.rpc.base_fee_per_gas()}") + + # we are ensuring the gas price order: + # gas_price_level_1 < current_base_fee * burnt_ratio < gas_price_level_1_5 < current_base_fee < gas_price_level_2 + assert gas_price_level_2 > network.rpc.base_fee_per_gas() * BURNT_RATIO + assert ( + gas_price_level_1 < network.rpc.base_fee_per_gas() * BURNT_RATIO + ), f"gas_price_level_1 {gas_price_level_1} should be less than {network.rpc.base_fee_per_gas() * BURNT_RATIO}" + + # wait for epoch of block f executed + parent_block = block_f + for _ in range(30): + block = network.rpc.generate_custom_block( + parent_hash=parent_block, referee=[], txs=[] + ) + parent_block = block + + assert_equal(network.rpc.get_nonce(acct1.address), 2) + assert_equal(network.rpc.get_nonce(acct2.address), 3) + focusing_block = network.rpc.block_by_hash(block_b, True) + epoch = int(focusing_block["epochNumber"], 16) + + network.log.info(f"epoch of block b: {epoch}") + network.log.info(f"heigth of block b: {int(focusing_block['height'], 16)}") + network.log.info( + f"base_fee_per_gas for epoch {epoch}: {network.rpc.base_fee_per_gas(epoch)}" + ) + network.log.info( + f"burnt_fee_per_gas for epoch {epoch}: {network.rpc.base_fee_per_gas(epoch) * 0.5}" + ) + network.log.info( + f"least base fee for epoch {epoch}: {network.rpc.base_fee_per_gas(epoch) * BURNT_RATIO}" + ) + network.log.info( + f"transactions in block b: {network.rpc.block_by_hash(block_b)['transactions']}" + ) + + assert_equal(focusing_block["transactions"][0]["status"], "0x0") + assert_equal(focusing_block["transactions"][1]["status"], "0x0") + assert_equal(focusing_block["transactions"][2]["status"], None) + assert_equal(focusing_block["transactions"][2]["blockHash"], None) + assert_equal(focusing_block["transactions"][3]["status"], None) + assert_equal(focusing_block["transactions"][3]["blockHash"], None) + + # as comparison + assert_equal(focusing_block["transactions"][4]["status"], "0x0") + assert_equal(focusing_block["transactions"][5]["status"], "0x0") + assert_equal(focusing_block["transactions"][6]["status"], "0x0") + + for tx_hash in network.rpc.block_by_hash(block_b)["transactions"]: + assert_correct_fee_computation_for_core_tx(network.rpc, tx_hash, BURNT_RATIO) + + network.rpc.generate_blocks(20, 5) + + # transactions shall be sent back to txpool and then get packed + assert_equal(network.rpc.get_nonce(acct1.address), 4) diff --git a/integration_tests/tests/conftest.py b/integration_tests/tests/conftest.py new file mode 100644 index 0000000000..28848fd21b --- /dev/null +++ b/integration_tests/tests/conftest.py @@ -0,0 +1,150 @@ +import pytest +import argparse +import os +from typing import Type + +from integration_tests.test_framework.test_framework import ConfluxTestFramework +from integration_tests.conflux.rpc import RpcClient + +TMP_DIR = None + +PORT_MIN = 11000 +PORT_MAX = 65535 +PORT_RANGE = 100 + +@pytest.fixture(scope="session") +def arg_parser(): + parser = argparse.ArgumentParser(usage="%(prog)s [options]") + parser.add_argument( + "--nocleanup", + dest="nocleanup", + default=False, + action="store_true", + help="Leave bitcoinds and test.* datadir on exit or error") + parser.add_argument( + "--noshutdown", + dest="noshutdown", + default=False, + action="store_true", + help="Don't stop bitcoinds after the test execution") + parser.add_argument( + "--cachedir", + dest="cachedir", + default=os.path.abspath( + os.path.dirname(os.path.realpath(__file__)) + "/../../cache"), + help= + "Directory for caching pregenerated datadirs (default: %(default)s)" + ) 
+ parser.add_argument( + "--tmpdir", dest="tmpdir", help="Root directory for datadirs") + parser.add_argument( + "-l", + "--loglevel", + dest="loglevel", + default="INFO", + help= + "log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory." + ) + parser.add_argument( + "--tracerpc", + dest="trace_rpc", + default=False, + action="store_true", + help="Print out all RPC calls as they are made") + parser.add_argument( + "--portseed", + dest="port_seed", + default=os.getpid(), + type=int, + help= + "The seed to use for assigning port numbers (default: current process id)" + ) + parser.add_argument( + "--coveragedir", + dest="coveragedir", + help="Write tested RPC commands into this directory") + parser.add_argument( + "--pdbonfailure", + dest="pdbonfailure", + default=False, + action="store_true", + help="Attach a python debugger if test fails") + parser.add_argument( + "--usecli", + dest="usecli", + default=False, + action="store_true", + help="use bitcoin-cli instead of RPC for all commands") + parser.add_argument( + "--randomseed", + dest="random_seed", + type=int, + help="Set a random seed") + parser.add_argument( + "--metrics-report-interval-ms", + dest="metrics_report_interval_ms", + default=0, + type=int) + + parser.add_argument( + "--conflux-binary", + dest="conflux", + default=os.path.join( + os.path.dirname(os.path.realpath(__file__)), + "../../target/release/conflux"), + type=str) + parser.add_argument( + "--port-min", + dest="port_min", + default=11000, + type=int) + return parser + +@pytest.fixture(scope="module") +def framework_class() -> Type[ConfluxTestFramework]: + class DefaultFramework(ConfluxTestFramework): + def set_test_params(self): + self.num_nodes = 1 + self.conf_parameters["min_native_base_price"] = 10000 + self.conf_parameters["next_hardfork_transition_height"] = 1 + self.conf_parameters["next_hardfork_transition_number"] = 1 + + def setup_network(self): + self.setup_nodes() + self.rpc = RpcClient(self.nodes[0]) + return DefaultFramework + +@pytest.fixture(scope="module") +def network(framework_class: Type[ConfluxTestFramework], port_min: int, additional_secrets: int, request: pytest.FixtureRequest): + try: + framework = framework_class(port_min, additional_secrets) + except Exception as e: + pytest.fail(f"Failed to setup framework: {e}") + yield framework + framework.teardown(request) + +@pytest.fixture(scope="module") +def port_min(worker_id: str) -> int: + # worker_id is "master" or "gw0", "gw1", etc. 
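+    # Illustrative mapping with the constants above (PORT_MIN=11000, PORT_RANGE=100):
+    #   "master" -> 11000, "gw0" -> 11000, "gw1" -> 11100, "gw2" -> 11200
+    # so each pytest-xdist worker gets a disjoint port window and parallel test
+    # modules never race for the same p2p/RPC ports.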
+ index = int(worker_id.split("gw")[1]) if "gw" in worker_id else 0 + return PORT_MIN + index * PORT_RANGE + +@pytest.fixture(scope="module") +def additional_secrets(): + return 0 + +@pytest.fixture(scope="module") +def cw3(network: ConfluxTestFramework): + return network.cw3 + +@pytest.fixture(scope="module") +def ew3(network: ConfluxTestFramework): + return network.ew3 + +@pytest.fixture(scope="module") +def core_accounts(network: ConfluxTestFramework): + return network.core_accounts + +@pytest.fixture(scope="module") +def evm_accounts(network: ConfluxTestFramework): + return network.evm_accounts diff --git a/integration_tests/tests/example_test.py b/integration_tests/tests/example_test.py new file mode 100644 index 0000000000..818f48becf --- /dev/null +++ b/integration_tests/tests/example_test.py @@ -0,0 +1,32 @@ +import pytest +from cfx_utils import CFX + +@pytest.fixture(scope="module") +def additional_secrets(): + return 9 + +def test_secrets_count(additional_secrets, core_accounts, evm_accounts): + assert len(core_accounts) == additional_secrets + 1 + assert len(evm_accounts) == additional_secrets + 1 + +def test_send_core_cfx(cw3, core_accounts): + for account in core_accounts: + to_address = cw3.cfx.account.create().address + tx_hash = cw3.cfx.send_transaction({ + "to": to_address, + "value": CFX(1), + "from": account.address + }) + tx_hash.executed() # cw3.cfx.wait_for_transaction_receipt(tx_hash) also works + assert cw3.cfx.get_balance(to_address) == CFX(1) + +def test_send_evm_cfx(ew3, evm_accounts): + for account in evm_accounts: + to_address = ew3.eth.account.create().address + tx_hash = ew3.eth.send_transaction({ + "to": to_address, + "value": ew3.to_wei(1, "ether"), + "from": account.address + }) + ew3.eth.wait_for_transaction_receipt(tx_hash) + assert ew3.eth.get_balance(to_address) == ew3.to_wei(1, "ether") diff --git a/integration_tests/tests/hello_test.py b/integration_tests/tests/hello_test.py new file mode 100644 index 0000000000..795cc58d2e --- /dev/null +++ b/integration_tests/tests/hello_test.py @@ -0,0 +1,2 @@ +def test_hello(): + assert True diff --git a/integration_tests/tests/invalid_message_test.py b/integration_tests/tests/invalid_message_test.py new file mode 100644 index 0000000000..63e295d496 --- /dev/null +++ b/integration_tests/tests/invalid_message_test.py @@ -0,0 +1,126 @@ +import pytest +from integration_tests.test_framework.test_framework import ConfluxTestFramework + +from integration_tests.conflux import utils, trie +from integration_tests.conflux.rpc import RpcClient +from integration_tests.conflux.utils import encode_hex, bytes_to_int, int_to_hex, str_to_bytes +from integration_tests.test_framework.blocktools import create_block, create_transaction +from integration_tests.test_framework.test_framework import ConfluxTestFramework +from integration_tests.test_framework.mininode import * +from integration_tests.test_framework.test_node import TestNode +from integration_tests.test_framework.util import * + +class InvalidMessageTestClass(ConfluxTestFramework): + def set_test_params(self): + self.num_nodes = 4 + # Disable 1559 for RPC tests temporarily + self.conf_parameters["cip1559_transition_height"] = str(99999999) + + def setup_network(self): + self.setup_nodes() + for i in range(self.num_nodes - 1): + connect_nodes(self.nodes, i, i + 1) + self.nodes[i].test_addLatency(self.nodes[i+1].key, 1000) + self.nodes[i+1].test_addLatency(self.nodes[i].key, 1000) + + + def send_msg(self, msg): + self.nodes[0].p2p.send_protocol_msg(msg) + + def 
reconnect(self, node: TestNode): + node.disconnect_p2ps() + # Wait for disconnection + time.sleep(0.5) + genesis = node.cfx_getBlockByEpochNumber("0x0", False)["hash"] + node.add_p2p_connection(DefaultNode(genesis)) + node.p2p.wait_for_status() + + def _test_invalid_packet(self): + self.log.info("Test invalid packet") + # self.nodes[0].p2p.send_packet(0, b'') + # self.nodes[0].p2p.send_packet(0xff, b'') + # self.nodes[0].p2p.send_packet(PACKET_PROTOCOL, b'') + block_hash = decode_hex(self.nodes[0].test_generateEmptyBlocks(1)[0]) + wait = [True] + + h = WaitHandler(self.nodes[0].p2p, GET_BLOCK_HEADERS_RESPONSE) + self.nodes[0].p2p.send_protocol_msg(GetBlockHeaders(hashes=[block_hash])) + h.wait() + + def assert_length(_node, msg): + assert_equal(len(msg.headers), 1) + h = WaitHandler(self.nodes[0].p2p, GET_BLOCK_HEADERS_RESPONSE, assert_length) + self.nodes[0].p2p.send_protocol_msg(GetBlockHeaders(hashes=[block_hash])) + h.wait() + self.reconnect(self.nodes[0]) + + def _test_new_block(self): + self.log.info("Test New Block") + client = RpcClient(self.nodes[0]) + best_block = client.best_block_hash() + best_epoch = client.epoch_number() + new_block = create_block(decode_hex(best_block), best_epoch + 1) + self.send_msg(NewBlock(block=new_block)) + wait_until(lambda: self.nodes[0].best_block_hash() == new_block.hash_hex()) + + # Wrong payload + self.nodes[0].p2p.send_protocol_packet(rlp.encode([0]) + int_to_bytes(NEW_BLOCK)) + time.sleep(1) + assert_equal(self.nodes[0].best_block_hash(), new_block.hash_hex()) + assert_equal(self.nodes[0].test_getBlockCount(), 3) + self.reconnect(self.nodes[0]) + + # Wrong-length parent hash + invalid_block = create_block(parent_hash=b'', height=2) + self.send_msg(NewBlock(block=invalid_block)) + time.sleep(1) + assert_equal(self.nodes[0].best_block_hash(), new_block.hash_hex()) + assert_equal(self.nodes[0].test_getBlockCount(), 3) + self.reconnect(self.nodes[0]) + + # Wrong-length author + invalid_block = create_block(author=b'', height=2) + self.send_msg(NewBlock(block=invalid_block)) + time.sleep(1) + assert_equal(self.nodes[0].best_block_hash(), new_block.hash_hex()) + assert_equal(self.nodes[0].test_getBlockCount(), 3) + self.reconnect(self.nodes[0]) + + # Wrong-length root + invalid_block = create_block(deferred_state_root=b'', height=2, deferred_receipts_root=b'') + self.send_msg(NewBlock(block=invalid_block)) + time.sleep(1) + assert_equal(self.nodes[0].best_block_hash(), new_block.hash_hex()) + assert_equal(self.nodes[0].test_getBlockCount(), 3) + self.reconnect(self.nodes[0]) + + # Nonexistent parent + invalid_block = create_block(parent_hash=b'\x00' * 32, height=2) + self.send_msg(NewBlock(block=invalid_block)) + time.sleep(1) + assert_equal(self.nodes[0].best_block_hash(), new_block.hash_hex()) + assert_equal(self.nodes[0].test_getBlockCount(), 3) + self.reconnect(self.nodes[0]) + + # Invalid height + invalid_block = create_block(new_block.hash, 1) + self.send_msg(NewBlock(block=invalid_block)) + time.sleep(1) + assert_equal(self.nodes[0].best_block_hash(), new_block.hash_hex()) + assert_equal(self.nodes[0].test_getBlockCount(), 3) + self.reconnect(self.nodes[0]) + + sync_blocks(self.nodes) + + # TODO Generate some random blocks that have wrong ref edges + pass + +@pytest.fixture(scope="module") +def framework_class(): + return InvalidMessageTestClass + +def test_invalid_message(network: InvalidMessageTestClass): + start_p2p_connection([network.nodes[0]]) + + network._test_invalid_packet() + network._test_new_block() diff --git 
a/integration_tests/tests/message_test.py b/integration_tests/tests/message_test.py new file mode 100644 index 0000000000..a10be9b577 --- /dev/null +++ b/integration_tests/tests/message_test.py @@ -0,0 +1,89 @@ +import pytest +import struct + + +from integration_tests.conflux import utils +from eth_utils.hexadecimal import decode_hex, encode_hex +from integration_tests.test_framework.blocktools import create_block +from integration_tests.test_framework.test_framework import ConfluxTestFramework +from integration_tests.test_framework.mininode import * +from integration_tests.test_framework.util import * + +class MessageTestClass(ConfluxTestFramework): + def set_test_params(self): + self.num_nodes = 4 + # Disable 1559 for RPC tests temporarily + self.conf_parameters["cip1559_transition_height"] = str(99999999) + + def setup_network(self): + self.setup_nodes() + for i in range(self.num_nodes - 1): + connect_nodes(self.nodes, i, i + 1) + + def send_msg(self, msg): + self.nodes[0].p2p.send_protocol_msg(msg) + + def test_socket_msg(self, node): + self.log.info("testing invalid socket message ...") + + # empty packet + buf = struct.pack("= 1) + def on_block_headers(node, msg): + network.log.info("Received %d headers", len(msg.headers)) + for header in msg.headers: + network.log.info("Block header: %s", encode_hex(header.hash)) + handler = WaitHandler(default_node, GET_BLOCK_HEADERS_RESPONSE, on_block_headers) + network.log.info("Send GetBlockHeaders message") + network.send_msg(GetBlockHeaders(hashes=[blocks[0]])) + handler.wait() + # This message is not used in current Conflux sync protocol + # network.log.info("Send GetBlockBoies message") + # network.send_msg(GetBlockBodies(hashes=[blocks[0]])) + # wait_until(lambda: default_node.msg_count >= 3) + network.log.info("Send GetBlocks message") + handler = WaitHandler(default_node, GET_BLOCKS_RESPONSE) + network.send_msg(GetBlocks(with_public=False, hashes=[blocks[0]])) + handler.wait() + network.log.info("Received GetBlock response") + + network.send_msg(NewBlockHashes([new_block.block_header.hash])) + network.send_msg(NewBlock(block=new_block)) + network.log.info("Send GetTerminalBLockHashes message") + network.send_msg(GetTerminalBlockHashes()) + handler = WaitHandler(default_node, GET_TERMINAL_BLOCK_HASHES_RESPONSE) + handler.wait() + network.log.info("Received TerminalBlockHashes") + + + network.test_socket_msg(network.nodes[0]) diff --git a/integration_tests/tests/rpc/conftest.py b/integration_tests/tests/rpc/conftest.py new file mode 100644 index 0000000000..cb00ba8b84 --- /dev/null +++ b/integration_tests/tests/rpc/conftest.py @@ -0,0 +1,7 @@ +import pytest +from integration_tests.test_framework.test_framework import ConfluxTestFramework +from integration_tests.conflux.rpc import RpcClient + +@pytest.fixture(scope="module") +def client(network: ConfluxTestFramework) -> RpcClient: + return network.rpc diff --git a/integration_tests/tests/rpc/espace/test_debug_rpc.py b/integration_tests/tests/rpc/espace/test_debug_rpc.py new file mode 100644 index 0000000000..4e2c49b412 --- /dev/null +++ b/integration_tests/tests/rpc/espace/test_debug_rpc.py @@ -0,0 +1,167 @@ +import pytest +from integration_tests.test_framework.test_framework import ConfluxTestFramework +from integration_tests.test_framework.util import load_contract_metadata +from integration_tests.conflux.rpc import RpcClient +from typing import Type + +@pytest.fixture(scope="module") +def framework_class() -> Type[ConfluxTestFramework]: + class DefaultFramework(ConfluxTestFramework): + 
def set_test_params(self): + self.num_nodes = 1 + self.conf_parameters["min_native_base_price"] = 10000 + self.conf_parameters["next_hardfork_transition_height"] = 1 + self.conf_parameters["next_hardfork_transition_number"] = 1 + self.conf_parameters["public_evm_rpc_async_apis"] = "\"all\"" # open all async apis + + def setup_network(self): + self.setup_nodes() + self.rpc = RpcClient(self.nodes[0]) + + return DefaultFramework + +@pytest.fixture(scope="module") +def erc20_contract(ew3, evm_accounts): + account = evm_accounts[0] + contract_meta = load_contract_metadata("MyToken") + # deploy contract + TokenContract = ew3.eth.contract(abi=contract_meta['abi'], bytecode=contract_meta['bytecode']) + tx_hash = TokenContract.constructor(account.address).transact() + ew3.eth.wait_for_transaction_receipt(tx_hash) + + # create erc20 contract instance + deploy_receipt = ew3.eth.get_transaction_receipt(tx_hash) + assert deploy_receipt["status"] == 1 + erc20_address = deploy_receipt["contractAddress"] + token_contract = ew3.eth.contract(address=erc20_address, abi=contract_meta['abi']) + + # mint 100 tokens to creator + mint_hash = token_contract.functions.mint(account.address, ew3.to_wei(100, "ether")).transact() + ew3.eth.wait_for_transaction_receipt(mint_hash) + + return { + "contract": token_contract, + "deploy_hash": tx_hash, + } + +@pytest.fixture(scope="module") +def token_transfer(erc20_contract, ew3): + to_address = ew3.eth.account.create().address + token_contract = erc20_contract["contract"] + transfer_hash = token_contract.functions.transfer(to_address, ew3.to_wei(1, "ether")).transact() + ew3.eth.wait_for_transaction_receipt(transfer_hash) + return { + "tx_hash": transfer_hash, + } + +def test_trace_simple_cfx_transfer(ew3, evm_accounts): + account = evm_accounts[0] + to_address = ew3.eth.account.create().address + tx_hash = ew3.eth.send_transaction({ + "from": account.address, + "to": to_address, + "value": ew3.to_wei(1, "ether"), + }) + ew3.eth.wait_for_transaction_receipt(tx_hash) + assert ew3.eth.get_balance(to_address) == ew3.to_wei(1, "ether") + + tx_trace = ew3.manager.request_blocking('debug_traceTransaction', [tx_hash]) + assert tx_trace['failed'] == False + assert tx_trace['gas'] == 21000 + assert tx_trace['returnValue'] == '' + assert len(tx_trace['structLogs']) == 0 + +def test_trace_deploy_contract(ew3, erc20_contract): + tx_hash = erc20_contract["deploy_hash"] + tx_trace = ew3.manager.request_blocking('debug_traceTransaction', [tx_hash]) + + oplog_len = len(tx_trace["structLogs"]) + assert tx_trace['failed'] == False + assert tx_trace['gas'] > 21000 + assert oplog_len > 0 + # key check + keys = ["pc", "op", "gas", "gasCost", "depth", "stack"] + for key in keys: + assert key in tx_trace['structLogs'][0] + + assert tx_trace["structLogs"][oplog_len-1]["op"] == "RETURN" + +def test_transfer_trace(ew3, token_transfer): + transfer_hash = token_transfer["tx_hash"] + transfer_trace = ew3.manager.request_blocking('debug_traceTransaction', [transfer_hash]) + + assert transfer_trace["failed"] == False + oplog_len = len(transfer_trace["structLogs"]) + assert oplog_len > 0 + assert transfer_trace["structLogs"][oplog_len-1]["op"] == "RETURN" + +def test_noop_trace(ew3, token_transfer): + transfer_hash = token_transfer["tx_hash"] + noop_trace = ew3.manager.request_blocking('debug_traceTransaction', [transfer_hash, {"tracer": "noopTracer"}]) + assert noop_trace == {} + +def test_four_byte_trace(ew3, token_transfer): + transfer_hash = token_transfer["tx_hash"] + four_byte_trace = 
ew3.manager.request_blocking('debug_traceTransaction', [transfer_hash, {"tracer": "4byteTracer"}]) + assert four_byte_trace == {'0xa9059cbb-64': 1} + +def test_call_trace(ew3, token_transfer): + transfer_hash = token_transfer["tx_hash"] + call_trace = ew3.manager.request_blocking('debug_traceTransaction', [transfer_hash, {"tracer": "callTracer"}]) + assert call_trace["from"] == "0x0e768d12395c8abfdedf7b1aeb0dd1d27d5e2a7f" + # assert call_trace["to"] == "0xe2182fba747b5706a516d6cf6bf62d6117ef86ea" + assert call_trace["type"] == 'CALL' + assert call_trace["value"] == "0x0" + assert call_trace["output"] == "0x0000000000000000000000000000000000000000000000000000000000000001" + +def test_opcode_trace_with_config(ew3, token_transfer): + tx_hash = token_transfer["tx_hash"] + trace = ew3.manager.request_blocking('debug_traceTransaction', [tx_hash, { + "enableMemory": True, + "disableStack": False, + "disableStorage": False, + "enableReturnData": True + }]) + + oplog_len = len(trace["structLogs"]) + assert trace["failed"] == False + assert oplog_len == 304 + + # limit parameter test + limited_trace = ew3.manager.request_blocking('debug_traceTransaction', [tx_hash, { + "enableMemory": True, + "disableStack": False, + "disableStorage": False, + "enableReturnData": True, + "limit": 10 + }]) + assert len(limited_trace["structLogs"]) == 10 + + no_stack_storage_trace = ew3.manager.request_blocking('debug_traceTransaction', [tx_hash, { + "enableMemory": True, + "disableStack": True, + "disableStorage": True, + "enableReturnData": True + }]) + + disable_all_trace = ew3.manager.request_blocking('debug_traceTransaction', [tx_hash, { + "enableMemory": False, + "disableStack": True, + "disableStorage": True, + "enableReturnData": False + }]) + + for i, oplog in enumerate(trace["structLogs"]): + + if "memory" in oplog: + assert "memory" not in disable_all_trace["structLogs"][i] + + if "returnData" in oplog: + assert "returnData" not in disable_all_trace["structLogs"][i] + + if "stack" in oplog: + assert "stack" not in no_stack_storage_trace["structLogs"][i] + + if "storage" in oplog: + assert "storage" not in no_stack_storage_trace["structLogs"][i] diff --git a/integration_tests/tests/rpc/test_block_by_epoch.py b/integration_tests/tests/rpc/test_block_by_epoch.py new file mode 100644 index 0000000000..3c2cdf1f13 --- /dev/null +++ b/integration_tests/tests/rpc/test_block_by_epoch.py @@ -0,0 +1,44 @@ +from integration_tests.conflux.rpc import RpcClient +from integration_tests.test_framework.util import assert_raises_rpc_error, assert_equal +from integration_tests.test_framework.util.epoch import ( + epoch_invalid_epoch_type_error, + epoch_epoch_number_too_large_error, + epoch_empty_epoch_string_error, + epoch_invalid_digit_epoch_error, + epoch_missing_hex_prefix_error, +) + + +def test_last_mined(client): + block_hash = client.generate_block() + block = client.block_by_epoch(client.EPOCH_LATEST_MINED) + assert_equal(block["hash"], block_hash) + + +# If these errors are changed, please let us know: https://github.com/Conflux-Chain/rpc-errors/issues/new +def test_get_epoch_number_errors(client: RpcClient): + assert_raises_rpc_error( + epoch_epoch_number_too_large_error.error_code, + epoch_epoch_number_too_large_error.error_msg, + client.block_by_epoch, + epoch_epoch_number_too_large_error.epoch, + ) + assert_raises_rpc_error( + epoch_empty_epoch_string_error.error_code, + epoch_empty_epoch_string_error.error_msg, + client.block_by_epoch, + epoch_empty_epoch_string_error.epoch, + ) + 
assert_raises_rpc_error( + epoch_invalid_digit_epoch_error.error_code, + epoch_invalid_digit_epoch_error.error_msg, + client.block_by_epoch, + epoch_invalid_digit_epoch_error.epoch, + ) + + assert_raises_rpc_error( + epoch_missing_hex_prefix_error.error_code, + epoch_missing_hex_prefix_error.error_msg, + client.block_by_epoch, + epoch_missing_hex_prefix_error.epoch, + ) diff --git a/integration_tests/tests/rpc/test_block_number.py b/integration_tests/tests/rpc/test_block_number.py new file mode 100644 index 0000000000..1fde278e5b --- /dev/null +++ b/integration_tests/tests/rpc/test_block_number.py @@ -0,0 +1,98 @@ +from integration_tests.conflux.rpc import RpcClient +from integration_tests.test_framework.util import assert_equal + +def test_block_number_in_get_block_queries(client: RpcClient): + + # --- --- --- + # .- | A | <--- | C | <--- | D | <--- ... + # --- | --- --- --- + # ... <--- | 0 | <-* . + # --- | --- . + # .- | B | <.................. + # --- + + # 0 --- A --- C --- B --- D --- + # block number: x | x+1 | x+2 | x+3 | x+4 | + # epoch number: y | y+1 | y+2 | y + 3 | + + block_0 = client.block_by_epoch("latest_mined")['hash'] + block_a = client.generate_custom_block(parent_hash = block_0, referee = [], txs = []) + block_b = client.generate_custom_block(parent_hash = block_0, referee = [], txs = []) + block_c = client.generate_custom_block(parent_hash = block_a, referee = [], txs = []) + block_d = client.generate_custom_block(parent_hash = block_c, referee = [block_b], txs = []) + + epoch_0 = int(client.block_by_hash(block_0)['height'], 16) + block_number_0 = int(client.block_by_hash(block_0)['blockNumber'], 16) + + # check block number in `cfx_getBlockByHash` + assert_equal(int(client.block_by_hash(block_a)['blockNumber'], 16), block_number_0 + 1) + assert_equal(int(client.block_by_hash(block_c)['blockNumber'], 16), block_number_0 + 2) + assert_equal(int(client.block_by_hash(block_b)['blockNumber'], 16), block_number_0 + 3) + assert_equal(int(client.block_by_hash(block_d)['blockNumber'], 16), block_number_0 + 4) + + # check block number in `cfx_getBlockByEpochNumber` + epoch_a = hex(epoch_0 + 1) + assert_equal(int(client.block_by_epoch(epoch_a)['blockNumber'], 16), block_number_0 + 1) + + epoch_c = hex(epoch_0 + 2) + assert_equal(int(client.block_by_epoch(epoch_c)['blockNumber'], 16), block_number_0 + 2) + + # note that this epoch will reference the pivot block (D) + epoch_d = hex(epoch_0 + 3) + assert_equal(int(client.block_by_epoch(epoch_d)['blockNumber'], 16), block_number_0 + 4) + +def test_get_block_by_block_number(client: RpcClient): + + # --- --- --- + # .- | A | <--- | C | <--- | D | <--- ... + # --- | --- --- --- + # ... <--- | 0 | <-* . + # --- | --- . + # .- | B | <.................. 
+ # --- + + # 0 --- A --- C --- B --- D --- + # block number: x | x+1 | x+2 | x+3 | x+4 | + # epoch number: y | y+1 | y+2 | y + 3 | + + block_0 = client.block_by_epoch("latest_mined")['hash'] + block_a = client.generate_custom_block(parent_hash = block_0, referee = [], txs = []) + block_b = client.generate_custom_block(parent_hash = block_0, referee = [], txs = []) + block_c = client.generate_custom_block(parent_hash = block_a, referee = [], txs = []) + block_d = client.generate_custom_block(parent_hash = block_c, referee = [block_b], txs = []) + + epoch_0 = int(client.block_by_hash(block_0)['height'], 16) + block_number_0 = int(client.block_by_hash(block_0)['blockNumber'], 16) + + # generate a few more blocks so that the blocks above have been executed + parent_hash = block_d + + for _ in range(5): + block = client.generate_custom_block(parent_hash = parent_hash, referee = [], txs = []) + parent_hash = block + + # check blocks by block number + assert_equal( + client.block_by_block_number(hex(block_number_0)), + client.block_by_hash(block_0) + ) + + assert_equal( + client.block_by_block_number(hex(block_number_0 + 1)), + client.block_by_hash(block_a) + ) + + assert_equal( + client.block_by_block_number(hex(block_number_0 + 2)), + client.block_by_hash(block_c) + ) + + assert_equal( + client.block_by_block_number(hex(block_number_0 + 3)), + client.block_by_hash(block_b) + ) + + assert_equal( + client.block_by_block_number(hex(block_number_0 + 4)), + client.block_by_hash(block_d) + ) diff --git a/integration_tests/tests/rpc/test_epoch_number.py b/integration_tests/tests/rpc/test_epoch_number.py new file mode 100644 index 0000000000..7c9d0777ef --- /dev/null +++ b/integration_tests/tests/rpc/test_epoch_number.py @@ -0,0 +1,44 @@ +from integration_tests.conflux.rpc import RpcClient +from integration_tests.test_framework.util import assert_raises_rpc_error +from integration_tests.test_framework.util.epoch import ( + epoch_invalid_epoch_type_error, + epoch_epoch_number_too_large_error, + epoch_empty_epoch_string_error, + epoch_invalid_digit_epoch_error, + epoch_missing_hex_prefix_error, +) + + +# If these errors are changed, please let us know: https://github.com/Conflux-Chain/rpc-errors/issues/new +def test_get_epoch_number_errors(client: RpcClient): + assert_raises_rpc_error( + epoch_invalid_epoch_type_error.error_code, + epoch_invalid_epoch_type_error.error_msg, + client.epoch_number, + epoch_invalid_epoch_type_error.epoch, + ) + assert_raises_rpc_error( + epoch_epoch_number_too_large_error.error_code, + epoch_epoch_number_too_large_error.error_msg, + client.epoch_number, + epoch_epoch_number_too_large_error.epoch, + ) + assert_raises_rpc_error( + epoch_empty_epoch_string_error.error_code, + epoch_empty_epoch_string_error.error_msg, + client.epoch_number, + epoch_empty_epoch_string_error.epoch, + ) + assert_raises_rpc_error( + epoch_invalid_digit_epoch_error.error_code, + epoch_invalid_digit_epoch_error.error_msg, + client.epoch_number, + epoch_invalid_digit_epoch_error.epoch, + ) + + assert_raises_rpc_error( + epoch_missing_hex_prefix_error.error_code, + epoch_missing_hex_prefix_error.error_msg, + client.epoch_number, + epoch_missing_hex_prefix_error.epoch, + ) diff --git a/integration_tests/tests/rpc/test_get_tx_by_hash.py b/integration_tests/tests/rpc/test_get_tx_by_hash.py new file mode 100644 index 0000000000..7d161f995d --- /dev/null +++ b/integration_tests/tests/rpc/test_get_tx_by_hash.py @@ -0,0 +1,29 @@ +from integration_tests.conflux.rpc import RpcClient +from integration_tests.test_framework.util import 
assert_raises_rpc_error, assert_equal + + +# If these errors are changed, please let us know: https://github.com/Conflux-Chain/rpc-errors/issues/new + + +def test_get_tx_by_hash_errors(client: RpcClient): + + assert_raises_rpc_error( + -32602, + "Invalid params: invalid length 63, expected a (both 0x-prefixed or not) hex string with length of 64.", + client.node.cfx_getTransactionByHash, + "0x88df016429689c079f3b2f6ad39fa052532c56795b733da78a91ebe6a713944", + ) + + assert_raises_rpc_error( + -32602, + "Invalid params: invalid type: integer `11`, expected a (both 0x-prefixed or not) hex string with length of 64.", + client.node.cfx_getTransactionByHash, + 11, + ) + + assert_raises_rpc_error( + -32602, + "Invalid params: invalid length 0, expected a (both 0x-prefixed or not) hex string with length of 64.", + client.node.cfx_getTransactionByHash, + "0x", + ) diff --git a/tests/test_contracts b/tests/test_contracts index 68fa543c28..83a7877418 160000 --- a/tests/test_contracts +++ b/tests/test_contracts @@ -1 +1 @@ -Subproject commit 68fa543c2858465cff09ffad9ac3571c4597d93c +Subproject commit 83a7877418233b4a40dc4a85f5ec9892539066f2
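
The new-style tests above all share one pytest pattern: a module-scoped `framework_class` fixture returns a `ConfluxTestFramework` subclass, a shared `network` fixture boots that framework, and helper fixtures such as `client` expose an `RpcClient`. The sketch below shows how a new test module in this style fits together; it is illustrative only (the class name `ExampleFramework`, the single-node parameters, and the test name are assumptions, not files in this diff), and it uses only calls that appear above (`generate_block`, `block_by_epoch`, `EPOCH_LATEST_MINED`).

```python
# Minimal sketch of a new-style test module (illustrative, not part of the diff).
# Assumes the shared conftest provides the `network` and `client` fixtures,
# as integration_tests/tests/rpc/conftest.py above suggests.
from typing import Type

import pytest

from integration_tests.conflux.rpc import RpcClient
from integration_tests.test_framework.test_framework import ConfluxTestFramework


@pytest.fixture(scope="module")
def framework_class() -> Type[ConfluxTestFramework]:
    class ExampleFramework(ConfluxTestFramework):
        def set_test_params(self):
            self.num_nodes = 1  # a single node is enough for plain RPC tests

        def setup_network(self):
            self.setup_nodes()
            self.rpc = RpcClient(self.nodes[0])  # exposed via the `client` fixture

    return ExampleFramework


def test_latest_mined_follows_generated_block(client: RpcClient):
    # generating a block should make it the `latest_mined` pivot block
    block_hash = client.generate_block()
    assert client.block_by_epoch(client.EPOCH_LATEST_MINED)["hash"] == block_hash
```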
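
The block-number tests rely on two different orderings over the same DAG: block numbers follow the total (topological) order 0, A, C, B, D, while epoch numbers follow the pivot chain 0, A, C, D, with the referee B folded into D's epoch. The following self-contained restatement of that numbering (offsets relative to block 0, i.e. `block_number_0` and `epoch_0` in the tests) is a sketch added for clarity, not code from the diff.

```python
# Restatement of the numbering assumed by test_block_number.py above.
total_order = ["0", "A", "C", "B", "D"]   # order in which blocks get block numbers
pivot_chain = ["0", "A", "C", "D"]        # pivot blocks; B is only a referee of D

block_number = {blk: i for i, blk in enumerate(total_order)}
epoch_number = {blk: i for i, blk in enumerate(pivot_chain)}
epoch_number["B"] = epoch_number["D"]     # B is executed as part of D's epoch

assert block_number == {"0": 0, "A": 1, "C": 2, "B": 3, "D": 4}
assert epoch_number == {"0": 0, "A": 1, "C": 2, "D": 3, "B": 3}
```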