Polygon RPC endpoint details
This is a general guide on deploying a Polygon RPC endpoint for your Chainlink node(s).
Your Chainlink node has two connection methods: WebSocket and HTTP. The ports can vary between networks; the common EVM-node defaults are ws://<your_ip_address>:8546 and http://<your_ip_address>:8545, but the Bor node configured in this guide serves:
ws://<your_ip_address>:8556
http://<your_ip_address>:8555
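Once your node is fully synced, a quick way to confirm the HTTP endpoint is serving requests is a standard JSON-RPC call (substitute your node's address):
curl -s -X POST -H 'Content-Type: application/json' --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' http://<your_ip_address>:8555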
Before we can get our Polygon node running, we need to do some housekeeping and install some necessary software. First, create the data directories and install some packages that we'll need, or that will at least be handy to have.
sudo mkdir -p /opt/data/polygon-data
sudo mkdir -p /opt/data/polygon-data/bor
sudo mkdir -p /opt/data/polygon-data/heimdall
Install Docker-CE dependencies first.
sudo apt update && sudo apt upgrade && sudo apt-get install ca-certificates curl git gnupg lsb-release
Download Docker's GPG signing key:
sudo mkdir -p /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg]
https://download.docker.com/linux/debian
$(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
Update apt with the above changes and install Docker-CE
sudo apt-get update
sudo apt-get install docker-ce docker-ce-cli containerd.io docker-compose-plugin
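Verify the installation before continuing:
docker --version
docker compose version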
sudo usermod -a -G docker $USER
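Group membership is only picked up by new login sessions; either log out and back in, or apply it to the current shell with:
newgrp docker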
Next, point Docker's data root at our data volume. Edit /etc/docker/daemon.json (creating it if it doesn't exist):
sudo nano /etc/docker/daemon.json
The file must remain valid JSON; with no other options set, it should contain:
{ "data-root": "/opt/data/docker_data" }
Restart your Docker-CE service
sudo systemctl daemon-reload
sudo systemctl restart docker
docker info | grep -i "Docker Root Dir"
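The grep should report the new location:
Docker Root Dir: /opt/data/docker_data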
sudo apt install -y git curl wget jq node-ws telnet traceroute
Download the latest Heimdall snapshot from snapshots.matic.today.
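For example (the URL here is a placeholder; use the current Heimdall snapshot listed on snapshots.matic.today):
wget -c <heimdall_snapshot_url> -O /opt/data/polygon-data/heimdall/heimdall-snapshot-2022-10-10.tar.gz
While that downloads, initialise the Heimdall home directory; the next command generates the default configuration under /opt/data/polygon-data/heimdall: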
docker run -v /opt/data/polygon-data/heimdall:/heimdall-home:rw --entrypoint /usr/local/bin/heimdalld -it 0xpolygon/heimdall:0.2.11 init --home=/heimdall-home
nano /opt/data/polygon-data/heimdall/config/config.toml
# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml
##### main base config options #####
# TCP or UNIX socket address of the ABCI application,
# or the name of an ABCI application compiled in with the Tendermint binary
proxy_app = "tcp://127.0.0.1:26658"
# A custom human readable name for this node
moniker = "dt-polygon-00"
# If this node is many blocks behind the tip of the chain, FastSync
# allows them to catch up quickly by downloading blocks in parallel
# and verifying their commits
fast_sync = true
# Database backend: goleveldb | cleveldb | boltdb
# * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
# - pure go
# - stable
# * cleveldb (uses levigo wrapper)
# - fast
# - requires gcc
# - use cleveldb build tag (go build -tags cleveldb)
# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt)
# - EXPERIMENTAL
# - may be faster in some use-cases (random reads - indexer)
# - use boltdb build tag (go build -tags boltdb)
db_backend = "goleveldb"
# Database directory
db_dir = "data"
# Output level for logging, including package level options
log_level = "main:info,state:info,*:error"
# Output format: 'plain' (colored text) or 'json'
log_format = "plain"
##### additional base config options #####
# Path to the JSON file containing the initial validator set and other meta data
genesis_file = "config/genesis.json"
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
priv_validator_key_file = "config/priv_validator_key.json"
# Path to the JSON file containing the last sign state of a validator
priv_validator_state_file = "data/priv_validator_state.json"
# TCP or UNIX socket address for Tendermint to listen on for
# connections from an external PrivValidator process
priv_validator_laddr = ""
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
node_key_file = "config/node_key.json"
# Mechanism to connect to the ABCI application: socket | grpc
abci = "socket"
# TCP or UNIX socket address for the profiling server to listen on
prof_laddr = "localhost:6060"
# If true, query the ABCI app on connecting to a new peer
# so the app can decide if we should keep the connection or not
filter_peers = false
##### advanced configuration options #####
##### rpc server configuration options #####
[rpc]
# TCP or UNIX socket address for the RPC server to listen on
laddr = "tcp://0.0.0.0:26657"
# A list of origins a cross-domain request can be executed from
# Default value '[]' disables cors support
# Use '["*"]' to allow any origin
cors_allowed_origins = ["*"]
# A list of methods the client is allowed to use with cross-domain requests
cors_allowed_methods = ["HEAD", "GET", "POST", ]
# A list of non simple headers the client is allowed to use with cross-domain requests
cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ]
# TCP or UNIX socket address for the gRPC server to listen on
# NOTE: This server only supports /broadcast_tx_commit
grpc_laddr = ""
# Maximum number of simultaneous connections.
# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
grpc_max_open_connections = 900
# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
unsafe = false
# Maximum number of simultaneous connections (including WebSocket).
# Does not include gRPC connections. See grpc_max_open_connections
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
max_open_connections = 900
# Maximum number of unique clientIDs that can /subscribe
# If you're using /broadcast_tx_commit, set to the estimated maximum number
# of broadcast_tx_commit calls per block.
max_subscription_clients = 100
# Maximum number of unique queries a given client can /subscribe to
# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to
# the estimated maximum number of broadcast_tx_commit calls per block.
max_subscriptions_per_client = 5
# How long to wait for a tx to be committed during /broadcast_tx_commit.
# WARNING: Using a value larger than 10s will result in increasing the
# global HTTP write timeout, which applies to all connections and endpoints.
# See https://github.com/tendermint/tendermint/issues/3435
timeout_broadcast_tx_commit = "10s"
# Maximum size of request body, in bytes
max_body_bytes = 1000000
# Maximum size of request header, in bytes
max_header_bytes = 1048576
# The path to a file containing certificate that is used to create the HTTPS server.
# Might be either an absolute path or a path relative to tendermint's config directory.
# If the certificate is signed by a certificate authority,
# the certFile should be the concatenation of the server's certificate, any intermediates,
# and the CA's certificate.
# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run.
tls_cert_file = ""
# The path to a file containing matching private key that is used to create the HTTPS server.
# Might be either an absolute path or a path relative to tendermint's config directory.
# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run.
tls_key_file = ""
##### peer to peer configuration options #####
[p2p]
# Address to listen for incoming connections
laddr = "tcp://0.0.0.0:26656"
# Address to advertise to peers for them to dial
# If empty, will use the same port as the laddr,
# and will introspect on the listener or use UPnP
# to figure out the address.
external_address = ""
# Comma separated list of seed nodes to connect to
#seeds = "f4f605d60b8ffaaf15240564e58a81103510631c@159.203.9.164:26656,4fb1bc820088764a564d4f66bba1963d47d82329@44.232.55.71:26656"
seeds = "f4f605d60b8ffaaf15240564e58a81103510631c@159.203.9.164:26656,4fb1bc820088764a564d4f66bba1963d47d82329@44.232.55.71:26656,2eadba4be3ce47ac8db0a3538cb923b57b41c927@35.199.4.13:26656,3b23b20017a6f348d329c102ddc0088f0a10a444@35.221.13.28:26656,25f5f65a09c56e9f1d2d90618aa70cd358aa68da@35.230.116.151:26656"
# Comma separated list of nodes to keep persistent connections to
persistent_peers = ""
# UPNP port forwarding
upnp = false
# Path to address book
addr_book_file = "config/addrbook.json"
# Set true for strict address routability rules
# Set false for private or local networks
addr_book_strict = true
# Maximum number of inbound peers
max_num_inbound_peers = 40
# Maximum number of outbound peers to connect to, excluding persistent peers
max_num_outbound_peers = 10
# Time to wait before flushing messages out on the connection
flush_throttle_timeout = "100ms"
# Maximum size of a message packet payload, in bytes
max_packet_msg_payload_size = 1024
# Rate at which packets can be sent, in bytes/second
send_rate = 5120000
# Rate at which packets can be received, in bytes/second
recv_rate = 5120000
# Set true to enable the peer-exchange reactor
pex = true
# Seed mode, in which node constantly crawls the network and looks for
# peers. If another node asks it for addresses, it responds and disconnects.
#
# Does not work if the peer-exchange reactor is disabled.
seed_mode = false
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
private_peer_ids = ""
# Toggle to disable guard against peers connecting from the same ip.
allow_duplicate_ip = false
# Peer connection configuration.
handshake_timeout = "20s"
dial_timeout = "3s"
##### mempool configuration options #####
[mempool]
recheck = true
broadcast = true
wal_dir = ""
# Maximum number of transactions in the mempool
size = 5000
# Limit the total size of all txs in the mempool.
# This only accounts for raw transactions (e.g. given 1MB transactions and
# max_txs_bytes=5MB, mempool will only accept 5 transactions).
max_txs_bytes = 1073741824
# Size of the cache (used to filter transactions we saw earlier) in transactions
cache_size = 10000
# Maximum size of a single transaction.
# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes} + {amino overhead}.
max_tx_bytes = 1048576
##### fast sync configuration options #####
[fastsync]
# Fast Sync version to use:
# 1) "v0" (default) - the legacy fast sync implementation
# 2) "v1" - refactor of v0 version for better testability
version = "v0"
##### consensus configuration options #####
[consensus]
wal_file = "data/cs.wal/wal"
timeout_propose = "3s"
timeout_propose_delta = "500ms"
timeout_prevote = "1s"
timeout_prevote_delta = "500ms"
timeout_precommit = "1s"
timeout_precommit_delta = "500ms"
timeout_commit = "5s"
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
skip_timeout_commit = false
# EmptyBlocks mode and possible interval between empty blocks
create_empty_blocks = true
create_empty_blocks_interval = "0s"
# Reactor sleep duration parameters
peer_gossip_sleep_duration = "100ms"
peer_query_maj23_sleep_duration = "2s"
##### transactions indexer configuration options #####
[tx_index]
# What indexer to use for transactions
#
# Options:
# 1) "null"
# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
indexer = "kv"
# Comma-separated list of tags to index (by default the only tag is "tx.hash")
#
# You can also index transactions by height by adding "tx.height" tag here.
#
# It's recommended to index only a subset of tags due to possible memory
# bloat. This, of course, depends on the indexer's DB and the volume of
# transactions.
index_tags = ""
# When set to true, tells indexer to index all tags (predefined tags:
# "tx.hash", "tx.height" and all tags from DeliverTx responses).
#
# Note this may be not desirable (see the comment above). IndexTags has a
# precedence over IndexAllTags (i.e. when given both, IndexTags will be
# indexed).
index_all_tags = true
##### instrumentation configuration options #####
[instrumentation]
# When true, Prometheus metrics are served under /metrics on
# PrometheusListenAddr.
# Check out the documentation for the list of available metrics.
prometheus = false
# Address to listen for Prometheus collector(s) connections
prometheus_listen_addr = ":26660"
# Maximum number of simultaneous connections.
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
max_open_connections = 3
# Instrumentation namespace
namespace = "tendermint"
sudo nano /opt/data/polygon-data/heimdall/config/heimdall-config.toml
# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml
##### RPC and REST configs #####
# RPC endpoint for ethereum chain
eth_rpc_url = "https://rpc.dextrac.com:8545"
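# NOTE: replace the URL above with your own Ethereum mainnet RPC endpoint;
# Heimdall uses it to watch checkpoint and staking activity on Ethereum.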
# RPC endpoint for bor chain
bor_rpc_url = "http://bor:8555"
# RPC endpoint for tendermint
tendermint_rpc_url = "http://0.0.0.0:26657"
# Heimdall REST server endpoint
heimdall_rest_server = "http://heimdallr:1317"
#### Bridge configs ####
# AMQP endpoint
amqp_url = "amqp://guest:guest@rabbitmq:5672"
## Poll intervals
checkpoint_poll_interval = "5m0s"
syncer_poll_interval = "1m0s"
noack_poll_interval = "16m50s"
clerk_poll_interval = "10s"
span_poll_interval = "1m0s"
#### gas limits ####
main_chain_gas_limit = "5000000"
#### gas price ####
main_chain_max_gas_price = "400000000000"
##### Timeout Config #####
no_ack_wait_time = "30m0s"
Download the Heimdall mainnet genesis file:
sudo curl -o /opt/data/polygon-data/heimdall/config/genesis.json https://raw.githubusercontent.com/maticnetwork/heimdall/develop/builder/files/genesis-mainnet-v1.json
Now restore the snapshot. Take ownership of the data directory, remove the freshly initialised databases, then extract the snapshot:
sudo chown -R $USER:$USER /opt/data/polygon-data/heimdall/data
cd /opt/data/polygon-data/heimdall/data
sudo rm -rf application.db blockstore.db evidence.db state.db tx_index.db
The below is an example, please be sure to update it with the correct file name for the snapshot you downloaded
tar -xf /opt/data/polygon-data/heimdall/heimdall-snapshot-2022-10-10.tar.gz -C /opt/data/polygon-data/heimdall/data # REPLACE WITH CORRECT FILENAME
sudo chown -R root:root /opt/data/polygon-data/heimdall/data
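A quick sanity check that the databases were restored:
ls /opt/data/polygon-data/heimdall/data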
Create a docker-compose file in your Heimdall directory:
nano /opt/data/infra/rpc/polygon/heimdall/docker-compose.yml
version: '3.8'
services:
  rabbitmq:
    container_name: rabbitmq
    image: rabbitmq:3-alpine
    ports:
      - "5672:5672"
    restart: unless-stopped
  heimdalld:
    container_name: heimdalld
    image: 0xpolygon/heimdall:0.2.11
    restart: unless-stopped
    entrypoint:
      - /usr/local/bin/heimdalld
    volumes:
      - /opt/data/polygon-data/heimdall:/heimdall-home:rw
    ports:
      - "26656:26656"
      - "26657:26657"
    depends_on:
      - rabbitmq
    command:
      - start
      - --home=/heimdall-home
      - --moniker=dt-polygon-00
      - --p2p.laddr=tcp://0.0.0.0:26656
      - --rpc.laddr=tcp://0.0.0.0:26657
  heimdallr:
    container_name: heimdallr
    image: 0xpolygon/heimdall:0.2.11
    restart: unless-stopped
    entrypoint:
      - /usr/local/bin/heimdalld
    volumes:
      - /opt/data/polygon-data/heimdall:/heimdall-home:rw
    ports:
      - "1317:1317"
    depends_on:
      - heimdalld
    command:
      - rest-server
      - --home=/heimdall-home
      - --chain-id=137
      - --laddr=tcp://0.0.0.0:1317
      - --node=tcp://heimdalld:26657
networks:
  default:
    name: polygon-network
    driver: bridge
    driver_opts:
      com.docker.network.bridge.enable_icc: "true"
Then, start your Heimdall container
docker compose -f /opt/data/infra/rpc/polygon/heimdall/docker-compose.yml up -d
Note: Heimdall must be fully synced before your Bor container will start successfully. You can check sync status and the REST server with:
curl localhost:26657/status
curl localhost:1317/bor/span/1
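A synced node reports "catching_up": false in its status; with jq installed earlier, a quick check is:
curl -s localhost:26657/status | jq '.result.sync_info.catching_up'
The span query should return span data rather than an error once the REST server can reach a synced heimdalld.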
Next, set up Bor. Download the Bor mainnet genesis file:
sudo curl -o /opt/data/polygon-data/bor/genesis.json https://raw.githubusercontent.com/maticnetwork/bor/develop/builder/files/genesis-mainnet-v1.json
docker run -v /opt/data/polygon-data/bor:/bor-home:rw -it 0xpolygon/bor:0.2.17 --datadir /bor-home init /bor-home/genesis.json
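The init step writes the genesis state into /opt/data/polygon-data/bor/bor/chaindata on the host; confirm it exists before restoring the snapshot:
ls /opt/data/polygon-data/bor/bor/chaindata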
sudo chown -R $USER:$USER /opt/data/polygon-data/bor/bor
rm -rf /opt/data/polygon-data/bor/bor/chaindata/*
The below is an example, please be sure to update it with the correct file name for the snapshot you downloaded:
tar -xf /opt/data/polygon-data/bor/bor-fullnode-snapshot-2022-10-10.tar.gz -C /opt/data/polygon-data/bor/bor/chaindata # REPLACE WITH CORRECT FILENAME
sudo chown -R root:root /opt/data/polygon-data/bor/bor
Create a docker-compose file in your Bor directory:
nano /opt/data/infra/rpc/polygon/bor/docker-compose.yml
version: '3.8'
services:
  bor:
    container_name: bor
    image: 0xpolygon/bor:0.2.17
    volumes:
      - /opt/data/polygon-data/bor:/bor-home:rw
    ports:
      - "8555:8555"
      - "8556:8556"
      - "30303:30303"
      - "30303:30303/udp"
    command:
      - --syncmode=full
      - --datadir=/bor-home
      - --networkid=137
      - --bor.heimdall=http://heimdallr:1317
      - --bor.logs
      - --rpc.txfeecap=0
      - --miner.gasprice=30000000000
      - --miner.gaslimit=200000000
      - --miner.gastarget=20000000
      - --ipcdisable
      - --http
      - --http.addr=0.0.0.0
      - --http.port=8555
      - --http.api=eth,net,web3,admin,debug,bor
      - --http.corsdomain=*
      - --http.vhosts=*
      - --ws
      - --ws.addr=0.0.0.0
      - --ws.port=8556
      - --ws.api=eth,net,web3,admin,debug,bor
      - --ws.origins=*
      - --nousb
      - --bootnodes=enode://0cb82b395094ee4a2915e9714894627de9ed8498fb881cec6db7c65e8b9a5bd7f2f25cc84e71e89d0947e51c76e85d0847de848c7782b13c0255247a6758178c@44.232.55.71:30303,enode://88116f4295f5a31538ae409e4d44ad40d22e44ee9342869e7d68bdec55b0f83c1530355ce8b41fbec0928a7d75a5745d528450d30aec92066ab6ba1ee351d710@159.203.9.164:30303
networks:
  default:
    external: true
    name: polygon-network
Then, start your Bor container
docker compose -f /opt/data/infra/rpc/polygon/bor/docker-compose.yml up -d
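Once Bor is running (and has had time to catch up), you can check sync progress over JSON-RPC; eth_syncing returns false when the node is fully synced:
curl -s -X POST -H 'Content-Type: application/json' --data '{"jsonrpc":"2.0","method":"eth_syncing","params":[],"id":1}' http://localhost:8555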