feat(web): add network subpages and subdomains listing page
- Add /subdomains page listing all 20 PezkuwiChain subdomains
- Add Back to Home button to Subdomains page
- Create NetworkPage reusable component for network details
- Add 7 network subpages: /mainnet, /staging, /testnet, /beta, /alfa, /development, /local
- Update ChainSpecs network cards to navigate to network subpages
- Add i18n translations for chainSpecs section in en.ts
- Add SDK docs with rebranding support (rebrand-rustdoc.cjs)
- Add generate-docs-structure.cjs for automatic docs generation
- Update shared libs: endpoints, polkadot, wallet, xcm-bridge
- Add new token logos: TYR, ZGR, pezkuwi_icon
- Add new pages: Explorer, Docs, Wallet, Api, Faucet, Developers, Grants, Wiki, Forum, Telemetry
|
After Width: | Height: | Size: 74 KiB |
@@ -0,0 +1,74 @@
|
||||
#!/usr/bin/env node
/**
 * Register parachain 2000 on a local relay chain.
 *
 * Reads the exported genesis head/WASM hex dumps from /tmp, then submits
 * sudo(parasSudoWrapper.sudoScheduleParaInitialize) signed by //Alice and
 * waits for finalization.
 *
 * Assumes a dev relay-chain node (with the sudo and parasSudoWrapper
 * pallets, Alice as sudo key) is listening on ws://127.0.0.1:9944.
 */

const { ApiPromise, WsProvider, Keyring } = require('@polkadot/api');
const fs = require('fs');

const RELAY_WS = 'ws://127.0.0.1:9944';
const PARA_ID = 2000;

/** Read an export-genesis-* output file and return it as a 0x-prefixed hex string. */
function readHex(filePath) {
  return '0x' + fs.readFileSync(filePath, 'utf8').trim();
}

async function main() {
  const provider = new WsProvider(RELAY_WS);
  const api = await ApiPromise.create({ provider });

  // Read genesis files as hex strings
  const genesisHead = readHex('/tmp/teyrchain-genesis-head');
  const genesisWasm = readHex('/tmp/teyrchain-genesis-wasm');

  console.log(`Genesis head length: ${genesisHead.length} chars`);
  console.log(`Genesis WASM length: ${genesisWasm.length} chars`);

  // Alice is the sudo account on dev chains.
  const keyring = new Keyring({ type: 'sr25519' });
  const alice = keyring.addFromUri('//Alice');

  console.log(`Registering parachain ${PARA_ID}...`);

  // Register parachain using sudo
  const tx = api.tx.sudo.sudo(
    api.tx.parasSudoWrapper.sudoScheduleParaInitialize(PARA_ID, {
      genesisHead,
      validationCode: genesisWasm,
      paraKind: true, // true for parachain, false for parathread
    })
  );

  // Sign, send, and wait for finalization. The Promise executor is
  // deliberately NOT async: rejections from signAndSend itself are routed
  // through reject() instead of being silently swallowed by an async
  // executor (the original used `new Promise(async ...)`).
  await new Promise((resolve, reject) => {
    let unsub = null;
    tx.signAndSend(alice, ({ status, events, dispatchError }) => {
      console.log(`Transaction status: ${status.type}`);

      if (status.isInBlock) {
        console.log(`Included in block ${status.asInBlock.toHex()}`);

        // Surface any dispatch error, decoding module errors via metadata.
        if (dispatchError) {
          if (dispatchError.isModule) {
            const decoded = api.registry.findMetaError(dispatchError.asModule);
            const { docs, name, section } = decoded;
            console.error(`Error: ${section}.${name}: ${docs.join(' ')}`);
          } else {
            console.error(`Error: ${dispatchError.toString()}`);
          }
          if (unsub) unsub();
          reject(dispatchError);
          return;
        }

        events.forEach(({ event }) => {
          const { section, method, data } = event;
          console.log(`Event: ${section}.${method}`, data.toString());
        });
      } else if (status.isFinalized) {
        console.log(`Finalized in block ${status.asFinalized.toHex()}`);
        if (unsub) unsub();
        resolve();
      }
    }).then(
      (u) => { unsub = u; },
      reject // connection/signing failure before the callback ever fires
    );
  });

  console.log(`Parachain ${PARA_ID} registered successfully!`);
  await api.disconnect();
}

// Exit non-zero on failure: the original `.finally(() => process.exit())`
// exited with code 0 even when registration failed, which silently breaks
// scripted setups that chain on this step.
main()
  .then(() => process.exit(0))
  .catch((err) => {
    console.error(err);
    process.exit(1);
  });
|
||||
@@ -42,13 +42,31 @@ export const NETWORK_ENDPOINTS: Record<string, NetworkConfig> = {
|
||||
description: 'Staging environment for pre-production testing',
|
||||
},
|
||||
|
||||
// Development Testnet
|
||||
TESTNET: {
|
||||
name: 'Pezkuwi Testnet',
|
||||
endpoint: 'https://testnet.pezkuwichain.io',
|
||||
wsEndpoint: 'wss://testnet.pezkuwichain.io',
|
||||
// Alfa Testnet
|
||||
ALFA: {
|
||||
name: 'Pezkuwi Alfa Testnet',
|
||||
endpoint: 'https://alfa.pezkuwichain.io',
|
||||
wsEndpoint: 'wss://alfa.pezkuwichain.io',
|
||||
type: 'development',
|
||||
description: 'Development testnet for feature testing',
|
||||
description: 'Alfa testnet for early feature testing',
|
||||
},
|
||||
|
||||
// Development Environment
|
||||
DEV: {
|
||||
name: 'Pezkuwi Development',
|
||||
endpoint: 'https://dev.pezkuwichain.io',
|
||||
wsEndpoint: 'wss://dev.pezkuwichain.io',
|
||||
type: 'development',
|
||||
description: 'Development environment for feature testing',
|
||||
},
|
||||
|
||||
// Local Development
|
||||
LOCAL: {
|
||||
name: 'Local Development',
|
||||
endpoint: 'http://127.0.0.1:9944',
|
||||
wsEndpoint: 'ws://127.0.0.1:9944',
|
||||
type: 'development',
|
||||
description: 'Local development node',
|
||||
},
|
||||
};
|
||||
|
||||
@@ -57,8 +75,8 @@ export const NETWORK_ENDPOINTS: Record<string, NetworkConfig> = {
|
||||
*/
|
||||
export const DEFAULT_NETWORK =
|
||||
process.env.NODE_ENV === 'production'
|
||||
? NETWORK_ENDPOINTS.BETA // Currently using Beta for production
|
||||
: NETWORK_ENDPOINTS.TESTNET;
|
||||
? NETWORK_ENDPOINTS.BETA // Currently using Beta for production
|
||||
: NETWORK_ENDPOINTS.DEV;
|
||||
|
||||
/**
|
||||
* Port Configuration
|
||||
@@ -98,6 +116,23 @@ export function getAllNetworks(): NetworkConfig[] {
|
||||
return Object.values(NETWORK_ENDPOINTS);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the current network configuration based on the VITE_NETWORK environment variable.
|
||||
* This serves as the single source of truth for the application's network configuration.
|
||||
* @returns {NetworkConfig} The active network configuration.
|
||||
*/
|
||||
export const getCurrentNetworkConfig = (): NetworkConfig => {
|
||||
const networkName = (import.meta.env.VITE_NETWORK || 'local').toUpperCase();
|
||||
const validNetworkKeys = Object.keys(NETWORK_ENDPOINTS);
|
||||
|
||||
if (validNetworkKeys.includes(networkName)) {
|
||||
return NETWORK_ENDPOINTS[networkName as keyof typeof NETWORK_ENDPOINTS];
|
||||
}
|
||||
|
||||
// Fallback to a default or local configuration if the name is invalid
|
||||
return NETWORK_ENDPOINTS.LOCAL;
|
||||
};
|
||||
|
||||
/**
|
||||
* Check if endpoint is available
|
||||
*/
|
||||
@@ -109,3 +144,4 @@ export async function checkEndpoint(endpoint: string): Promise<boolean> {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
export const NETWORKS = NETWORK_ENDPOINTS;
|
||||
|
||||
@@ -3,46 +3,22 @@
|
||||
*/
|
||||
|
||||
import type { BlockchainNetwork } from '../types/blockchain';
|
||||
import { getCurrentNetworkConfig } from './endpoints';
|
||||
|
||||
/**
|
||||
* Pezkuwi blockchain network configuration
|
||||
* Uses BETA endpoint from centralized endpoints.ts (source of truth)
|
||||
*/
|
||||
export const PEZKUWI_NETWORK: BlockchainNetwork = {
|
||||
name: 'Pezkuwi',
|
||||
endpoint: 'wss://beta-rpc.pezkuwi.art',
|
||||
endpoint: getCurrentNetworkConfig().wsEndpoint,
|
||||
chainId: 'pezkuwi',
|
||||
};
|
||||
|
||||
/**
|
||||
* Common blockchain endpoints
|
||||
*/
|
||||
export const BLOCKCHAIN_ENDPOINTS = {
|
||||
mainnet: 'wss://mainnet.pezkuwichain.io',
|
||||
testnet: 'wss://ws.pezkuwichain.io',
|
||||
local: 'ws://127.0.0.1:9944',
|
||||
} as const;
|
||||
|
||||
/**
|
||||
* Get the appropriate WebSocket endpoint based on environment
|
||||
*/
|
||||
function getWebSocketEndpoint(): string {
|
||||
const network = import.meta.env.VITE_NETWORK || 'local';
|
||||
|
||||
switch (network) {
|
||||
case 'mainnet':
|
||||
return import.meta.env.VITE_WS_ENDPOINT_MAINNET || BLOCKCHAIN_ENDPOINTS.mainnet;
|
||||
case 'testnet':
|
||||
return import.meta.env.VITE_WS_ENDPOINT_TESTNET || BLOCKCHAIN_ENDPOINTS.testnet;
|
||||
case 'local':
|
||||
default:
|
||||
return import.meta.env.VITE_WS_ENDPOINT_LOCAL || BLOCKCHAIN_ENDPOINTS.local;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Default endpoint (reads from environment variables)
|
||||
*/
|
||||
export const DEFAULT_ENDPOINT = getWebSocketEndpoint();
|
||||
export const DEFAULT_ENDPOINT = getCurrentNetworkConfig().wsEndpoint;
|
||||
|
||||
/**
|
||||
* Get block explorer URL for a transaction
|
||||
|
||||
|
Before Width: | Height: | Size: 178 KiB After Width: | Height: | Size: 289 KiB |
|
Before Width: | Height: | Size: 469 KiB After Width: | Height: | Size: 586 KiB |
|
After Width: | Height: | Size: 266 KiB |
|
Before Width: | Height: | Size: 21 KiB After Width: | Height: | Size: 18 KiB |
|
After Width: | Height: | Size: 265 KiB |
|
Before Width: | Height: | Size: 719 KiB After Width: | Height: | Size: 170 KiB |
|
After Width: | Height: | Size: 85 KiB |
@@ -4,17 +4,7 @@
|
||||
// This file configures wallet connectivity for Substrate-based chains
|
||||
|
||||
import type { InjectedAccountWithMeta } from '@polkadot/extension-inject/types';
|
||||
|
||||
// ========================================
|
||||
// NETWORK ENDPOINTS
|
||||
// ========================================
|
||||
export const NETWORK_ENDPOINTS = {
|
||||
local: import.meta.env.VITE_DEVELOPMENT_WS || 'ws://127.0.0.1:9944',
|
||||
testnet: import.meta.env.VITE_TESTNET_WS || 'wss://testnet.pezkuwichain.io',
|
||||
mainnet: import.meta.env.VITE_MAINNET_WS || 'wss://mainnet.pezkuwichain.io',
|
||||
staging: import.meta.env.VITE_STAGING_WS || 'wss://staging.pezkuwichain.io',
|
||||
beta: import.meta.env.VITE_BETA_WS || 'wss://beta.pezkuwichain.io',
|
||||
};
|
||||
import { getCurrentNetworkConfig } from '../blockchain/endpoints';
|
||||
|
||||
// ========================================
|
||||
// CHAIN CONFIGURATION
|
||||
@@ -38,7 +28,7 @@ export const CHAIN_CONFIG = {
|
||||
export const ASSET_IDS = {
|
||||
WHEZ: parseInt(import.meta.env.VITE_ASSET_WHEZ || '0'), // Wrapped HEZ
|
||||
PEZ: parseInt(import.meta.env.VITE_ASSET_PEZ || '1'), // PEZ utility token
|
||||
WUSDT: parseInt(import.meta.env.VITE_ASSET_WUSDT || '1000'), // Wrapped USDT (6 decimals, matches SDK)
|
||||
WUSDT: parseInt(import.meta.env.VITE_ASSET_WUSDT || '1000'), // Wrapped USDT (6 decimals, Asset ID 1000)
|
||||
USDT: parseInt(import.meta.env.VITE_ASSET_USDT || '3'),
|
||||
BTC: parseInt(import.meta.env.VITE_ASSET_BTC || '4'),
|
||||
ETH: parseInt(import.meta.env.VITE_ASSET_ETH || '5'),
|
||||
@@ -146,8 +136,7 @@ export const getAssetSymbol = (assetId: number): string => {
|
||||
* @returns WebSocket endpoint URL
|
||||
*/
|
||||
export const getCurrentEndpoint = (): string => {
|
||||
const network = import.meta.env.VITE_NETWORK || 'local';
|
||||
return NETWORK_ENDPOINTS[network as keyof typeof NETWORK_ENDPOINTS] || NETWORK_ENDPOINTS.local;
|
||||
return getCurrentNetworkConfig().wsEndpoint;
|
||||
};
|
||||
|
||||
// ========================================
|
||||
|
||||
@@ -3,17 +3,40 @@
|
||||
*
|
||||
* Handles Asset Hub USDT → wUSDT bridge configuration
|
||||
* User-friendly abstraction over complex XCM operations
|
||||
*
|
||||
* ALFA TESTNET MODE: Mock XCM for standalone chain testing
|
||||
* BETA+ MODE: Real XCM with Rococo/Westend Asset Hub
|
||||
*/
|
||||
|
||||
import { ApiPromise, WsProvider } from '@polkadot/api';
|
||||
import type { Signer } from '@polkadot/api/types';
|
||||
|
||||
// Westend Asset Hub endpoint
|
||||
// Detect mock mode (alfa testnet)
// True when the VITE_MOCK_XCM flag equals the string 'true'. The check
// prefers process.env (Node/SSR tooling) and only falls back to Vite's
// import.meta.env (browser build); if neither environment object exists the
// flag defaults to false.
// NOTE(review): if the browser bundle polyfills `process`, the first branch
// wins and import.meta.env is never consulted — confirm this is intended.
const IS_MOCK_MODE = typeof process !== 'undefined'
  ? process.env.VITE_MOCK_XCM === 'true'
  : typeof import.meta !== 'undefined'
    ? import.meta.env?.VITE_MOCK_XCM === 'true'
    : false;
|
||||
|
||||
// Mock XCM state management (localStorage)
|
||||
const MOCK_XCM_STORAGE_KEY = 'pezkuwi_mock_xcm_configured';
|
||||
|
||||
function getMockXcmConfigured(): boolean {
|
||||
if (typeof window === 'undefined') return false;
|
||||
return localStorage.getItem(MOCK_XCM_STORAGE_KEY) === 'true';
|
||||
}
|
||||
|
||||
function setMockXcmConfigured(configured: boolean): void {
|
||||
if (typeof window === 'undefined') return;
|
||||
localStorage.setItem(MOCK_XCM_STORAGE_KEY, String(configured));
|
||||
}
|
||||
|
||||
// Westend Asset Hub endpoint (production)
|
||||
export const ASSET_HUB_ENDPOINT = 'wss://westend-asset-hub-rpc.polkadot.io';
|
||||
|
||||
// Known Asset IDs
|
||||
export const ASSET_HUB_USDT_ID = 1984; // USDT on Asset Hub
|
||||
export const WUSDT_ASSET_ID = 1000; // wUSDT on PezkuwiChain
|
||||
export const WUSDT_ASSET_ID = 1000; // wUSDT on PezkuwiChain (was 2, now 1000)
|
||||
export const ASSET_HUB_PARACHAIN_ID = 1000;
|
||||
|
||||
/**
|
||||
@@ -42,6 +65,12 @@ export interface AssetHubUsdtInfo {
|
||||
* Connect to Asset Hub
|
||||
*/
|
||||
export async function connectToAssetHub(): Promise<ApiPromise> {
|
||||
if (IS_MOCK_MODE) {
|
||||
console.log('[MOCK XCM] Simulating Asset Hub connection for alfa testnet');
|
||||
// Return null to signal mock mode - calling code will handle gracefully
|
||||
return null as any;
|
||||
}
|
||||
|
||||
try {
|
||||
const provider = new WsProvider(ASSET_HUB_ENDPOINT);
|
||||
const api = await ApiPromise.create({ provider });
|
||||
@@ -60,6 +89,17 @@ export async function connectToAssetHub(): Promise<ApiPromise> {
|
||||
export async function fetchAssetHubUsdtInfo(
|
||||
assetHubApi?: ApiPromise
|
||||
): Promise<AssetHubUsdtInfo> {
|
||||
if (IS_MOCK_MODE) {
|
||||
console.log('[MOCK XCM] Returning simulated Asset Hub USDT info');
|
||||
return {
|
||||
id: ASSET_HUB_USDT_ID,
|
||||
name: 'Tether USD',
|
||||
symbol: 'USDT',
|
||||
decimals: 6,
|
||||
supply: '1000000000000000', // 1 billion USDT (simulated)
|
||||
};
|
||||
}
|
||||
|
||||
let api = assetHubApi;
|
||||
let shouldDisconnect = false;
|
||||
|
||||
@@ -106,6 +146,19 @@ export async function checkBridgeStatus(
|
||||
const wusdtAsset = await api.query.assets.asset(WUSDT_ASSET_ID);
|
||||
const wusdtExists = wusdtAsset.isSome;
|
||||
|
||||
// Mock mode: Simulate successful bridge setup
|
||||
if (IS_MOCK_MODE) {
|
||||
const isConfigured = getMockXcmConfigured();
|
||||
console.log('[MOCK XCM] Returning simulated bridge status for alfa testnet (configured:', isConfigured, ')');
|
||||
return {
|
||||
isConfigured,
|
||||
assetHubLocation: isConfigured ? `ParaId(${ASSET_HUB_PARACHAIN_ID})` : null,
|
||||
usdtMapping: isConfigured ? WUSDT_ASSET_ID : null,
|
||||
assetHubConnected: true, // Simulated connection success
|
||||
wusdtExists,
|
||||
};
|
||||
}
|
||||
|
||||
// Try to connect to Asset Hub
|
||||
let assetHubConnected = false;
|
||||
try {
|
||||
@@ -155,6 +208,25 @@ export async function configureXcmBridge(
|
||||
throw new Error('Sudo pallet not available');
|
||||
}
|
||||
|
||||
// Mock mode: Simulate successful configuration
|
||||
if (IS_MOCK_MODE) {
|
||||
console.log('[MOCK XCM] Simulating XCM bridge configuration for alfa testnet');
|
||||
|
||||
onStatusUpdate?.('Preparing XCM configuration...');
|
||||
await new Promise(resolve => setTimeout(resolve, 500));
|
||||
|
||||
onStatusUpdate?.('Simulating sudo transaction...');
|
||||
await new Promise(resolve => setTimeout(resolve, 1000));
|
||||
|
||||
onStatusUpdate?.('Mock XCM bridge configured successfully!');
|
||||
|
||||
// Store mock configuration state
|
||||
setMockXcmConfigured(true);
|
||||
|
||||
// Return mock transaction hash
|
||||
return '0x' + '0'.repeat(64);
|
||||
}
|
||||
|
||||
try {
|
||||
onStatusUpdate?.('Preparing XCM configuration...');
|
||||
|
||||
|
||||
@@ -0,0 +1,506 @@
|
||||
/**
|
||||
* XCM Configuration Wizard Backend Functions
|
||||
*
|
||||
* Handles parachain registration, HRMP channels, foreign asset registration,
|
||||
* and XCM transfer testing for PezkuwiChain.
|
||||
*/
|
||||
|
||||
import type { ApiPromise } from '@polkadot/api';
|
||||
import type { InjectedAccountWithMeta } from '@polkadot/extension-inject/types';
|
||||
|
||||
// ========================================
// TYPES
// ========================================

/** Relay chains the XCM wizard can target. */
export type RelayChain = 'westend' | 'rococo' | 'polkadot';

/** Locations and sizes of the exported genesis state and runtime WASM. */
export interface ChainArtifacts {
  genesisPath: string; // path to the exported genesis head (hex)
  genesisSize: number; // size in bytes (0 when not yet generated)
  wasmPath: string; // path to the exported runtime WASM
  wasmSize: number; // size in bytes (0 when not yet generated)
}

/** A requested/opened HRMP channel between two parachains. */
export interface HRMPChannel {
  sender: number; // ParaId of the initiating chain
  receiver: number; // ParaId of the receiving chain
  channelId: string; // opaque identifier (see openHRMPChannel for what is stored here)
}

/** On-chain metadata for an asset being registered. */
export interface AssetMetadata {
  name: string;
  symbol: string;
  decimals: number;
  minBalance: string; // existential deposit for the asset, as a string-encoded integer
}

/** A foreign (XCM-originated) asset to register on this parachain. */
export interface ForeignAsset {
  symbol: string;
  location: {
    parents: number;
    interior: any; // XCM Location interior
  };
  metadata: AssetMetadata;
}

/** Result of registering one foreign asset. */
export interface RegisteredAsset {
  assetId: number; // Asset ID allocated on this parachain
  symbol: string;
}

/** Outcome of an XCM transfer test run. */
export interface XCMTestResult {
  txHash: string;
  success: boolean;
  balance: string; // post-transfer balance, string-encoded integer
  error?: string; // present when success is false
}
|
||||
|
||||
// ========================================
|
||||
// STEP 1: RESERVE PARAID
|
||||
// ========================================
|
||||
|
||||
/**
 * Reserve a ParaId on the relay chain
 *
 * Signs registrar.reserve() with the given wallet account and resolves with
 * the ParaId extracted from the registrar.Reserved event once the extrinsic
 * is included in a block.
 *
 * @param api - Polkadot.js API instance (connected to relay chain)
 * @param relayChain - Target relay chain (westend/rococo/polkadot)
 * @param account - Account to sign the transaction
 * @returns Reserved ParaId number
 *
 * NOTE(review): `relayChain` is not used in the body — the target chain is
 * implied by whatever `api` is connected to; confirm the parameter is kept
 * for interface symmetry only.
 * NOTE(review): the Promise executor is async; any rejection thrown inside
 * it is caught by the local try/catch here, but the pattern is fragile.
 */
export async function reserveParaId(
  api: ApiPromise,
  relayChain: RelayChain,
  account: InjectedAccountWithMeta
): Promise<number> {
  return new Promise(async (resolve, reject) => {
    try {
      // Ask the browser wallet extension that owns this account for a signer.
      const injector = await window.injectedWeb3[account.meta.source]?.enable?.('PezkuwiChain');
      if (!injector) {
        throw new Error('Failed to get injector from wallet extension');
      }

      const signer = injector.signer;

      // Call registrar.reserve() on relay chain
      const tx = api.tx.registrar.reserve();

      // Filled in once signAndSend resolves with the unsubscribe function.
      // The status callback may fire before that, hence the `if (unsub)` guards.
      let unsub: () => void;

      await tx.signAndSend(account.address, { signer }, ({ status, events, dispatchError }) => {
        if (dispatchError) {
          if (dispatchError.isModule) {
            // Decode pallet errors into human-readable section.name + docs.
            const decoded = api.registry.findMetaError(dispatchError.asModule);
            reject(new Error(`${decoded.section}.${decoded.name}: ${decoded.docs.join(' ')}`));
          } else {
            reject(new Error(dispatchError.toString()));
          }
          if (unsub) unsub();
          return;
        }

        if (status.isInBlock) {
          // Extract ParaId from events
          const reservedEvent = events.find(({ event }) =>
            api.events.registrar.Reserved.is(event)
          );

          if (reservedEvent) {
            // Reserved event layout: data[0] is the allocated ParaId.
            const paraId = reservedEvent.event.data[0].toNumber();
            resolve(paraId);
            if (unsub) unsub();
          } else {
            reject(new Error('ParaId reservation failed: No Reserved event found'));
            if (unsub) unsub();
          }
        }
      }).then(unsubscribe => { unsub = unsubscribe; });

    } catch (error) {
      reject(error);
    }
  });
}
||||
|
||||
// ========================================
|
||||
// STEP 2: GENERATE CHAIN ARTIFACTS
|
||||
// ========================================
|
||||
|
||||
/**
|
||||
* Generate genesis state and runtime WASM for parachain
|
||||
*
|
||||
* Note: This is a simplified version. In production, you'd call
|
||||
* your blockchain node CLI to generate these artifacts.
|
||||
*
|
||||
* @param chainName - Name of the parachain
|
||||
* @returns Paths to generated artifacts
|
||||
*/
|
||||
export async function generateChainArtifacts(
|
||||
chainName: string
|
||||
): Promise<ChainArtifacts> {
|
||||
// In a real implementation, this would:
|
||||
// 1. Call: ./target/release/pezkuwi export-genesis-state --chain=<chain-spec> > genesis-head.hex
|
||||
// 2. Call: ./target/release/pezkuwi export-genesis-wasm --chain=<chain-spec> > runtime.wasm
|
||||
// 3. Return the file paths and sizes
|
||||
|
||||
// For now, we'll return placeholder paths
|
||||
// The actual implementation should use Node.js child_process or a backend API
|
||||
|
||||
return {
|
||||
genesisPath: `/tmp/pezkuwi-${chainName}-genesis.hex`,
|
||||
genesisSize: 0, // Would be actual file size
|
||||
wasmPath: `/tmp/pezkuwi-${chainName}-runtime.wasm`,
|
||||
wasmSize: 0, // Would be actual file size
|
||||
};
|
||||
}
|
||||
|
||||
// ========================================
|
||||
// STEP 3: REGISTER PARACHAIN
|
||||
// ========================================
|
||||
|
||||
/**
|
||||
* Register parachain on relay chain with genesis and WASM
|
||||
*
|
||||
* @param api - Polkadot.js API instance (relay chain)
|
||||
* @param paraId - Reserved ParaId
|
||||
* @param genesisFile - Genesis state file
|
||||
* @param wasmFile - Runtime WASM file
|
||||
* @param account - Account to sign transaction
|
||||
* @returns Transaction hash
|
||||
*/
|
||||
export async function registerParachain(
|
||||
api: ApiPromise,
|
||||
paraId: number,
|
||||
genesisFile: File,
|
||||
wasmFile: File,
|
||||
account: InjectedAccountWithMeta
|
||||
): Promise<string> {
|
||||
return new Promise(async (resolve, reject) => {
|
||||
try {
|
||||
const injector = await window.injectedWeb3[account.meta.source]?.enable?.('PezkuwiChain');
|
||||
if (!injector) {
|
||||
throw new Error('Failed to get injector from wallet extension');
|
||||
}
|
||||
|
||||
const signer = injector.signer;
|
||||
|
||||
// Read files as hex strings
|
||||
const genesisHex = await readFileAsHex(genesisFile);
|
||||
const wasmHex = await readFileAsHex(wasmFile);
|
||||
|
||||
// Call registrar.register() with paraId, genesis, and wasm
|
||||
const tx = api.tx.registrar.register(paraId, genesisHex, wasmHex);
|
||||
|
||||
let unsub: () => void;
|
||||
|
||||
await tx.signAndSend(account.address, { signer }, ({ status, dispatchError }) => {
|
||||
if (dispatchError) {
|
||||
if (dispatchError.isModule) {
|
||||
const decoded = api.registry.findMetaError(dispatchError.asModule);
|
||||
reject(new Error(`${decoded.section}.${decoded.name}: ${decoded.docs.join(' ')}`));
|
||||
} else {
|
||||
reject(new Error(dispatchError.toString()));
|
||||
}
|
||||
if (unsub) unsub();
|
||||
return;
|
||||
}
|
||||
|
||||
if (status.isInBlock) {
|
||||
resolve(status.asInBlock.toString());
|
||||
if (unsub) unsub();
|
||||
}
|
||||
}).then(unsubscribe => { unsub = unsubscribe; });
|
||||
|
||||
} catch (error) {
|
||||
reject(error);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper: Read File as hex string
|
||||
*/
|
||||
async function readFileAsHex(file: File): Promise<string> {
|
||||
return new Promise((resolve, reject) => {
|
||||
const reader = new FileReader();
|
||||
reader.onload = () => {
|
||||
const arrayBuffer = reader.result as ArrayBuffer;
|
||||
const uint8Array = new Uint8Array(arrayBuffer);
|
||||
const hex = '0x' + Array.from(uint8Array)
|
||||
.map(b => b.toString(16).padStart(2, '0'))
|
||||
.join('');
|
||||
resolve(hex);
|
||||
};
|
||||
reader.onerror = () => reject(new Error('Failed to read file'));
|
||||
reader.readAsArrayBuffer(file);
|
||||
});
|
||||
}
|
||||
|
||||
// ========================================
|
||||
// STEP 4: OPEN HRMP CHANNELS
|
||||
// ========================================
|
||||
|
||||
/**
|
||||
* Open bidirectional HRMP channels with target parachains
|
||||
*
|
||||
* @param api - Polkadot.js API instance (relay chain)
|
||||
* @param paraId - Our ParaId
|
||||
* @param targetParas - List of target ParaIds (e.g., [1000] for Asset Hub)
|
||||
* @param account - Account to sign transactions
|
||||
* @returns Array of opened channels
|
||||
*/
|
||||
export async function openHRMPChannels(
|
||||
api: ApiPromise,
|
||||
paraId: number,
|
||||
targetParas: number[],
|
||||
account: InjectedAccountWithMeta
|
||||
): Promise<HRMPChannel[]> {
|
||||
const channels: HRMPChannel[] = [];
|
||||
|
||||
for (const targetParaId of targetParas) {
|
||||
// Open channel: paraId → targetParaId
|
||||
const outgoingChannel = await openHRMPChannel(api, paraId, targetParaId, account);
|
||||
channels.push(outgoingChannel);
|
||||
|
||||
// Open channel: targetParaId → paraId (requires governance or target's approval)
|
||||
// Note: In practice, this requires the target parachain to initiate
|
||||
// For Asset Hub and system chains, this is usually done via governance
|
||||
}
|
||||
|
||||
return channels;
|
||||
}
|
||||
|
||||
/**
 * Open a single HRMP channel
 *
 * Submits hrmp.hrmpInitOpenChannel(receiver, ...) signed by the given wallet
 * account and resolves once the extrinsic is included in a block.
 *
 * NOTE(review): `sender` is only echoed back in the result — the on-chain
 * sender is implied by the signing origin; confirm that matches `sender`.
 * NOTE(review): the returned `channelId` is actually the hash of the
 * including block, not an on-chain channel identifier.
 */
async function openHRMPChannel(
  api: ApiPromise,
  sender: number,
  receiver: number,
  account: InjectedAccountWithMeta
): Promise<HRMPChannel> {
  return new Promise(async (resolve, reject) => {
    try {
      // Ask the wallet extension that owns this account for a signer.
      const injector = await window.injectedWeb3[account.meta.source]?.enable?.('PezkuwiChain');
      if (!injector) {
        throw new Error('Failed to get injector from wallet extension');
      }

      const signer = injector.signer;

      // Call hrmp.hrmpInitOpenChannel(recipient, proposedMaxCapacity, proposedMaxMessageSize)
      const maxCapacity = 1000;
      const maxMessageSize = 102400; // 100 KB

      const tx = api.tx.hrmp.hrmpInitOpenChannel(receiver, maxCapacity, maxMessageSize);

      // Assigned once signAndSend resolves with the unsubscribe function; the
      // status callback may fire first, hence the `if (unsub)` guards.
      let unsub: () => void;

      await tx.signAndSend(account.address, { signer }, ({ status, events, dispatchError }) => {
        if (dispatchError) {
          if (dispatchError.isModule) {
            // Decode pallet errors into human-readable section.name + docs.
            const decoded = api.registry.findMetaError(dispatchError.asModule);
            reject(new Error(`${decoded.section}.${decoded.name}: ${decoded.docs.join(' ')}`));
          } else {
            reject(new Error(dispatchError.toString()));
          }
          if (unsub) unsub();
          return;
        }

        if (status.isInBlock) {
          // See NOTE above: this is the block hash, stored as the channel id.
          const channelId = status.asInBlock.toString();
          resolve({ sender, receiver, channelId });
          if (unsub) unsub();
        }
      }).then(unsubscribe => { unsub = unsubscribe; });

    } catch (error) {
      reject(error);
    }
  });
}
|
||||
|
||||
// ========================================
|
||||
// STEP 5: REGISTER FOREIGN ASSETS
|
||||
// ========================================
|
||||
|
||||
/**
|
||||
* Register foreign assets from other chains (via XCM)
|
||||
*
|
||||
* @param api - Polkadot.js API instance (our parachain)
|
||||
* @param assets - List of foreign assets to register
|
||||
* @param account - Account to sign transactions
|
||||
* @returns List of registered assets with Asset IDs
|
||||
*/
|
||||
export async function registerForeignAssets(
|
||||
api: ApiPromise,
|
||||
assets: ForeignAsset[],
|
||||
account: InjectedAccountWithMeta
|
||||
): Promise<RegisteredAsset[]> {
|
||||
const registered: RegisteredAsset[] = [];
|
||||
|
||||
for (const asset of assets) {
|
||||
const registeredAsset = await registerSingleAsset(api, asset, account);
|
||||
registered.push(registeredAsset);
|
||||
}
|
||||
|
||||
return registered;
|
||||
}
|
||||
|
||||
/**
 * Register a single foreign asset
 *
 * Allocates the next free Asset ID, then submits a utility.batchAll of
 * assets.create + assets.setMetadata signed by the given wallet account,
 * resolving once the batch is included in a block.
 *
 * NOTE(review): `asset.location` is never used here — the XCM location is
 * not recorded on chain by this call; confirm whether a location mapping
 * step is expected elsewhere.
 */
async function registerSingleAsset(
  api: ApiPromise,
  asset: ForeignAsset,
  account: InjectedAccountWithMeta
): Promise<RegisteredAsset> {
  return new Promise(async (resolve, reject) => {
    try {
      // Ask the wallet extension that owns this account for a signer.
      const injector = await window.injectedWeb3[account.meta.source]?.enable?.('PezkuwiChain');
      if (!injector) {
        throw new Error('Failed to get injector from wallet extension');
      }

      const signer = injector.signer;

      // Get next available asset ID
      const nextAssetId = await getNextAssetId(api);

      // Create asset with metadata
      // Note: Adjust based on your pallet configuration
      const createTx = api.tx.assets.create(
        nextAssetId,
        account.address, // Admin
        asset.metadata.minBalance
      );

      const setMetadataTx = api.tx.assets.setMetadata(
        nextAssetId,
        asset.metadata.name,
        asset.metadata.symbol,
        asset.metadata.decimals
      );

      // Batch both transactions
      // batchAll is atomic: if either call fails, both are rolled back.
      const tx = api.tx.utility.batchAll([createTx, setMetadataTx]);

      // Assigned once signAndSend resolves with the unsubscribe function; the
      // status callback may fire first, hence the `if (unsub)` guards.
      let unsub: () => void;

      await tx.signAndSend(account.address, { signer }, ({ status, dispatchError }) => {
        if (dispatchError) {
          if (dispatchError.isModule) {
            // Decode pallet errors into human-readable section.name + docs.
            const decoded = api.registry.findMetaError(dispatchError.asModule);
            reject(new Error(`${decoded.section}.${decoded.name}: ${decoded.docs.join(' ')}`));
          } else {
            reject(new Error(dispatchError.toString()));
          }
          if (unsub) unsub();
          return;
        }

        if (status.isInBlock) {
          resolve({
            assetId: nextAssetId,
            symbol: asset.metadata.symbol,
          });
          if (unsub) unsub();
        }
      }).then(unsubscribe => { unsub = unsubscribe; });

    } catch (error) {
      reject(error);
    }
  });
}
|
||||
|
||||
/**
|
||||
* Get next available Asset ID
|
||||
*/
|
||||
async function getNextAssetId(api: ApiPromise): Promise<number> {
|
||||
// Query existing assets and find the next ID
|
||||
// This is a simplified version - adjust based on your implementation
|
||||
const assets = await api.query.assets.asset.entries();
|
||||
|
||||
if (assets.length === 0) {
|
||||
return 1000; // Start from 1000 for foreign assets
|
||||
}
|
||||
|
||||
const maxId = Math.max(...assets.map(([key]) => {
|
||||
const assetId = key.args[0].toNumber();
|
||||
return assetId;
|
||||
}));
|
||||
|
||||
return maxId + 1;
|
||||
}
|
||||
|
||||
// ========================================
|
||||
// STEP 6: TEST XCM TRANSFER
|
||||
// ========================================
|
||||
|
||||
/**
|
||||
* Test XCM transfer from Asset Hub USDT to our wUSDT
|
||||
*
|
||||
* @param api - Polkadot.js API instance (our parachain)
|
||||
* @param amount - Amount to transfer (in smallest unit)
|
||||
* @param account - Account to receive the transfer
|
||||
* @returns Test result with transaction hash and balance
|
||||
*/
|
||||
export async function testXCMTransfer(
|
||||
api: ApiPromise,
|
||||
amount: string,
|
||||
account: InjectedAccountWithMeta
|
||||
): Promise<XCMTestResult> {
|
||||
try {
|
||||
// This is a placeholder for XCM testing
|
||||
// In reality, you'd need to:
|
||||
// 1. Connect to Asset Hub
|
||||
// 2. Send limitedReserveTransferAssets() to our parachain
|
||||
// 3. Monitor for AssetReceived event on our side
|
||||
|
||||
// For now, return a mock success result
|
||||
return {
|
||||
txHash: '0x0000000000000000000000000000000000000000000000000000000000000000',
|
||||
success: false,
|
||||
balance: '0',
|
||||
error: 'XCM testing requires connection to relay chain and Asset Hub',
|
||||
};
|
||||
} catch (error) {
|
||||
return {
|
||||
txHash: '',
|
||||
success: false,
|
||||
balance: '0',
|
||||
error: error instanceof Error ? error.message : 'Unknown error',
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// ========================================
|
||||
// UTILITY FUNCTIONS
|
||||
// ========================================
|
||||
|
||||
/**
|
||||
* Get relay chain endpoint based on network selection
|
||||
*/
|
||||
export function getRelayChainEndpoint(relayChain: RelayChain): string {
|
||||
const endpoints = {
|
||||
westend: 'wss://westend-rpc.polkadot.io',
|
||||
rococo: 'wss://rococo-rpc.polkadot.io',
|
||||
polkadot: 'wss://rpc.polkadot.io',
|
||||
};
|
||||
|
||||
return endpoints[relayChain];
|
||||
}
|
||||
|
||||
/**
|
||||
* Asset Hub ParaId by relay chain
|
||||
*/
|
||||
export function getAssetHubParaId(relayChain: RelayChain): number {
|
||||
const paraIds = {
|
||||
westend: 1000, // Westend Asset Hub
|
||||
rococo: 1000, // Rococo Asset Hub
|
||||
polkadot: 1000, // Polkadot Asset Hub (Statemint)
|
||||
};
|
||||
|
||||
return paraIds[relayChain];
|
||||
}
|
||||
@@ -0,0 +1,228 @@
|
||||
const fs = require('fs');
const os = require('os');
const path = require('path');
const { spawnSync } = require('child_process');
|
||||
|
||||
// --- Configuration ---
|
||||
const pezkuwiSdkRoot = path.join(__dirname, '..', '..', 'Pezkuwi-SDK');
|
||||
const sdkDocsSourcePath = path.join(pezkuwiSdkRoot, 'docs', 'sdk');
|
||||
const mainDocsSourcePath = path.join(pezkuwiSdkRoot, 'docs'); // This is where whitepaper.md etc. are
|
||||
const publicPath = path.join(__dirname, 'public');
|
||||
const publicDocsPath = path.join(publicPath, 'docs'); // Where markdown/rs files will be copied
|
||||
const rustdocDestPath = path.join(publicPath, 'sdk_docs'); // Destination for BUILT rustdocs (e.g., public/sdk_docs/pezkuwi_sdk_docs/index.html)
|
||||
const structureOutputPath = path.join(publicPath, 'docs-structure.json');
|
||||
const rustdocBuildOutputPath = path.join(pezkuwiSdkRoot, 'target', 'doc'); // Output of cargo doc
|
||||
|
||||
// Absolute path to rustup (used to build rustdoc)
|
||||
const rustupPath = '/home/mamostehp/.cargo/bin/rustup';
|
||||
|
||||
// Path to the rebranding script (now .cjs)
|
||||
const rebrandScriptPath = path.join(__dirname, 'rebrand-rustdoc.cjs');
|
||||
|
||||
|
||||
// --- Helper Functions ---
|
||||
|
||||
/**
 * Run an external command synchronously with its stdio streamed to this
 * process. Throws if the command cannot be spawned, or if it exits with a
 * non-zero status.
 *
 * @param {string} command - Executable to run.
 * @param {string[]} args - Argument list.
 * @param {string} cwd - Working directory to run the command in.
 */
function runCommand(command, args, cwd) {
  console.log(`\n> Running command: ${command} ${args.join(' ')} in ${cwd}`);
  const { error, status } = spawnSync(command, args, { stdio: 'inherit', cwd });
  // `error` is set when the process could not be started at all
  // (e.g. executable not found); rethrow it unchanged.
  if (error) {
    console.error(`Error executing command: ${command}`);
    throw error;
  }
  // A started-but-failed command surfaces as a non-zero exit status.
  if (status !== 0) {
    throw new Error(`Command "${command} ${args.join(' ')}" failed with exit code ${status}`);
  }
}
|
||||
|
||||
/**
 * Recursively copy the tree at `src` into `dest`, creating `dest` (and any
 * missing parent directories) first.
 *
 * @param {string} src - Source directory.
 * @param {string} dest - Destination directory (created if absent).
 */
function copyRecursive(src, dest) {
  console.log(`↪️ Copying from ${src} to ${dest}...`);
  const { mkdirSync, cpSync } = fs;
  // Ensure the destination exists, then mirror the source tree into it.
  mkdirSync(dest, { recursive: true });
  cpSync(src, dest, { recursive: true });
}
|
||||
|
||||
/**
 * Delete a directory tree if it exists; a silent no-op when it does not.
 *
 * @param {string} dir - Directory to remove.
 */
function removeDir(dir) {
  console.log(`🧹 Clearing directory: ${dir}...`);
  // Guard clause: nothing to do when the directory is already gone.
  if (!fs.existsSync(dir)) {
    return;
  }
  fs.rmSync(dir, { recursive: true, force: true });
}
|
||||
|
||||
// Files whose entries are pulled out of the main navigation tree and
// grouped under the "General Docs" category instead (matching is done on
// the file's base name inside main()).
const generalCategoryFileNames = [
  'AUDIT.md',
  'BACKPORT.md',
  'RELEASE.md',
  'runtimes-pallets.md',
  'workflow_rebranding.md'
];
|
||||
|
||||
/**
 * Recursively walk `currentDir` and build a nested object mapping
 * human-readable titles to documentation file paths (relative to the docs
 * root, with forward slashes). Directories become nested objects; only
 * `.md` and `.rs` files are included, directories first, alphabetical
 * within each kind. Empty directories are omitted.
 *
 * @param {string} currentDir - Directory to walk.
 * @returns {object} Nested { title: path-or-subtree } structure.
 */
function generateRecursiveStructure(currentDir) {
  const currentStructure = {};
  // Entries that must never appear in the navigation tree.
  // FIX: previously re-created on every loop iteration; hoisted out.
  const ignoreList = ['images', 'sdk', 'target', 'Cargo.toml', 'build.rs'];

  const items = fs.readdirSync(currentDir);

  // Cache directory-ness once per entry. FIX: the sort comparator used to
  // call statSync for both operands of every comparison (O(n log n) stats).
  // Entries that cannot be stat'ed sort as files, mirroring the old
  // "swallow the error" behavior.
  const isDir = new Map();
  for (const item of items) {
    try {
      isDir.set(item, fs.statSync(path.join(currentDir, item)).isDirectory());
    } catch (e) {
      isDir.set(item, false);
    }
  }

  items.sort((a, b) => {
    const aIsDir = isDir.get(a);
    const bIsDir = isDir.get(b);
    if (aIsDir && !bIsDir) return -1; // Directories first
    if (!aIsDir && bIsDir) return 1;
    return a.localeCompare(b); // Then alphabetical
  });

  for (const item of items) {
    if (ignoreList.includes(item) || item.startsWith('.') || item === 'Cargo.lock') {
      continue;
    }

    const fullPath = path.join(currentDir, item);
    const stat = fs.statSync(fullPath);

    // "my_file-name.md" -> "My File Name"
    let title = item.replace(/\.(md|rs)$/, '');
    title = title.replace(/_/g, ' ').replace(/-/g, ' ').replace(/\b\w/g, l => l.toUpperCase());

    // Paths in the generated structure are always relative to the docs
    // root, normalized to forward slashes so they work as URLs on Windows.
    const relativePath = path.relative(mainDocsSourcePath, fullPath).replace(/\\/g, '/');

    if (stat.isDirectory()) {
      const subStructure = generateRecursiveStructure(fullPath);
      // Skip directories that contain no documentable files.
      if (Object.keys(subStructure).length > 0) {
        currentStructure[title] = subStructure;
      }
    } else if (item.endsWith('.md') || item.endsWith('.rs')) {
      currentStructure[title] = relativePath;
    }
  }
  return currentStructure;
}
|
||||
|
||||
|
||||
// --- Main Execution ---
|
||||
|
||||
/**
 * Orchestrates the documentation pipeline:
 *   1. build rustdoc (best-effort)  2. rebrand it  3. clean public dirs
 *   4. copy markdown docs  5. copy built rustdoc  6. emit the navigation
 *      structure JSON consumed by the docs sidebar.
 * Exits the process with code 1 on any fatal error.
 */
function main() {
  try {
    console.log('--- Documentation Automation ---');
    console.log(`Pezkuwi-SDK Root: ${pezkuwiSdkRoot}`);
    console.log(`SDK Docs Source: ${sdkDocsSourcePath}`);
    console.log(`Main Docs Source: ${mainDocsSourcePath}`);

    // 1. Build the Rust SDK documentation (if tools available)
    console.log('\n--- Step 1: Building SDK Documentation (Attempting) ---');
    let rustdocBuiltSuccessfully = false;
    try {
      runCommand(rustupPath, ['run', 'stable', 'cargo', 'doc', '--no-deps'], sdkDocsSourcePath);
      console.log('✅ SDK documentation built successfully.');
      rustdocBuiltSuccessfully = true;
    } catch (e) {
      // Non-fatal: the site can still ship markdown docs without rustdoc.
      console.warn(`⚠️ Warning: Could not build SDK documentation. Error: ${e.message}`);
      console.warn('   This might be due to missing Rust toolchain or environment issues. Proceeding without building rustdoc.');
    }

    // 2. Perform Rebranding on the Built Rustdoc (if built)
    if (rustdocBuiltSuccessfully && fs.existsSync(rustdocBuildOutputPath)) {
      console.log('\n--- Step 2: Rebranding Built SDK Documentation ---');
      runCommand('node', [rebrandScriptPath, rustdocBuildOutputPath], __dirname); // Run rebranding script
      console.log('✅ Built SDK docs rebranded successfully.');
    }

    // 3. Clean up old public documentation artifacts
    console.log('\n--- Step 3: Cleaning Public Directories ---');
    removeDir(publicDocsPath);
    removeDir(rustdocDestPath);

    // 4. Copy main Markdown/RS files from Pezkuwi-SDK/docs to public/docs
    console.log('\n--- Step 4: Copying Main Documentation Files ---');
    copyRecursive(mainDocsSourcePath, publicDocsPath);
    console.log('✅ Main documentation files copied successfully.');

    // 5. Copy the BUILT and Rebranded Rustdoc site (if built successfully)
    if (rustdocBuiltSuccessfully && fs.existsSync(rustdocBuildOutputPath)) {
      console.log('\n--- Step 5: Copying Built and Rebranded SDK Documentation ---');
      copyRecursive(rustdocBuildOutputPath, rustdocDestPath);
      console.log('✅ Built and rebranded SDK docs copied successfully.');
    } else {
      console.warn('\n⚠️ Warning: Rustdoc build output not found or build failed. Skipping copy of built SDK docs.');
    }

    // 6. Generate the final navigation structure
    console.log('\n--- Step 6: Generating Navigation Structure ---');
    const rawStructure = generateRecursiveStructure(mainDocsSourcePath);

    const finalStructure = {};
    const generalDocs = {};

    // Split the raw tree: loose files named in generalCategoryFileNames go
    // into "General Docs"; everything else stays top-level.
    for (const key in rawStructure) {
      if (typeof rawStructure[key] === 'string' && generalCategoryFileNames.includes(path.basename(rawStructure[key]))) {
        generalDocs[key] = rawStructure[key];
      } else {
        finalStructure[key] = rawStructure[key]; // Keep as is (folder or other direct file)
      }
    }

    // Add "Getting Started" as the first category
    finalStructure['Getting Started'] = {
      'Introduction': 'introduction.md'
    };
    // BUG FIX: the directory walk also picks up introduction.md as a
    // top-level "Introduction" entry; drop it so the page is only listed
    // under "Getting Started" (it previously appeared twice in the
    // generated docs-structure.json).
    delete finalStructure['Introduction'];

    // Move the whitepaper under "Getting Started".
    // BUG FIX: the original deleted rawStructure['Whitepaper'] AFTER the
    // entry had already been copied into finalStructure, so the duplicate
    // top-level "Whitepaper" category survived into docs-structure.json.
    // Deleting from finalStructure actually prevents the duplication.
    if (finalStructure['Whitepaper']) {
      finalStructure['Getting Started']['Whitepaper'] = 'whitepaper/whitepaper.md';
      delete finalStructure['Whitepaper'];
    }

    // Add SDK Reference section (always visible)
    finalStructure['SDK Reference'] = {
      '📚 Rust SDK Docs': 'sdk://open',
      'Runtimes & Pallets': 'runtimes-pallets.md'
    };

    // Remove items that are moved to other categories
    if (generalDocs['Genesis Engineering Plan']) {
      delete generalDocs['Genesis Engineering Plan'];
    }
    if (generalDocs['Runtimes Pallets']) {
      delete generalDocs['Runtimes Pallets'];
    }

    // Add "General Docs" as a top-level category
    if (Object.keys(generalDocs).length > 0) {
      finalStructure['General Docs'] = generalDocs;
    }

    // Sort the top-level keys for consistent sidebar order
    const sortedKeys = Object.keys(finalStructure).sort((a, b) => {
      // Priority order: Getting Started, SDK Reference, General Docs, Contributor, Whitepaper, then alphabetical for others
      const order = ['Getting Started', 'SDK Reference', 'General Docs', 'Contributor', 'Whitepaper'];
      const indexA = order.indexOf(a);
      const indexB = order.indexOf(b);

      if (indexA === -1 && indexB === -1) { // Both not in priority list
        return a.localeCompare(b);
      }
      if (indexA === -1) return 1; // A not in list, B is, so B comes first
      if (indexB === -1) return -1; // B not in list, A is, so A comes first
      return indexA - indexB; // Sort by priority index
    });

    const finalSortedStructure = {};
    for (const key of sortedKeys) {
      finalSortedStructure[key] = finalStructure[key];
    }

    fs.writeFileSync(structureOutputPath, JSON.stringify(finalSortedStructure, null, 2));
    console.log(`✅ Successfully generated docs structure at ${structureOutputPath}`);

    console.log('\n🚀 Documentation automation complete!');

  } catch (error) {
    console.error('\n❌ FATAL ERROR during documentation automation:');
    console.error(error);
    process.exit(1);
  }
}

main();
|
||||
@@ -4,7 +4,9 @@
|
||||
"version": "0.0.0",
|
||||
"type": "module",
|
||||
"scripts": {
|
||||
"predev": "node generate-docs-structure.cjs",
|
||||
"dev": "vite",
|
||||
"prebuild": "node generate-docs-structure.cjs",
|
||||
"build": "vite build",
|
||||
"build:dev": "vite build --mode development",
|
||||
"lint": "eslint .",
|
||||
@@ -49,11 +51,15 @@
|
||||
"@sentry/react": "^10.26.0",
|
||||
"@supabase/supabase-js": "^2.49.4",
|
||||
"@tanstack/react-query": "^5.56.2",
|
||||
"@types/dompurify": "^3.0.5",
|
||||
"@types/react-syntax-highlighter": "^15.5.13",
|
||||
"@types/uuid": "^10.0.0",
|
||||
"buffer": "^6.0.3",
|
||||
"class-variance-authority": "^0.7.1",
|
||||
"clsx": "^2.1.1",
|
||||
"cmdk": "^1.0.0",
|
||||
"date-fns": "^3.6.0",
|
||||
"dompurify": "^3.3.1",
|
||||
"embla-carousel-react": "^8.3.0",
|
||||
"highlight.js": "^11.9.0",
|
||||
"i18next": "^23.7.6",
|
||||
@@ -70,6 +76,7 @@
|
||||
"react-i18next": "^14.0.0",
|
||||
"react-resizable-panels": "^2.1.3",
|
||||
"react-router-dom": "^6.26.2",
|
||||
"react-syntax-highlighter": "^16.1.0",
|
||||
"recharts": "^2.12.7",
|
||||
"sonner": "^1.5.0",
|
||||
"tailwind-merge": "^2.5.2",
|
||||
@@ -101,6 +108,7 @@
|
||||
"typescript": "^5.5.3",
|
||||
"typescript-eslint": "^8.0.1",
|
||||
"vite": "^5.4.1",
|
||||
"vite-plugin-node-polyfills": "^0.24.0",
|
||||
"vitest": "^4.0.10"
|
||||
}
|
||||
}
|
||||
|
||||
|
After Width: | Height: | Size: 476 KiB |
|
After Width: | Height: | Size: 56 KiB |
@@ -0,0 +1,35 @@
|
||||
{
|
||||
"Getting Started": {
|
||||
"Introduction": "introduction.md",
|
||||
"Whitepaper": "whitepaper/whitepaper.md"
|
||||
},
|
||||
"SDK Reference": {
|
||||
"📚 Rust SDK Docs": "sdk://open",
|
||||
"Runtimes & Pallets": "runtimes-pallets.md"
|
||||
},
|
||||
"General Docs": {
|
||||
"AUDIT": "AUDIT.md",
|
||||
"BACKPORT": "BACKPORT.md",
|
||||
"RELEASE": "RELEASE.md",
|
||||
"Workflow Rebranding": "workflow_rebranding.md"
|
||||
},
|
||||
"Contributor": {
|
||||
"CODE OF CONDUCT": "contributor/CODE_OF_CONDUCT.md",
|
||||
"Commands Readme": "contributor/commands-readme.md",
|
||||
"Container": "contributor/container.md",
|
||||
"CONTRIBUTING": "contributor/CONTRIBUTING.md",
|
||||
"DEPRECATION CHECKLIST": "contributor/DEPRECATION_CHECKLIST.md",
|
||||
"Docker": "contributor/docker.md",
|
||||
"DOCUMENTATION GUIDELINES": "contributor/DOCUMENTATION_GUIDELINES.md",
|
||||
"Markdown Linting": "contributor/markdown_linting.md",
|
||||
"Prdoc": "contributor/prdoc.md",
|
||||
"PULL REQUEST TEMPLATE": "contributor/PULL_REQUEST_TEMPLATE.md",
|
||||
"SECURITY": "contributor/SECURITY.md",
|
||||
"STYLE GUIDE": "contributor/STYLE_GUIDE.md",
|
||||
"Weight Generation": "contributor/weight-generation.md"
|
||||
},
|
||||
"Whitepaper": {
|
||||
"Whitepaper": "whitepaper/whitepaper.md"
|
||||
},
|
||||
"Introduction": "introduction.md"
|
||||
}
|
||||
|
After Width: | Height: | Size: 1.7 MiB |
|
After Width: | Height: | Size: 1.6 MiB |
|
After Width: | Height: | Size: 1.8 MiB |
|
After Width: | Height: | Size: 1.6 MiB |
|
After Width: | Height: | Size: 1.5 MiB |
|
After Width: | Height: | Size: 1.5 MiB |
|
After Width: | Height: | Size: 1.7 MiB |
|
After Width: | Height: | Size: 1.9 MiB |
|
After Width: | Height: | Size: 1.6 MiB |
|
After Width: | Height: | Size: 634 KiB |
|
After Width: | Height: | Size: 5.0 MiB |
|
After Width: | Height: | Size: 5.0 MiB |
|
After Width: | Height: | Size: 601 KiB |
|
After Width: | Height: | Size: 208 KiB |
|
After Width: | Height: | Size: 208 KiB |
|
After Width: | Height: | Size: 355 KiB |
|
After Width: | Height: | Size: 171 KiB |
|
After Width: | Height: | Size: 586 KiB |
|
After Width: | Height: | Size: 742 KiB |
|
After Width: | Height: | Size: 750 KiB |
|
After Width: | Height: | Size: 269 KiB |
|
After Width: | Height: | Size: 18 KiB |
|
After Width: | Height: | Size: 265 KiB |
|
After Width: | Height: | Size: 99 KiB |
|
After Width: | Height: | Size: 39 KiB |
|
Before Width: | Height: | Size: 129 KiB After Width: | Height: | Size: 129 KiB |
|
After Width: | Height: | Size: 51 KiB |
|
After Width: | Height: | Size: 178 KiB |
|
After Width: | Height: | Size: 178 KiB |
|
After Width: | Height: | Size: 55 KiB |
|
After Width: | Height: | Size: 429 KiB |
|
After Width: | Height: | Size: 666 KiB |
|
After Width: | Height: | Size: 632 KiB |
|
After Width: | Height: | Size: 55 KiB |
|
After Width: | Height: | Size: 670 KiB |
|
After Width: | Height: | Size: 725 KiB |
|
After Width: | Height: | Size: 319 KiB |
|
After Width: | Height: | Size: 393 KiB |
|
After Width: | Height: | Size: 21 KiB |
|
After Width: | Height: | Size: 429 KiB |
|
After Width: | Height: | Size: 72 KiB |
|
After Width: | Height: | Size: 128 KiB |
|
After Width: | Height: | Size: 393 KiB |
|
After Width: | Height: | Size: 90 KiB |
|
After Width: | Height: | Size: 252 KiB |
@@ -0,0 +1,12 @@
|
||||
flowchart TD
|
||||
dot[pezkuwichain.io] --> devhub[pezkuwi_sdk_docs]
|
||||
|
||||
devhub --> pezkuwi_sdk
|
||||
devhub --> reference_docs
|
||||
devhub --> guides
|
||||
devhub --> external_resources
|
||||
|
||||
pezkuwi_sdk --> substrate
|
||||
pezkuwi_sdk --> frame
|
||||
pezkuwi_sdk --> xcm
|
||||
pezkuwi_sdk --> templates
|
||||
@@ -0,0 +1,5 @@
|
||||
flowchart TD
|
||||
E(Extrinsic) ---> I(Inherent);
|
||||
E --> T(Transaction)
|
||||
T --> ST("Signed (aka. Transaction)")
|
||||
T --> UT(Unsigned)
|
||||
@@ -0,0 +1,3 @@
|
||||
flowchart LR
|
||||
RuntimeCall --"TryInto"--> PalletCall
|
||||
PalletCall --"Into"--> RuntimeCall
|
||||
@@ -0,0 +1,10 @@
|
||||
flowchart LR
|
||||
|
||||
subgraph Pezkuwi[The Pezkuwi Relay Chain]
|
||||
PezkuwiNode[Pezkuwi Node]
|
||||
PezkuwiRuntime[Pezkuwi Runtime]
|
||||
end
|
||||
|
||||
FRAME -.-> PezkuwiRuntime
|
||||
PezkuwiSDK[Pezkuwi SDK Node Libraries] -.-> PezkuwiNode
|
||||
|
||||
@@ -0,0 +1,8 @@
|
||||
flowchart LR
|
||||
subgraph PezkuwiSDKChain[A Pezkuwi SDK-based blockchain]
|
||||
Node
|
||||
Runtime
|
||||
end
|
||||
|
||||
FRAME -.-> Runtime
|
||||
PezkuwiSDK[Pezkuwi SDK Node Libraries] -.-> Node
|
||||
@@ -0,0 +1,11 @@
|
||||
flowchart LR
|
||||
subgraph TeyrChain[A Pezkuwi TeyrChain]
|
||||
TeyrChainNode[TeyrChain Node]
|
||||
TeyrChainRuntime[TeyrChain Runtime]
|
||||
end
|
||||
|
||||
FRAME -.-> TeyrChainRuntime
|
||||
PezkuwiSDK[Pezkuwi SDK Node Libraries] -.-> TeyrChainNode
|
||||
|
||||
CumulusC[Cumulus Node Libraries] -.-> TeyrChainNode
|
||||
CumulusR[Cumulus Runtime Libraries] -.-> TeyrChainRuntime
|
||||
@@ -0,0 +1,16 @@
|
||||
flowchart TB
|
||||
subgraph Node[Node's View Of The State 🙈]
|
||||
direction LR
|
||||
0x1234 --> 0x2345
|
||||
0x3456 --> 0x4567
|
||||
0x5678 --> 0x6789
|
||||
:code --> code[wasm code]
|
||||
end
|
||||
|
||||
subgraph Runtime[Runtime's View Of The State 🙉]
|
||||
direction LR
|
||||
ab[alice's balance] --> abv[known value]
|
||||
bb[bob's balance] --> bbv[known value]
|
||||
cb[charlie's balance] --> cbv[known value]
|
||||
c2[:code] --> c22[wasm code]
|
||||
end
|
||||
@@ -0,0 +1,21 @@
|
||||
flowchart LR
|
||||
%%{init: {'flowchart' : {'curve' : 'linear'}}}%%
|
||||
subgraph BData[Blockchain Database]
|
||||
direction LR
|
||||
BN[Block N] -.-> BN1[Block N+1]
|
||||
end
|
||||
|
||||
subgraph SData[State Database]
|
||||
direction LR
|
||||
SN[State N] -.-> SN1[State N+1] -.-> SN2[State N+2]
|
||||
end
|
||||
|
||||
BN --> STFN[STF]
|
||||
SN --> STFN[STF]
|
||||
STFN[STF] --> SN1
|
||||
|
||||
BN1 --> STFN1[STF]
|
||||
SN1 --> STFN1[STF]
|
||||
STFN1[STF] --> SN2
|
||||
|
||||
|
||||
@@ -0,0 +1,4 @@
|
||||
flowchart LR
|
||||
B[Block] --> STF
|
||||
S[State] --> STF
|
||||
STF --> NS[New State]
|
||||
@@ -0,0 +1,12 @@
|
||||
graph TB
|
||||
subgraph Substrate
|
||||
direction LR
|
||||
subgraph Node
|
||||
end
|
||||
|
||||
subgraph Runtime
|
||||
end
|
||||
|
||||
Node --runtime-api--> Runtime
|
||||
Runtime --host-functions--> Node
|
||||
end
|
||||
@@ -0,0 +1,2 @@
|
||||
flowchart LR
|
||||
T[Using a Template] --> P[Writing Your Own FRAME-Based Pallet] --> C[Custom Node]
|
||||
@@ -0,0 +1,8 @@
|
||||
graph TB
|
||||
subgraph Substrate
|
||||
direction LR
|
||||
subgraph Node
|
||||
end
|
||||
subgraph Runtime
|
||||
end
|
||||
end
|
||||
@@ -0,0 +1,20 @@
|
||||
graph TB
|
||||
subgraph Substrate
|
||||
direction LR
|
||||
subgraph Node
|
||||
Database
|
||||
Networking
|
||||
Consensus
|
||||
end
|
||||
subgraph Runtime
|
||||
subgraph FRAME
|
||||
direction LR
|
||||
Governance
|
||||
Currency
|
||||
Staking
|
||||
Identity
|
||||
end
|
||||
end
|
||||
Node --runtime-api--> Runtime
|
||||
Runtime --host-functions--> Node
|
||||
end
|
||||
@@ -0,0 +1,213 @@
|
||||
[package]
|
||||
name = "pezkuwi-sdk-docs"
|
||||
description = "The one stop shop for developers of the pezkuwi-sdk"
|
||||
license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
|
||||
homepage = "https://docs.pezkuwichain.io/sdk/"
|
||||
repository.workspace = true
|
||||
authors.workspace = true
|
||||
edition.workspace = true
|
||||
# This crate is not publish-able to crates.io for now because of docify.
|
||||
publish = false
|
||||
version = "0.0.1"
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
# Needed for all FRAME-based code
|
||||
codec = { workspace = true }
|
||||
frame = { features = [
|
||||
"experimental",
|
||||
"runtime",
|
||||
], workspace = true, default-features = true }
|
||||
pallet-contracts = { workspace = true }
|
||||
pallet-default-config-example = { workspace = true, default-features = true }
|
||||
pallet-example-offchain-worker = { workspace = true, default-features = true }
|
||||
pallet-examples = { workspace = true }
|
||||
scale-info = { workspace = true }
|
||||
|
||||
# How we build docs in rust-docs
|
||||
docify = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
simple-mermaid = { workspace = true }
|
||||
|
||||
# Pezkuwi SDK deps, typically all should only be in scope such that we can link to their doc item.
|
||||
chain-spec-builder = { workspace = true, default-features = true }
|
||||
frame-benchmarking = { workspace = true }
|
||||
frame-executive = { workspace = true }
|
||||
frame-metadata-hash-extension = { workspace = true, default-features = true }
|
||||
frame-support = { workspace = true }
|
||||
frame-system = { workspace = true }
|
||||
kitchensink-runtime = { workspace = true }
|
||||
log = { workspace = true, default-features = true }
|
||||
node-cli = { workspace = true }
|
||||
pallet-example-authorization-tx-extension = { workspace = true, default-features = true }
|
||||
pallet-example-single-block-migrations = { workspace = true, default-features = true }
|
||||
pezkuwi-sdk = { features = [
|
||||
"runtime-full",
|
||||
], workspace = true, default-features = true }
|
||||
subkey = { workspace = true, default-features = true }
|
||||
|
||||
# Substrate Client
|
||||
sc-chain-spec = { workspace = true, default-features = true }
|
||||
sc-cli = { workspace = true, default-features = true }
|
||||
sc-client-db = { workspace = true, default-features = true }
|
||||
sc-consensus-aura = { workspace = true, default-features = true }
|
||||
sc-consensus-babe = { workspace = true, default-features = true }
|
||||
sc-consensus-beefy = { workspace = true, default-features = true }
|
||||
sc-consensus-grandpa = { workspace = true, default-features = true }
|
||||
sc-consensus-manual-seal = { workspace = true, default-features = true }
|
||||
sc-consensus-pow = { workspace = true, default-features = true }
|
||||
sc-executor = { workspace = true, default-features = true }
|
||||
sc-network = { workspace = true, default-features = true }
|
||||
sc-rpc = { workspace = true, default-features = true }
|
||||
sc-rpc-api = { workspace = true, default-features = true }
|
||||
sc-service = { workspace = true, default-features = true }
|
||||
|
||||
substrate-wasm-builder = { workspace = true, default-features = true }
|
||||
|
||||
# Cumulus
|
||||
cumulus-client-service = { workspace = true, default-features = true }
|
||||
cumulus-pallet-aura-ext = { workspace = true, default-features = true }
|
||||
cumulus-pallet-teyrchain-system = { workspace = true, default-features = true }
|
||||
cumulus-pallet-weight-reclaim = { workspace = true, default-features = true }
|
||||
cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true }
|
||||
teyrchain-info = { workspace = true, default-features = true }
|
||||
|
||||
# Omni Node
|
||||
pezkuwi-omni-node-lib = { workspace = true, default-features = true }
|
||||
|
||||
# Pallets and FRAME internals
|
||||
pallet-asset-conversion-tx-payment = { workspace = true, default-features = true }
|
||||
pallet-asset-tx-payment = { workspace = true, default-features = true }
|
||||
pallet-assets = { workspace = true, default-features = true }
|
||||
pallet-aura = { workspace = true, default-features = true }
|
||||
pallet-babe = { workspace = true, default-features = true }
|
||||
pallet-balances = { workspace = true, default-features = true }
|
||||
pallet-collective = { workspace = true, default-features = true }
|
||||
pallet-democracy = { workspace = true, default-features = true }
|
||||
pallet-grandpa = { workspace = true, default-features = true }
|
||||
pallet-nfts = { workspace = true, default-features = true }
|
||||
pallet-preimage = { workspace = true, default-features = true }
|
||||
pallet-scheduler = { workspace = true, default-features = true }
|
||||
pallet-skip-feeless-payment = { workspace = true, default-features = true }
|
||||
pallet-timestamp = { workspace = true, default-features = true }
|
||||
pallet-transaction-payment = { workspace = true, default-features = true }
|
||||
pallet-uniques = { workspace = true, default-features = true }
|
||||
|
||||
# Primitives
|
||||
sp-api = { workspace = true, default-features = true }
|
||||
sp-arithmetic = { workspace = true, default-features = true }
|
||||
sp-core = { workspace = true, default-features = true }
|
||||
sp-genesis-builder = { workspace = true, default-features = true }
|
||||
sp-io = { workspace = true, default-features = true }
|
||||
sp-keyring = { workspace = true, default-features = true }
|
||||
sp-offchain = { workspace = true, default-features = true }
|
||||
sp-runtime = { workspace = true, default-features = true }
|
||||
sp-runtime-interface = { workspace = true, default-features = true }
|
||||
sp-std = { workspace = true, default-features = true }
|
||||
sp-storage = { workspace = true, default-features = true }
|
||||
sp-tracing = { workspace = true, default-features = true }
|
||||
sp-version = { workspace = true, default-features = true }
|
||||
sp-weights = { workspace = true, default-features = true }
|
||||
|
||||
# XCM
|
||||
pallet-xcm = { workspace = true }
|
||||
xcm = { workspace = true, default-features = true }
|
||||
xcm-builder = { workspace = true }
|
||||
xcm-docs = { workspace = true }
|
||||
xcm-executor = { workspace = true }
|
||||
xcm-simulator = { workspace = true }
|
||||
|
||||
# Runtime guides
|
||||
chain-spec-guide-runtime = { workspace = true, default-features = true }
|
||||
|
||||
# Templates
|
||||
minimal-template-runtime = { workspace = true, default-features = true }
|
||||
solochain-template-runtime = { workspace = true, default-features = true }
|
||||
|
||||
# local packages
|
||||
first-runtime = { workspace = true, default-features = true }
|
||||
|
||||
[dev-dependencies]
|
||||
assert_cmd = { workspace = true }
|
||||
cmd_lib = { workspace = true }
|
||||
rand = { workspace = true, default-features = true }
|
||||
tokio = { workspace = true }
|
||||
|
||||
[features]
|
||||
runtime-benchmarks = [
|
||||
"chain-spec-builder/runtime-benchmarks",
|
||||
"chain-spec-guide-runtime/runtime-benchmarks",
|
||||
"cumulus-client-service/runtime-benchmarks",
|
||||
"cumulus-pallet-aura-ext/runtime-benchmarks",
|
||||
"cumulus-pallet-teyrchain-system/runtime-benchmarks",
|
||||
"cumulus-pallet-weight-reclaim/runtime-benchmarks",
|
||||
"cumulus-primitives-proof-size-hostfunction/runtime-benchmarks",
|
||||
"first-runtime/runtime-benchmarks",
|
||||
"frame-benchmarking/runtime-benchmarks",
|
||||
"frame-executive/runtime-benchmarks",
|
||||
"frame-metadata-hash-extension/runtime-benchmarks",
|
||||
"frame-support/runtime-benchmarks",
|
||||
"frame-system/runtime-benchmarks",
|
||||
"frame/runtime-benchmarks",
|
||||
"kitchensink-runtime/runtime-benchmarks",
|
||||
"minimal-template-runtime/runtime-benchmarks",
|
||||
"node-cli/runtime-benchmarks",
|
||||
"pallet-asset-conversion-tx-payment/runtime-benchmarks",
|
||||
"pallet-asset-tx-payment/runtime-benchmarks",
|
||||
"pallet-assets/runtime-benchmarks",
|
||||
"pallet-aura/runtime-benchmarks",
|
||||
"pallet-babe/runtime-benchmarks",
|
||||
"pallet-balances/runtime-benchmarks",
|
||||
"pallet-collective/runtime-benchmarks",
|
||||
"pallet-contracts/runtime-benchmarks",
|
||||
"pallet-default-config-example/runtime-benchmarks",
|
||||
"pallet-democracy/runtime-benchmarks",
|
||||
"pallet-example-authorization-tx-extension/runtime-benchmarks",
|
||||
"pallet-example-offchain-worker/runtime-benchmarks",
|
||||
"pallet-example-single-block-migrations/runtime-benchmarks",
|
||||
"pallet-examples/runtime-benchmarks",
|
||||
"pallet-grandpa/runtime-benchmarks",
|
||||
"pallet-nfts/runtime-benchmarks",
|
||||
"pallet-preimage/runtime-benchmarks",
|
||||
"pallet-scheduler/runtime-benchmarks",
|
||||
"pallet-skip-feeless-payment/runtime-benchmarks",
|
||||
"pallet-timestamp/runtime-benchmarks",
|
||||
"pallet-transaction-payment/runtime-benchmarks",
|
||||
"pallet-uniques/runtime-benchmarks",
|
||||
"pallet-xcm/runtime-benchmarks",
|
||||
"pezkuwi-omni-node-lib/runtime-benchmarks",
|
||||
"pezkuwi-sdk/runtime-benchmarks",
|
||||
"sc-chain-spec/runtime-benchmarks",
|
||||
"sc-cli/runtime-benchmarks",
|
||||
"sc-client-db/runtime-benchmarks",
|
||||
"sc-consensus-aura/runtime-benchmarks",
|
||||
"sc-consensus-babe/runtime-benchmarks",
|
||||
"sc-consensus-beefy/runtime-benchmarks",
|
||||
"sc-consensus-grandpa/runtime-benchmarks",
|
||||
"sc-consensus-manual-seal/runtime-benchmarks",
|
||||
"sc-consensus-pow/runtime-benchmarks",
|
||||
"sc-executor/runtime-benchmarks",
|
||||
"sc-network/runtime-benchmarks",
|
||||
"sc-rpc-api/runtime-benchmarks",
|
||||
"sc-rpc/runtime-benchmarks",
|
||||
"sc-service/runtime-benchmarks",
|
||||
"solochain-template-runtime/runtime-benchmarks",
|
||||
"sp-api/runtime-benchmarks",
|
||||
"sp-genesis-builder/runtime-benchmarks",
|
||||
"sp-io/runtime-benchmarks",
|
||||
"sp-keyring/runtime-benchmarks",
|
||||
"sp-offchain/runtime-benchmarks",
|
||||
"sp-runtime-interface/runtime-benchmarks",
|
||||
"sp-runtime/runtime-benchmarks",
|
||||
"sp-version/runtime-benchmarks",
|
||||
"subkey/runtime-benchmarks",
|
||||
"substrate-wasm-builder/runtime-benchmarks",
|
||||
"teyrchain-info/runtime-benchmarks",
|
||||
"xcm-builder/runtime-benchmarks",
|
||||
"xcm-docs/runtime-benchmarks",
|
||||
"xcm-executor/runtime-benchmarks",
|
||||
"xcm-simulator/runtime-benchmarks",
|
||||
"xcm/runtime-benchmarks",
|
||||
]
|
||||
@@ -0,0 +1,2 @@
|
||||
<script> mermaid.init({ startOnLoad: true, theme: "dark" }, "pre.language-mermaid > code");</script>
|
||||
|
||||
@@ -0,0 +1,147 @@
|
||||
<script>
|
||||
/**
 * Build a "Table of Contents" panel from the current rustdoc page's
 * h2/h3/h4 headers and insert it near the top of the sidebar. When the
 * page has a "Modules" header, each module link from the item table is
 * nested beneath it.
 * Assumes the rustdoc page layout (.sidebar, #main-content); invoked from
 * the DOMContentLoaded listener below only on pezkuwi/polkadot SDK pages.
 */
function createToC() {
  let sidebar = document.querySelector(".sidebar");
  let headers = document.querySelectorAll("#main-content h2, #main-content h3, #main-content h4");
  console.log(`detected polkadot_sdk_docs: headers: ${headers.length}`);

  let toc = document.createElement("div");
  toc.classList.add("sidebar-table-of-contents");
  // appendChild returns the appended child (the text node), so .parentNode
  // climbs back up to the <h2> that is actually inserted into the ToC.
  toc.appendChild(document.createElement("h2").appendChild(document.createTextNode("Table of Contents")).parentNode);

  let modules = document.querySelectorAll("main .item-table a.mod");

  // the first two headers are always junk
  // NOTE(review): despite the comment above, no headers are skipped below —
  // every matched header gets a ToC entry. Confirm whether skipping the
  // first two was intended.
  headers.forEach(header => {
    let link = document.createElement("a");
    link.href = "#" + header.id;
    // Strip rustdoc's section marker from the visible label.
    const headerTextContent = header.textContent.replace("§", "")
    link.textContent = headerTextContent;
    // Mirror the header level (h2/h3/h4) as a class so CSS can indent it.
    link.className = header.tagName.toLowerCase();

    toc.appendChild(link);

    // Under the "Modules" section, list each module as an indented entry.
    if (header.id == "modules" && headerTextContent == "Modules") {
      modules.forEach(module => {
        let link = document.createElement("a");
        link.href = module.href;
        link.textContent = module.textContent;
        link.className = "h3";

        toc.appendChild(link);
      });
    }
  });

  // insert toc as the second child in sidebar
  let sidebar_children = sidebar.children;
  if (sidebar_children.length > 1) {
    sidebar.insertBefore(toc, sidebar_children[1]);
  } else {
    sidebar.appendChild(toc);
  }
}
|
||||
|
||||
/**
 * Collapse rustdoc's stock sidebar content behind a toggle button.
 * The '.sidebar-elems' container starts hidden; clicking the injected
 * button alternates between showing ("Collapse") and hiding
 * ("Expand for More") it.
 */
function hideSidebarElements() {
  const sidebarElems = document.querySelector('.sidebar-elems');

  // Button that toggles visibility of the stock sidebar content.
  const expandButton = document.createElement('button');
  expandButton.innerText = 'Expand More Items';
  expandButton.classList.add('expand-button');
  sidebarElems.parentNode.insertBefore(expandButton, sidebarElems);

  // Collapsed by default.
  sidebarElems.style.display = 'none';

  expandButton.addEventListener('click', function () {
    const currentlyHidden = sidebarElems.style.display === 'none';
    sidebarElems.style.display = currentlyHidden ? 'block' : 'none';
    expandButton.innerText = currentlyHidden ? 'Collapse' : 'Expand for More';
  });
}
|
||||
|
||||
// Customize the page once the DOM is ready, but only for pages belonging
// to the SDK docs crate; everything else is left untouched.
window.addEventListener("DOMContentLoaded", (event) => {
  // if the crate is one that starts with `polkadot_sdk_docs`
  // NOTE(review): the check still matches "polkadot_sdk_docs" even though
  // the project is being rebranded to pezkuwi — confirm the rebrand script
  // updates this string (or the crate name) consistently.
  let crate_name = document.querySelector("#main-content > div > h1 > a:nth-child(1)");
  // FIX: guard against pages without the expected <h1> anchor (e.g. search
  // or settings pages), where querySelector returns null and reading
  // `.textContent` previously threw a TypeError.
  if (!crate_name || !crate_name.textContent.startsWith("polkadot_sdk_docs")) {
    console.log("skipping -- not `polkadot_sdk_docs`");
    return;
  } else {
    // insert class 'sdk-docs' to the body, so it enables the custom css rules.
    document.body.classList.add("sdk-docs");
  }

  createToC();
  hideSidebarElements();

  console.log("updating page based on being `polkadot_sdk_docs` crate");
});
|
||||
</script>
|
||||
|
||||
<script src="https://cdn.jsdelivr.net/npm/mermaid/dist/mermaid.min.js"></script>
|
||||
|
||||
<style>
|
||||
body.sdk-docs {
|
||||
nav.side-bar {
|
||||
width: 300px;
|
||||
}
|
||||
|
||||
.sidebar-table-of-contents {
|
||||
margin-bottom: 1em;
|
||||
padding: 0.5em;
|
||||
}
|
||||
|
||||
.sidebar-table-of-contents a {
|
||||
display: block;
|
||||
margin: 0.2em 0;
|
||||
}
|
||||
|
||||
.sidebar-table-of-contents .h2 {
|
||||
font-weight: bold;
|
||||
margin-left: 0;
|
||||
}
|
||||
|
||||
.sidebar-table-of-contents .h3 {
|
||||
margin-left: 1em;
|
||||
}
|
||||
|
||||
.sidebar-table-of-contents .h4 {
|
||||
margin-left: 2em;
|
||||
}
|
||||
|
||||
.sidebar h2.location {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.sidebar-elems {
|
||||
display: none;
|
||||
}
|
||||
|
||||
/* Center the 'Expand for More' button */
|
||||
.expand-button {
|
||||
display: inline-block;
|
||||
/* Use inline-block for sizing */
|
||||
margin: 10px auto;
|
||||
/* Auto margins for horizontal centering */
|
||||
padding: 5px 10px;
|
||||
background-color: #007bff;
|
||||
color: white;
|
||||
text-align: center;
|
||||
cursor: pointer;
|
||||
border: none;
|
||||
border-radius: 5px;
|
||||
width: auto;
|
||||
/* Centering the button within its parent container */
|
||||
position: relative;
|
||||
left: 50%;
|
||||
transform: translateX(-50%);
|
||||
}
|
||||
}
|
||||
</style>
|
||||
@@ -0,0 +1,57 @@
|
||||
:root {
|
||||
--polkadot-pink: #E6007A;
|
||||
--polkadot-green: #56F39A;
|
||||
--polkadot-lime: #D3FF33;
|
||||
--polkadot-cyan: #00B2FF;
|
||||
--polkadot-purple: #552BBF;
|
||||
}
|
||||
|
||||
/* Light theme */
|
||||
html[data-theme="light"] {
|
||||
--quote-background: #f9f9f9;
|
||||
--quote-border: #ccc;
|
||||
--quote-text: #333;
|
||||
}
|
||||
|
||||
/* Dark theme */
|
||||
html[data-theme="dark"] {
|
||||
--quote-background: #333;
|
||||
--quote-border: #555;
|
||||
--quote-text: #f9f9f9;
|
||||
}
|
||||
|
||||
/* Ayu theme */
|
||||
html[data-theme="ayu"] {
|
||||
--quote-background: #272822;
|
||||
--quote-border: #383830;
|
||||
--quote-text: #f8f8f2;
|
||||
}
|
||||
|
||||
body.sdk-docs {
|
||||
nav.sidebar>div.sidebar-crate>a>img {
|
||||
width: 190px;
|
||||
height: 52px;
|
||||
}
|
||||
|
||||
nav.sidebar {
|
||||
flex: 0 0 250px;
|
||||
}
|
||||
}
|
||||
|
||||
html[data-theme="light"] .sidebar-crate > .logo-container > img {
|
||||
content: url("https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/docs/images/Polkadot_Logo_Horizontal_Pink_Black.png");
|
||||
}
|
||||
|
||||
/* Custom styles for blockquotes */
|
||||
blockquote {
|
||||
background-color: var(--quote-background);
|
||||
border-left: 5px solid var(--quote-border);
|
||||
color: var(--quote-text);
|
||||
margin: 1em 0;
|
||||
padding: 1em 1.5em;
|
||||
/* font-style: italic; */
|
||||
}
|
||||
|
||||
blockquote p {
|
||||
margin: 0;
|
||||
}
|
||||
@@ -0,0 +1,27 @@
|
||||
[package]
|
||||
name = "pezkuwi-sdk-docs-first-pallet"
|
||||
description = "A simple pallet created for the pezkuwi-sdk-docs guides"
|
||||
version = "0.0.0"
|
||||
license = "MIT-0"
|
||||
authors.workspace = true
|
||||
homepage.workspace = true
|
||||
repository.workspace = true
|
||||
edition.workspace = true
|
||||
publish = false
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
targets = ["x86_64-unknown-linux-gnu"]
|
||||
|
||||
[dependencies]
|
||||
codec = { workspace = true }
|
||||
docify = { workspace = true }
|
||||
frame = { workspace = true, features = ["runtime"] }
|
||||
scale-info = { workspace = true }
|
||||
|
||||
[features]
|
||||
default = ["std"]
|
||||
std = ["codec/std", "frame/std", "scale-info/std"]
|
||||
runtime-benchmarks = ["frame/runtime-benchmarks"]
|
||||
@@ -0,0 +1,481 @@
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Pallets used in the `your_first_pallet` guide.
|
||||
|
||||
#![cfg_attr(not(feature = "std"), no_std)]
|
||||
|
||||
#[docify::export]
|
||||
#[frame::pallet(dev_mode)]
|
||||
pub mod shell_pallet {
|
||||
use frame::prelude::*;
|
||||
|
||||
#[pallet::config]
|
||||
pub trait Config: frame_system::Config {}
|
||||
|
||||
#[pallet::pallet]
|
||||
pub struct Pallet<T>(_);
|
||||
}
|
||||
|
||||
#[frame::pallet(dev_mode)]
|
||||
pub mod pallet {
|
||||
use frame::prelude::*;
|
||||
|
||||
#[docify::export]
|
||||
pub type Balance = u128;
|
||||
|
||||
#[pallet::config]
|
||||
pub trait Config: frame_system::Config {}
|
||||
|
||||
#[pallet::pallet]
|
||||
pub struct Pallet<T>(_);
|
||||
|
||||
#[docify::export]
|
||||
/// Single storage item, of type `Balance`.
|
||||
#[pallet::storage]
|
||||
pub type TotalIssuance<T: Config> = StorageValue<_, Balance>;
|
||||
|
||||
#[docify::export]
|
||||
/// A mapping from `T::AccountId` to `Balance`
|
||||
#[pallet::storage]
|
||||
pub type Balances<T: Config> = StorageMap<_, _, T::AccountId, Balance>;
|
||||
|
||||
#[docify::export(impl_pallet)]
|
||||
#[pallet::call]
|
||||
impl<T: Config> Pallet<T> {
|
||||
/// An unsafe mint that can be called by anyone. Not a great idea.
|
||||
pub fn mint_unsafe(
|
||||
origin: T::RuntimeOrigin,
|
||||
dest: T::AccountId,
|
||||
amount: Balance,
|
||||
) -> DispatchResult {
|
||||
// ensure that this is a signed account, but we don't really check `_anyone`.
|
||||
let _anyone = ensure_signed(origin)?;
|
||||
|
||||
// update the balances map. Notice how all `<T: Config>` remains as `<T>`.
|
||||
Balances::<T>::mutate(dest, |b| *b = Some(b.unwrap_or(0) + amount));
|
||||
// update total issuance.
|
||||
TotalIssuance::<T>::mutate(|t| *t = Some(t.unwrap_or(0) + amount));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Transfer `amount` from `origin` to `dest`.
|
||||
pub fn transfer(
|
||||
origin: T::RuntimeOrigin,
|
||||
dest: T::AccountId,
|
||||
amount: Balance,
|
||||
) -> DispatchResult {
|
||||
let sender = ensure_signed(origin)?;
|
||||
|
||||
// ensure sender has enough balance, and if so, calculate what is left after `amount`.
|
||||
let sender_balance = Balances::<T>::get(&sender).ok_or("NonExistentAccount")?;
|
||||
if sender_balance < amount {
|
||||
return Err("InsufficientBalance".into());
|
||||
}
|
||||
let remainder = sender_balance - amount;
|
||||
|
||||
// update sender and dest balances.
|
||||
Balances::<T>::mutate(dest, |b| *b = Some(b.unwrap_or(0) + amount));
|
||||
Balances::<T>::insert(&sender, remainder);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(unused)]
|
||||
impl<T: Config> Pallet<T> {
|
||||
#[docify::export]
|
||||
pub fn transfer_better(
|
||||
origin: T::RuntimeOrigin,
|
||||
dest: T::AccountId,
|
||||
amount: Balance,
|
||||
) -> DispatchResult {
|
||||
let sender = ensure_signed(origin)?;
|
||||
|
||||
let sender_balance = Balances::<T>::get(&sender).ok_or("NonExistentAccount")?;
|
||||
ensure!(sender_balance >= amount, "InsufficientBalance");
|
||||
let remainder = sender_balance - amount;
|
||||
|
||||
// .. snip
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[docify::export]
|
||||
/// Transfer `amount` from `origin` to `dest`.
|
||||
pub fn transfer_better_checked(
|
||||
origin: T::RuntimeOrigin,
|
||||
dest: T::AccountId,
|
||||
amount: Balance,
|
||||
) -> DispatchResult {
|
||||
let sender = ensure_signed(origin)?;
|
||||
|
||||
let sender_balance = Balances::<T>::get(&sender).ok_or("NonExistentAccount")?;
|
||||
let remainder = sender_balance.checked_sub(amount).ok_or("InsufficientBalance")?;
|
||||
|
||||
// .. snip
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(any(test, doc))]
|
||||
pub(crate) mod tests {
|
||||
use crate::pallet::*;
|
||||
|
||||
#[docify::export(testing_prelude)]
|
||||
use frame::testing_prelude::*;
|
||||
|
||||
pub(crate) const ALICE: u64 = 1;
|
||||
pub(crate) const BOB: u64 = 2;
|
||||
pub(crate) const CHARLIE: u64 = 3;
|
||||
|
||||
#[docify::export]
|
||||
// This runtime is only used for testing, so it should be somewhere like `#[cfg(test)] mod
|
||||
// tests { .. }`
|
||||
mod runtime {
|
||||
use super::*;
|
||||
// we need to reference our `mod pallet` as an identifier to pass to
|
||||
// `construct_runtime`.
|
||||
// YOU HAVE TO CHANGE THIS LINE BASED ON YOUR TEMPLATE
|
||||
use crate::pallet as pallet_currency;
|
||||
|
||||
construct_runtime!(
|
||||
pub enum Runtime {
|
||||
// ---^^^^^^ This is where `enum Runtime` is defined.
|
||||
System: frame_system,
|
||||
Currency: pallet_currency,
|
||||
}
|
||||
);
|
||||
|
||||
#[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
|
||||
impl frame_system::Config for Runtime {
|
||||
type Block = MockBlock<Runtime>;
|
||||
// within pallet we just said `<T as frame_system::Config>::AccountId`, now we
|
||||
// finally specified it.
|
||||
type AccountId = u64;
|
||||
}
|
||||
|
||||
// our simple pallet has nothing to be configured.
|
||||
impl pallet_currency::Config for Runtime {}
|
||||
}
|
||||
|
||||
pub(crate) use runtime::*;
|
||||
|
||||
#[allow(unused)]
|
||||
#[docify::export]
|
||||
fn new_test_state_basic() -> TestState {
|
||||
let mut state = TestState::new_empty();
|
||||
let accounts = vec![(ALICE, 100), (BOB, 100)];
|
||||
state.execute_with(|| {
|
||||
for (who, amount) in &accounts {
|
||||
Balances::<Runtime>::insert(who, amount);
|
||||
TotalIssuance::<Runtime>::mutate(|b| *b = Some(b.unwrap_or(0) + amount));
|
||||
}
|
||||
});
|
||||
|
||||
state
|
||||
}
|
||||
|
||||
#[docify::export]
|
||||
pub(crate) struct StateBuilder {
|
||||
balances: Vec<(<Runtime as frame_system::Config>::AccountId, Balance)>,
|
||||
}
|
||||
|
||||
#[docify::export(default_state_builder)]
|
||||
impl Default for StateBuilder {
|
||||
fn default() -> Self {
|
||||
Self { balances: vec![(ALICE, 100), (BOB, 100)] }
|
||||
}
|
||||
}
|
||||
|
||||
#[docify::export(impl_state_builder_add)]
|
||||
impl StateBuilder {
|
||||
fn add_balance(
|
||||
mut self,
|
||||
who: <Runtime as frame_system::Config>::AccountId,
|
||||
amount: Balance,
|
||||
) -> Self {
|
||||
self.balances.push((who, amount));
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
#[docify::export(impl_state_builder_build)]
|
||||
impl StateBuilder {
|
||||
pub(crate) fn build_and_execute(self, test: impl FnOnce() -> ()) {
|
||||
let mut ext = TestState::new_empty();
|
||||
ext.execute_with(|| {
|
||||
for (who, amount) in &self.balances {
|
||||
Balances::<Runtime>::insert(who, amount);
|
||||
TotalIssuance::<Runtime>::mutate(|b| *b = Some(b.unwrap_or(0) + amount));
|
||||
}
|
||||
});
|
||||
|
||||
ext.execute_with(test);
|
||||
|
||||
// assertions that must always hold
|
||||
ext.execute_with(|| {
|
||||
assert_eq!(
|
||||
Balances::<Runtime>::iter().map(|(_, x)| x).sum::<u128>(),
|
||||
TotalIssuance::<Runtime>::get().unwrap_or_default()
|
||||
);
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[docify::export]
|
||||
#[test]
|
||||
fn first_test() {
|
||||
TestState::new_empty().execute_with(|| {
|
||||
// We expect Alice's account to have no funds.
|
||||
assert_eq!(Balances::<Runtime>::get(&ALICE), None);
|
||||
assert_eq!(TotalIssuance::<Runtime>::get(), None);
|
||||
|
||||
// mint some funds into Alice's account.
|
||||
assert_ok!(Pallet::<Runtime>::mint_unsafe(
|
||||
RuntimeOrigin::signed(ALICE),
|
||||
ALICE,
|
||||
100
|
||||
));
|
||||
|
||||
// re-check the above
|
||||
assert_eq!(Balances::<Runtime>::get(&ALICE), Some(100));
|
||||
assert_eq!(TotalIssuance::<Runtime>::get(), Some(100));
|
||||
})
|
||||
}
|
||||
|
||||
#[docify::export]
|
||||
#[test]
|
||||
fn state_builder_works() {
|
||||
StateBuilder::default().build_and_execute(|| {
|
||||
assert_eq!(Balances::<Runtime>::get(&ALICE), Some(100));
|
||||
assert_eq!(Balances::<Runtime>::get(&BOB), Some(100));
|
||||
assert_eq!(Balances::<Runtime>::get(&CHARLIE), None);
|
||||
assert_eq!(TotalIssuance::<Runtime>::get(), Some(200));
|
||||
});
|
||||
}
|
||||
|
||||
#[docify::export]
|
||||
#[test]
|
||||
fn state_builder_add_balance() {
|
||||
StateBuilder::default().add_balance(CHARLIE, 42).build_and_execute(|| {
|
||||
assert_eq!(Balances::<Runtime>::get(&CHARLIE), Some(42));
|
||||
assert_eq!(TotalIssuance::<Runtime>::get(), Some(242));
|
||||
})
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn state_builder_duplicate_genesis_fails() {
|
||||
StateBuilder::default()
|
||||
.add_balance(CHARLIE, 42)
|
||||
.add_balance(CHARLIE, 43)
|
||||
.build_and_execute(|| {
|
||||
assert_eq!(Balances::<Runtime>::get(&CHARLIE), None);
|
||||
assert_eq!(TotalIssuance::<Runtime>::get(), Some(242));
|
||||
})
|
||||
}
|
||||
|
||||
#[docify::export]
|
||||
#[test]
|
||||
fn mint_works() {
|
||||
StateBuilder::default().build_and_execute(|| {
|
||||
// given the initial state, when:
|
||||
assert_ok!(Pallet::<Runtime>::mint_unsafe(RuntimeOrigin::signed(ALICE), BOB, 100));
|
||||
|
||||
// then:
|
||||
assert_eq!(Balances::<Runtime>::get(&BOB), Some(200));
|
||||
assert_eq!(TotalIssuance::<Runtime>::get(), Some(300));
|
||||
|
||||
// given:
|
||||
assert_ok!(Pallet::<Runtime>::mint_unsafe(
|
||||
RuntimeOrigin::signed(ALICE),
|
||||
CHARLIE,
|
||||
100
|
||||
));
|
||||
|
||||
// then:
|
||||
assert_eq!(Balances::<Runtime>::get(&CHARLIE), Some(100));
|
||||
assert_eq!(TotalIssuance::<Runtime>::get(), Some(400));
|
||||
});
|
||||
}
|
||||
|
||||
#[docify::export]
|
||||
#[test]
|
||||
fn transfer_works() {
|
||||
StateBuilder::default().build_and_execute(|| {
|
||||
// given the initial state, when:
|
||||
assert_ok!(Pallet::<Runtime>::transfer(RuntimeOrigin::signed(ALICE), BOB, 50));
|
||||
|
||||
// then:
|
||||
assert_eq!(Balances::<Runtime>::get(&ALICE), Some(50));
|
||||
assert_eq!(Balances::<Runtime>::get(&BOB), Some(150));
|
||||
assert_eq!(TotalIssuance::<Runtime>::get(), Some(200));
|
||||
|
||||
// when:
|
||||
assert_ok!(Pallet::<Runtime>::transfer(RuntimeOrigin::signed(BOB), ALICE, 50));
|
||||
|
||||
// then:
|
||||
assert_eq!(Balances::<Runtime>::get(&ALICE), Some(100));
|
||||
assert_eq!(Balances::<Runtime>::get(&BOB), Some(100));
|
||||
assert_eq!(TotalIssuance::<Runtime>::get(), Some(200));
|
||||
});
|
||||
}
|
||||
|
||||
#[docify::export]
|
||||
#[test]
|
||||
fn transfer_from_non_existent_fails() {
|
||||
StateBuilder::default().build_and_execute(|| {
|
||||
// given the initial state, when:
|
||||
assert_err!(
|
||||
Pallet::<Runtime>::transfer(RuntimeOrigin::signed(CHARLIE), ALICE, 10),
|
||||
"NonExistentAccount"
|
||||
);
|
||||
|
||||
// then nothing has changed.
|
||||
assert_eq!(Balances::<Runtime>::get(&ALICE), Some(100));
|
||||
assert_eq!(Balances::<Runtime>::get(&BOB), Some(100));
|
||||
assert_eq!(Balances::<Runtime>::get(&CHARLIE), None);
|
||||
assert_eq!(TotalIssuance::<Runtime>::get(), Some(200));
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[frame::pallet(dev_mode)]
|
||||
pub mod pallet_v2 {
|
||||
use super::pallet::Balance;
|
||||
use frame::prelude::*;
|
||||
|
||||
#[docify::export(config_v2)]
|
||||
#[pallet::config]
|
||||
pub trait Config: frame_system::Config {
|
||||
/// The overarching event type of the runtime.
|
||||
#[allow(deprecated)]
|
||||
type RuntimeEvent: From<Event<Self>>
|
||||
+ IsType<<Self as frame_system::Config>::RuntimeEvent>
|
||||
+ TryInto<Event<Self>>;
|
||||
}
|
||||
|
||||
#[pallet::pallet]
|
||||
pub struct Pallet<T>(_);
|
||||
|
||||
#[pallet::storage]
|
||||
pub type Balances<T: Config> = StorageMap<_, _, T::AccountId, Balance>;
|
||||
|
||||
#[pallet::storage]
|
||||
pub type TotalIssuance<T: Config> = StorageValue<_, Balance>;
|
||||
|
||||
#[docify::export]
|
||||
#[pallet::error]
|
||||
pub enum Error<T> {
|
||||
/// Account does not exist.
|
||||
NonExistentAccount,
|
||||
/// Account does not have enough balance.
|
||||
InsufficientBalance,
|
||||
}
|
||||
|
||||
#[docify::export]
|
||||
#[pallet::event]
|
||||
#[pallet::generate_deposit(pub(super) fn deposit_event)]
|
||||
pub enum Event<T: Config> {
|
||||
/// A transfer succeeded.
|
||||
Transferred { from: T::AccountId, to: T::AccountId, amount: Balance },
|
||||
}
|
||||
|
||||
#[pallet::call]
|
||||
impl<T: Config> Pallet<T> {
|
||||
#[docify::export(transfer_v2)]
|
||||
pub fn transfer(
|
||||
origin: T::RuntimeOrigin,
|
||||
dest: T::AccountId,
|
||||
amount: Balance,
|
||||
) -> DispatchResult {
|
||||
let sender = ensure_signed(origin)?;
|
||||
|
||||
// ensure sender has enough balance, and if so, calculate what is left after `amount`.
|
||||
let sender_balance =
|
||||
Balances::<T>::get(&sender).ok_or(Error::<T>::NonExistentAccount)?;
|
||||
let remainder =
|
||||
sender_balance.checked_sub(amount).ok_or(Error::<T>::InsufficientBalance)?;
|
||||
|
||||
Balances::<T>::mutate(&dest, |b| *b = Some(b.unwrap_or(0) + amount));
|
||||
Balances::<T>::insert(&sender, remainder);
|
||||
|
||||
Self::deposit_event(Event::<T>::Transferred { from: sender, to: dest, amount });
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(any(test, doc))]
|
||||
pub mod tests {
|
||||
use super::{super::pallet::tests::StateBuilder, *};
|
||||
use frame::testing_prelude::*;
|
||||
const ALICE: u64 = 1;
|
||||
const BOB: u64 = 2;
|
||||
|
||||
#[docify::export]
|
||||
pub mod runtime_v2 {
|
||||
use super::*;
|
||||
use crate::pallet_v2 as pallet_currency;
|
||||
|
||||
construct_runtime!(
|
||||
pub enum Runtime {
|
||||
System: frame_system,
|
||||
Currency: pallet_currency,
|
||||
}
|
||||
);
|
||||
|
||||
#[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
|
||||
impl frame_system::Config for Runtime {
|
||||
type Block = MockBlock<Runtime>;
|
||||
type AccountId = u64;
|
||||
}
|
||||
|
||||
impl pallet_currency::Config for Runtime {
|
||||
type RuntimeEvent = RuntimeEvent;
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) use runtime_v2::*;
|
||||
|
||||
#[docify::export(transfer_works_v2)]
|
||||
#[test]
|
||||
fn transfer_works() {
|
||||
StateBuilder::default().build_and_execute(|| {
|
||||
// skip the genesis block, as events are not deposited there and we need them for
|
||||
// the final assertion.
|
||||
System::set_block_number(ALICE);
|
||||
|
||||
// given the initial state, when:
|
||||
assert_ok!(Pallet::<Runtime>::transfer(RuntimeOrigin::signed(ALICE), BOB, 50));
|
||||
|
||||
// then:
|
||||
assert_eq!(Balances::<Runtime>::get(&ALICE), Some(50));
|
||||
assert_eq!(Balances::<Runtime>::get(&BOB), Some(150));
|
||||
assert_eq!(TotalIssuance::<Runtime>::get(), Some(200));
|
||||
|
||||
// now we can also check that an event has been deposited:
|
||||
assert_eq!(
|
||||
System::read_events_for_pallet::<Event<Runtime>>(),
|
||||
vec![Event::Transferred { from: ALICE, to: BOB, amount: 50 }]
|
||||
);
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,71 @@
|
||||
[package]
|
||||
name = "pezkuwi-sdk-docs-first-runtime"
|
||||
description = "A simple runtime created for the pezkuwi-sdk-docs guides"
|
||||
version = "0.0.0"
|
||||
license = "MIT-0"
|
||||
authors.workspace = true
|
||||
homepage.workspace = true
|
||||
repository.workspace = true
|
||||
edition.workspace = true
|
||||
publish = false
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
codec = { workspace = true }
|
||||
scale-info = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
|
||||
# this is a frame-based runtime, thus importing `frame` with runtime feature enabled.
|
||||
frame = { workspace = true, features = ["runtime"] }
|
||||
|
||||
# pallets that we want to use
|
||||
pallet-balances = { workspace = true }
|
||||
pallet-sudo = { workspace = true }
|
||||
pallet-timestamp = { workspace = true }
|
||||
pallet-transaction-payment = { workspace = true }
|
||||
pallet-transaction-payment-rpc-runtime-api = { workspace = true }
|
||||
|
||||
# other pezkuwi-sdk-deps
|
||||
sp-keyring = { workspace = true }
|
||||
|
||||
# local pallet templates
|
||||
first-pallet = { workspace = true }
|
||||
|
||||
docify = { workspace = true }
|
||||
|
||||
[build-dependencies]
|
||||
substrate-wasm-builder = { workspace = true, optional = true }
|
||||
|
||||
[features]
|
||||
default = ["std"]
|
||||
std = [
|
||||
"codec/std",
|
||||
"scale-info/std",
|
||||
"serde_json/std",
|
||||
|
||||
"frame/std",
|
||||
|
||||
"pallet-balances/std",
|
||||
"pallet-sudo/std",
|
||||
"pallet-timestamp/std",
|
||||
"pallet-transaction-payment-rpc-runtime-api/std",
|
||||
"pallet-transaction-payment/std",
|
||||
|
||||
"first-pallet/std",
|
||||
"sp-keyring/std",
|
||||
|
||||
"substrate-wasm-builder",
|
||||
]
|
||||
runtime-benchmarks = [
|
||||
"first-pallet/runtime-benchmarks",
|
||||
"frame/runtime-benchmarks",
|
||||
"pallet-balances/runtime-benchmarks",
|
||||
"pallet-sudo/runtime-benchmarks",
|
||||
"pallet-timestamp/runtime-benchmarks",
|
||||
"pallet-transaction-payment-rpc-runtime-api/runtime-benchmarks",
|
||||
"pallet-transaction-payment/runtime-benchmarks",
|
||||
"sp-keyring/runtime-benchmarks",
|
||||
"substrate-wasm-builder?/runtime-benchmarks",
|
||||
]
|
||||
@@ -0,0 +1,27 @@
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
fn main() {
|
||||
#[cfg(feature = "std")]
|
||||
{
|
||||
substrate_wasm_builder::WasmBuilder::new()
|
||||
.with_current_project()
|
||||
.export_heap_base()
|
||||
.import_memory()
|
||||
.build();
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,299 @@
|
||||
// This file is part of Substrate.
|
||||
|
||||
// Copyright (C) Parity Technologies (UK) Ltd.
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Runtime used in `your_first_runtime`.
|
||||
|
||||
#![cfg_attr(not(feature = "std"), no_std)]
|
||||
|
||||
extern crate alloc;
|
||||
use alloc::{vec, vec::Vec};
|
||||
use first_pallet::pallet_v2 as our_first_pallet;
|
||||
use frame::{
|
||||
prelude::*,
|
||||
runtime::{apis, prelude::*},
|
||||
};
|
||||
use pallet_transaction_payment_rpc_runtime_api::{FeeDetails, RuntimeDispatchInfo};
|
||||
|
||||
#[docify::export]
|
||||
#[runtime_version]
|
||||
pub const VERSION: RuntimeVersion = RuntimeVersion {
|
||||
spec_name: alloc::borrow::Cow::Borrowed("first-runtime"),
|
||||
impl_name: alloc::borrow::Cow::Borrowed("first-runtime"),
|
||||
authoring_version: 1,
|
||||
spec_version: 0,
|
||||
impl_version: 1,
|
||||
apis: RUNTIME_API_VERSIONS,
|
||||
transaction_version: 1,
|
||||
system_version: 1,
|
||||
};
|
||||
|
||||
#[docify::export(cr)]
|
||||
construct_runtime!(
|
||||
pub struct Runtime {
|
||||
// Mandatory for all runtimes
|
||||
System: frame_system,
|
||||
|
||||
// A number of other pallets from FRAME.
|
||||
Timestamp: pallet_timestamp,
|
||||
Balances: pallet_balances,
|
||||
Sudo: pallet_sudo,
|
||||
TransactionPayment: pallet_transaction_payment,
|
||||
|
||||
// Our local pallet
|
||||
FirstPallet: our_first_pallet,
|
||||
}
|
||||
);
|
||||
|
||||
#[docify::export_content]
|
||||
mod runtime_types {
|
||||
use super::*;
|
||||
pub(super) type SignedExtra = (
|
||||
// `frame` already provides all the signed extensions from `frame-system`. We just add the
|
||||
// one related to tx-payment here.
|
||||
frame::runtime::types_common::SystemTransactionExtensionsOf<Runtime>,
|
||||
pallet_transaction_payment::ChargeTransactionPayment<Runtime>,
|
||||
);
|
||||
|
||||
pub(super) type Block = frame::runtime::types_common::BlockOf<Runtime, SignedExtra>;
|
||||
pub(super) type Header = HeaderFor<Runtime>;
|
||||
|
||||
pub(super) type RuntimeExecutive = Executive<
|
||||
Runtime,
|
||||
Block,
|
||||
frame_system::ChainContext<Runtime>,
|
||||
Runtime,
|
||||
AllPalletsWithSystem,
|
||||
>;
|
||||
}
|
||||
use runtime_types::*;
|
||||
|
||||
#[docify::export_content]
|
||||
mod config_impls {
|
||||
use super::*;
|
||||
|
||||
parameter_types! {
|
||||
pub const Version: RuntimeVersion = VERSION;
|
||||
}
|
||||
|
||||
#[derive_impl(frame_system::config_preludes::SolochainDefaultConfig)]
|
||||
impl frame_system::Config for Runtime {
|
||||
type Block = Block;
|
||||
type Version = Version;
|
||||
type AccountData =
|
||||
pallet_balances::AccountData<<Runtime as pallet_balances::Config>::Balance>;
|
||||
}
|
||||
|
||||
#[derive_impl(pallet_balances::config_preludes::TestDefaultConfig)]
|
||||
impl pallet_balances::Config for Runtime {
|
||||
type AccountStore = System;
|
||||
}
|
||||
|
||||
#[derive_impl(pallet_sudo::config_preludes::TestDefaultConfig)]
|
||||
impl pallet_sudo::Config for Runtime {}
|
||||
|
||||
#[derive_impl(pallet_timestamp::config_preludes::TestDefaultConfig)]
|
||||
impl pallet_timestamp::Config for Runtime {}
|
||||
|
||||
#[derive_impl(pallet_transaction_payment::config_preludes::TestDefaultConfig)]
|
||||
impl pallet_transaction_payment::Config for Runtime {
|
||||
type OnChargeTransaction = pallet_transaction_payment::FungibleAdapter<Balances, ()>;
|
||||
// We specify a fixed length to fee here, which essentially means all transactions charge
|
||||
// exactly 1 unit of fee.
|
||||
type LengthToFee = FixedFee<1, <Self as pallet_balances::Config>::Balance>;
|
||||
type WeightToFee = NoFee<<Self as pallet_balances::Config>::Balance>;
|
||||
}
|
||||
}
|
||||
|
||||
#[docify::export(our_config_impl)]
|
||||
impl our_first_pallet::Config for Runtime {
|
||||
type RuntimeEvent = RuntimeEvent;
|
||||
}
|
||||
|
||||
/// Provides getters for genesis configuration presets.
|
||||
pub mod genesis_config_presets {
|
||||
use super::*;
|
||||
use crate::{
|
||||
interface::{Balance, MinimumBalance},
|
||||
BalancesConfig, RuntimeGenesisConfig, SudoConfig,
|
||||
};
|
||||
use frame::deps::frame_support::build_struct_json_patch;
|
||||
use serde_json::Value;
|
||||
|
||||
/// Returns a development genesis config preset.
|
||||
#[docify::export]
|
||||
pub fn development_config_genesis() -> Value {
|
||||
let endowment = <MinimumBalance as Get<Balance>>::get().max(1) * 1000;
|
||||
build_struct_json_patch!(RuntimeGenesisConfig {
|
||||
balances: BalancesConfig {
|
||||
balances: Sr25519Keyring::iter()
|
||||
.map(|a| (a.to_account_id(), endowment))
|
||||
.collect::<Vec<_>>(),
|
||||
},
|
||||
sudo: SudoConfig { key: Some(Sr25519Keyring::Alice.to_account_id()) },
|
||||
})
|
||||
}
|
||||
|
||||
/// Get the set of the available genesis config presets.
|
||||
#[docify::export]
|
||||
pub fn get_preset(id: &PresetId) -> Option<Vec<u8>> {
|
||||
let patch = match id.as_ref() {
|
||||
DEV_RUNTIME_PRESET => development_config_genesis(),
|
||||
_ => return None,
|
||||
};
|
||||
Some(
|
||||
serde_json::to_string(&patch)
|
||||
.expect("serialization to json is expected to work. qed.")
|
||||
.into_bytes(),
|
||||
)
|
||||
}
|
||||
|
||||
/// List of supported presets.
|
||||
#[docify::export]
|
||||
pub fn preset_names() -> Vec<PresetId> {
|
||||
vec![PresetId::from(DEV_RUNTIME_PRESET)]
|
||||
}
|
||||
}
|
||||
|
||||
impl_runtime_apis! {
|
||||
impl apis::Core<Block> for Runtime {
|
||||
fn version() -> RuntimeVersion {
|
||||
VERSION
|
||||
}
|
||||
|
||||
fn execute_block(block: <Block as frame::traits::Block>::LazyBlock) {
|
||||
RuntimeExecutive::execute_block(block)
|
||||
}
|
||||
|
||||
fn initialize_block(header: &Header) -> ExtrinsicInclusionMode {
|
||||
RuntimeExecutive::initialize_block(header)
|
||||
}
|
||||
}
|
||||
|
||||
impl apis::Metadata<Block> for Runtime {
|
||||
fn metadata() -> OpaqueMetadata {
|
||||
OpaqueMetadata::new(Runtime::metadata().into())
|
||||
}
|
||||
|
||||
fn metadata_at_version(version: u32) -> Option<OpaqueMetadata> {
|
||||
Runtime::metadata_at_version(version)
|
||||
}
|
||||
|
||||
fn metadata_versions() -> Vec<u32> {
|
||||
Runtime::metadata_versions()
|
||||
}
|
||||
}
|
||||
|
||||
impl apis::BlockBuilder<Block> for Runtime {
|
||||
fn apply_extrinsic(extrinsic: ExtrinsicFor<Runtime>) -> ApplyExtrinsicResult {
|
||||
RuntimeExecutive::apply_extrinsic(extrinsic)
|
||||
}
|
||||
|
||||
fn finalize_block() -> HeaderFor<Runtime> {
|
||||
RuntimeExecutive::finalize_block()
|
||||
}
|
||||
|
||||
fn inherent_extrinsics(data: InherentData) -> Vec<ExtrinsicFor<Runtime>> {
|
||||
data.create_extrinsics()
|
||||
}
|
||||
|
||||
fn check_inherents(
|
||||
block: <Block as frame::traits::Block>::LazyBlock,
|
||||
data: InherentData,
|
||||
) -> CheckInherentsResult {
|
||||
data.check_extrinsics(&block)
|
||||
}
|
||||
}
|
||||
|
||||
impl apis::TaggedTransactionQueue<Block> for Runtime {
|
||||
fn validate_transaction(
|
||||
source: TransactionSource,
|
||||
tx: ExtrinsicFor<Runtime>,
|
||||
block_hash: <Runtime as frame_system::Config>::Hash,
|
||||
) -> TransactionValidity {
|
||||
RuntimeExecutive::validate_transaction(source, tx, block_hash)
|
||||
}
|
||||
}
|
||||
|
||||
impl apis::OffchainWorkerApi<Block> for Runtime {
|
||||
fn offchain_worker(header: &HeaderFor<Runtime>) {
|
||||
RuntimeExecutive::offchain_worker(header)
|
||||
}
|
||||
}
|
||||
|
||||
impl apis::SessionKeys<Block> for Runtime {
|
||||
fn generate_session_keys(_seed: Option<Vec<u8>>) -> Vec<u8> {
|
||||
Default::default()
|
||||
}
|
||||
|
||||
fn decode_session_keys(
|
||||
_encoded: Vec<u8>,
|
||||
) -> Option<Vec<(Vec<u8>, apis::KeyTypeId)>> {
|
||||
Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
impl apis::AccountNonceApi<Block, interface::AccountId, interface::Nonce> for Runtime {
|
||||
fn account_nonce(account: interface::AccountId) -> interface::Nonce {
|
||||
System::account_nonce(account)
|
||||
}
|
||||
}
|
||||
|
||||
impl apis::GenesisBuilder<Block> for Runtime {
|
||||
fn build_state(config: Vec<u8>) -> GenesisBuilderResult {
|
||||
build_state::<RuntimeGenesisConfig>(config)
|
||||
}
|
||||
|
||||
fn get_preset(id: &Option<PresetId>) -> Option<Vec<u8>> {
|
||||
get_preset::<RuntimeGenesisConfig>(id, self::genesis_config_presets::get_preset)
|
||||
}
|
||||
|
||||
fn preset_names() -> Vec<PresetId> {
|
||||
crate::genesis_config_presets::preset_names()
|
||||
}
|
||||
}
|
||||
|
||||
impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi<
|
||||
Block,
|
||||
interface::Balance,
|
||||
> for Runtime {
|
||||
fn query_info(uxt: ExtrinsicFor<Runtime>, len: u32) -> RuntimeDispatchInfo<interface::Balance> {
|
||||
TransactionPayment::query_info(uxt, len)
|
||||
}
|
||||
fn query_fee_details(uxt: ExtrinsicFor<Runtime>, len: u32) -> FeeDetails<interface::Balance> {
|
||||
TransactionPayment::query_fee_details(uxt, len)
|
||||
}
|
||||
fn query_weight_to_fee(weight: Weight) -> interface::Balance {
|
||||
TransactionPayment::weight_to_fee(weight)
|
||||
}
|
||||
fn query_length_to_fee(length: u32) -> interface::Balance {
|
||||
TransactionPayment::length_to_fee(length)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Just a handy re-definition of some types based on what is already provided to the pallet
|
||||
/// configs.
|
||||
pub mod interface {
|
||||
use super::Runtime;
|
||||
use frame::prelude::frame_system;
|
||||
|
||||
pub type AccountId = <Runtime as frame_system::Config>::AccountId;
|
||||
pub type Nonce = <Runtime as frame_system::Config>::Nonce;
|
||||
pub type Hash = <Runtime as frame_system::Config>::Hash;
|
||||
pub type Balance = <Runtime as pallet_balances::Config>::Balance;
|
||||
pub type MinimumBalance = <Runtime as pallet_balances::Config>::ExistentialDeposit;
|
||||
}
|
||||
@@ -0,0 +1,14 @@
|
||||
//! # External Resources
|
||||
//!
|
||||
//! A non-exhaustive, un-opinionated list of external resources about Pezkuwi SDK.
|
||||
//!
|
||||
//! Unlike [`crate::guides`], or [`crate::pezkuwi_sdk::templates`] that contain material directly
|
||||
//! maintained in the `pezkuwi-sdk` repository, the list of resources here are maintained by
|
||||
//! third-parties, and are therefore subject to more variability. Any further resources may be added
|
||||
//! by opening a pull request to the `pezkuwi-sdk` repository.
|
||||
//!
|
||||
//! - [Pezkuwi NFT Marketplace Tutorial by Pezkuwi Fellow Shawn Tabrizi](https://www.shawntabrizi.com/substrate-collectables-workshop/)
|
||||
//! - [HEZ Code School](https://dotcodeschool.com/)
|
||||
//! - [Pezkuwi Developers Github Organization](https://github.com/polkadot-developers/)
|
||||
//! - [Pezkuwi Blockchain Academy](https://github.com/pezkuwichain/kurdistan_blockchain-akademy)
|
||||
//! - [Pezkuwi Wiki](https://wiki.network.pezkuwichain.io/)
|
||||
@@ -0,0 +1,254 @@
|
||||
//! # Upgrade Teyrchain for Asynchronous Backing Compatibility
|
||||
//!
|
||||
//! This guide is relevant for cumulus based teyrchain projects started in 2023 or before, whose
|
||||
//! backing process is synchronous where parablocks can only be built on the latest Relay Chain
|
||||
//! block. Async Backing allows collators to build parablocks on older Relay Chain blocks and create
|
||||
//! pipelines of multiple pending parablocks. This parallel block generation increases efficiency
|
||||
//! and throughput. For more information on Async backing and its terminology, refer to the document
|
||||
//! on [the Pezkuwi SDK docs.](https://docs.pezkuwichain.io/sdk/master/polkadot_sdk_docs/guides/async_backing_guide/index.html)
|
||||
//!
|
||||
//! > If starting a new teyrchain project, please use an async backing compatible template such as
|
||||
//! > the
|
||||
//! > [teyrchain template](https://github.com/pezkuwichain/pezkuwi-sdk/tree/master/templates/teyrchain).
|
||||
//! The rollout process for Async Backing has three phases. Phases 1 and 2 below put new
|
||||
//! infrastructure in place. Then we can simply turn on async backing in phase 3.
|
||||
//!
|
||||
//! ## Prerequisite
|
||||
//!
|
||||
//! The relay chain needs to have async backing enabled so double-check that the relay-chain
|
||||
//! configuration contains the following three parameters (especially when testing locally e.g. with
|
||||
//! zombienet):
|
||||
//!
|
||||
//! ```json
|
||||
//! "async_backing_params": {
|
||||
//! "max_candidate_depth": 3,
|
||||
//! "allowed_ancestry_len": 2
|
||||
//! },
|
||||
//! "scheduling_lookahead": 2
|
||||
//! ```
|
||||
//!
|
||||
//! <div class="warning"><code>scheduling_lookahead</code> must be set to 2, otherwise teyrchain
|
||||
//! block times will degrade to worse than with sync backing!</div>
|
||||
//!
|
||||
//! ## Phase 1 - Update Teyrchain Runtime
|
||||
//!
|
||||
//! This phase involves configuring your teyrchain’s runtime `/runtime/src/lib.rs` to make use of
|
||||
//! async backing system.
|
||||
//!
|
||||
//! 1. Establish and ensure constants for `capacity` and `velocity` are both set to 1 in the
|
||||
//! runtime.
|
||||
//! 2. Establish and ensure the constant relay chain slot duration measured in milliseconds equal to
|
||||
//! `6000` in the runtime.
|
||||
//! ```rust
|
||||
//! // Maximum number of blocks simultaneously accepted by the Runtime, not yet included into the
|
||||
//! // relay chain.
|
||||
//! pub const UNINCLUDED_SEGMENT_CAPACITY: u32 = 1;
|
||||
//! // How many teyrchain blocks are processed by the relay chain per parent. Limits the number of
|
||||
//! // blocks authored per slot.
|
||||
//! pub const BLOCK_PROCESSING_VELOCITY: u32 = 1;
|
||||
//! // Relay chain slot duration, in milliseconds.
|
||||
//! pub const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000;
|
||||
//! ```
|
||||
//!
|
||||
//! 3. Establish constants `MILLISECS_PER_BLOCK` and `SLOT_DURATION` if not already present in the
|
||||
//! runtime.
|
||||
//! ```ignore
|
||||
//! // `SLOT_DURATION` is picked up by `pallet_timestamp` which is in turn picked
|
||||
//! // up by `pallet_aura` to implement `fn slot_duration()`.
|
||||
//! //
|
||||
//! // Change this to adjust the block time.
|
||||
//! pub const MILLISECS_PER_BLOCK: u64 = 12000;
|
||||
//! pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK;
|
||||
//! ```
|
||||
//!
|
||||
//! 4. Configure `cumulus_pallet_teyrchain_system` in the runtime.
|
||||
//!
|
||||
//! - Define a `FixedVelocityConsensusHook` using our capacity, velocity, and relay slot duration
|
||||
//! constants. Use this to set the teyrchain system `ConsensusHook` property.
|
||||
#![doc = docify::embed!("../../templates/teyrchain/runtime/src/lib.rs", ConsensusHook)]
|
||||
//! ```ignore
|
||||
//! impl cumulus_pallet_teyrchain_system::Config for Runtime {
|
||||
//! ..
|
||||
//! type ConsensusHook = ConsensusHook;
|
||||
//! ..
|
||||
//! }
|
||||
//! ```
|
||||
//! - Set the teyrchain system property `CheckAssociatedRelayNumber` to
|
||||
//! `RelayNumberMonotonicallyIncreases`
|
||||
//! ```ignore
|
||||
//! impl cumulus_pallet_teyrchain_system::Config for Runtime {
|
||||
//! ..
|
||||
//! type CheckAssociatedRelayNumber = RelayNumberMonotonicallyIncreases;
|
||||
//! ..
|
||||
//! }
|
||||
//! ```
|
||||
//!
|
||||
//! 5. Configure `pallet_aura` in the runtime.
|
||||
//!
|
||||
//! - Set `AllowMultipleBlocksPerSlot` to `false` (don't worry, we will set it to `true` when we
|
||||
//! activate async backing in phase 3).
|
||||
//!
|
||||
//! - Define `pallet_aura::SlotDuration` using our constant `SLOT_DURATION`
|
||||
//! ```ignore
|
||||
//! impl pallet_aura::Config for Runtime {
|
||||
//! ..
|
||||
//! type AllowMultipleBlocksPerSlot = ConstBool<false>;
|
||||
//! #[cfg(feature = "experimental")]
|
||||
//! type SlotDuration = ConstU64<SLOT_DURATION>;
|
||||
//! ..
|
||||
//! }
|
||||
//! ```
|
||||
//!
|
||||
//! 6. Update `sp_consensus_aura::AuraApi::slot_duration` in `sp_api::impl_runtime_apis` to match
|
||||
//! the constant `SLOT_DURATION`
|
||||
#![doc = docify::embed!("../../templates/teyrchain/runtime/src/apis.rs", impl_slot_duration)]
|
||||
//!
|
||||
//! 7. Implement the `AuraUnincludedSegmentApi`, which allows the collator client to query its
|
||||
//! runtime to determine whether it should author a block.
|
||||
//!
|
||||
//! - Add the dependency `cumulus-primitives-aura` to the `runtime/Cargo.toml` file for your
|
||||
//! runtime
|
||||
//! ```ignore
|
||||
//! ..
|
||||
//! cumulus-primitives-aura = { path = "../../../../primitives/aura", default-features = false }
|
||||
//! ..
|
||||
//! ```
|
||||
//!
|
||||
//! - In the same file, add `"cumulus-primitives-aura/std",` to the `std` feature.
|
||||
//!
|
||||
//! - Inside the `impl_runtime_apis!` block for your runtime, implement the
|
||||
//! `cumulus_primitives_aura::AuraUnincludedSegmentApi` as shown below.
|
||||
#![doc = docify::embed!("../../templates/teyrchain/runtime/src/apis.rs", impl_can_build_upon)]
|
||||
//!
|
||||
//! **Note:** With a capacity of 1 we have an effective velocity of ½ even when velocity is
|
||||
//! configured to some larger value. This is because capacity will be filled after a single block is
|
||||
//! produced and will only be freed up after that block is included on the relay chain, which takes
|
||||
//! 2 relay blocks to accomplish. Thus with capacity 1 and velocity 1 we get the customary 12 second
|
||||
//! teyrchain block time.
|
||||
//!
|
||||
//! 8. If your `runtime/src/lib.rs` provides a `CheckInherents` type to `register_validate_block`,
|
||||
//! remove it. `FixedVelocityConsensusHook` makes it unnecessary. The following example shows how
|
||||
//! `register_validate_block` should look after removing `CheckInherents`.
|
||||
#![doc = docify::embed!("../../templates/teyrchain/runtime/src/lib.rs", register_validate_block)]
|
||||
//!
|
||||
//!
|
||||
//! ## Phase 2 - Update Teyrchain Nodes
|
||||
//!
|
||||
//! This phase consists of plugging in the new lookahead collator node.
|
||||
//!
|
||||
//! 1. Import `cumulus_primitives_core::ValidationCode` to `node/src/service.rs`.
|
||||
#![doc = docify::embed!("../../templates/teyrchain/node/src/service.rs", cumulus_primitives)]
|
||||
//!
|
||||
//! 2. In `node/src/service.rs`, modify `sc_service::spawn_tasks` to use a clone of `Backend` rather
|
||||
//! than the original
|
||||
//! ```ignore
|
||||
//! sc_service::spawn_tasks(sc_service::SpawnTasksParams {
|
||||
//! ..
|
||||
//! backend: backend.clone(),
|
||||
//! ..
|
||||
//! })?;
|
||||
//! ```
|
||||
//!
|
||||
//! 3. Add `backend` as a parameter to `start_consensus()` in `node/src/service.rs`
|
||||
//! ```text
|
||||
//! fn start_consensus(
|
||||
//! ..
|
||||
//! backend: Arc<TeyrchainBackend>,
|
||||
//! ..
|
||||
//! ```
|
||||
//! ```ignore
|
||||
//! if validator {
|
||||
//! start_consensus(
|
||||
//! ..
|
||||
//! backend.clone(),
|
||||
//! ..
|
||||
//! )?;
|
||||
//! }
|
||||
//! ```
|
||||
//!
|
||||
//! 4. In `node/src/service.rs` import the lookahead collator rather than the basic collator
|
||||
#![doc = docify::embed!("../../templates/teyrchain/node/src/service.rs", lookahead_collator)]
|
||||
//!
|
||||
//! 5. In `start_consensus()` replace the `BasicAuraParams` struct with `AuraParams`
|
||||
//! - Change the struct type from `BasicAuraParams` to `AuraParams`
|
||||
//! - In the `para_client` field, pass in a cloned para client rather than the original
|
||||
//! - Add a `para_backend` parameter after `para_client`, passing in our para backend
|
||||
//! - Provide a `code_hash_provider` closure like that shown below
|
||||
//! - Increase `authoring_duration` from 500 milliseconds to 2000
|
||||
//! ```ignore
|
||||
//! let params = AuraParams {
|
||||
//! ..
|
||||
//! para_client: client.clone(),
|
||||
//! para_backend: backend.clone(),
|
||||
//! ..
|
||||
//! code_hash_provider: move |block_hash| {
|
||||
//! client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash())
|
||||
//! },
|
||||
//! ..
|
||||
//! authoring_duration: Duration::from_millis(2000),
|
||||
//! ..
|
||||
//! };
|
||||
//! ```
|
||||
//!
|
||||
//! **Note:** Set `authoring_duration` to whatever you want, taking your own hardware into account.
|
||||
//! But if the backer who should be slower than you due to reading from disk, times out at two
|
||||
//! seconds your candidates will be rejected.
|
||||
//!
|
||||
//! 6. In `start_consensus()` replace `basic_aura::run` with `aura::run`
|
||||
//! ```ignore
|
||||
//! let fut =
|
||||
//! aura::run::<Block, sp_consensus_aura::sr25519::AuthorityPair, _, _, _, _, _, _, _, _, _>(
|
||||
//! params,
|
||||
//! );
|
||||
//! task_manager.spawn_essential_handle().spawn("aura", None, fut);
|
||||
//! ```
|
||||
//!
|
||||
//! ## Phase 3 - Activate Async Backing
|
||||
//!
|
||||
//! This phase consists of changes to your teyrchain’s runtime that activate async backing feature.
|
||||
//!
|
||||
//! 1. Configure `pallet_aura`, setting `AllowMultipleBlocksPerSlot` to true in
|
||||
//! `runtime/src/lib.rs`.
|
||||
#![doc = docify::embed!("../../templates/teyrchain/runtime/src/configs/mod.rs", aura_config)]
|
||||
//!
|
||||
//! 2. Increase the maximum `UNINCLUDED_SEGMENT_CAPACITY` in `runtime/src/lib.rs`.
|
||||
#![doc = docify::embed!("../../templates/teyrchain/runtime/src/lib.rs", async_backing_params)]
|
||||
//!
|
||||
//! 3. Decrease `MILLISECS_PER_BLOCK` to 6000.
|
||||
//!
|
||||
//! - Note: For a teyrchain which measures time in terms of its own block number rather than by
|
||||
//! relay block number it may be preferable to increase velocity. Changing block time may cause
|
||||
//! complications, requiring additional changes. See the section “Timing by Block Number”.
|
||||
#![doc = docify::embed!("../../templates/teyrchain/runtime/src/lib.rs", block_times)]
|
||||
//!
|
||||
//! 4. Update `MAXIMUM_BLOCK_WEIGHT` to reflect the increased time available for block production.
|
||||
#![doc = docify::embed!("../../templates/teyrchain/runtime/src/lib.rs", max_block_weight)]
|
||||
//!
|
||||
//! 5. Add a feature flagged alternative for `MinimumPeriod` in `pallet_timestamp`. The type should
|
||||
//! be `ConstU64<0>` with the feature flag experimental, and `ConstU64<{SLOT_DURATION / 2}>`
|
||||
//! without.
|
||||
//! ```ignore
|
||||
//! impl pallet_timestamp::Config for Runtime {
|
||||
//! ..
|
||||
//! #[cfg(feature = "experimental")]
|
||||
//! type MinimumPeriod = ConstU64<0>;
|
||||
//! #[cfg(not(feature = "experimental"))]
|
||||
//! type MinimumPeriod = ConstU64<{ SLOT_DURATION / 2 }>;
|
||||
//! ..
|
||||
//! }
|
||||
//! ```
|
||||
//!
|
||||
//! ## Timing by Block Number
|
||||
//!
|
||||
//! With asynchronous backing it will be possible for teyrchains to opt for a block time of 6
|
||||
//! seconds rather than 12 seconds. But modifying block duration isn’t so simple for a teyrchain
|
||||
//! which was measuring time in terms of its own block number. It could result in expected and
|
||||
//! actual time not matching up, stalling the teyrchain.
|
||||
//!
|
||||
//! One strategy to deal with this issue is to instead rely on relay chain block numbers for timing.
|
||||
//! Relay block number is kept track of by each teyrchain in `pallet-teyrchain-system` with the
|
||||
//! storage value `LastRelayChainBlockNumber`. This value can be obtained and used wherever timing
|
||||
//! based on block number is needed.
|
||||
|
||||
#![deny(rustdoc::broken_intra_doc_links)]
|
||||
#![deny(rustdoc::private_intra_doc_links)]
|
||||
@@ -0,0 +1 @@
|
||||
//! # Changing Consensus
|
||||
@@ -0,0 +1 @@
|
||||
//! # Cumulus Enabled Teyrchain
|
||||
@@ -0,0 +1,182 @@
|
||||
//! # Enable elastic scaling for a teyrchain
|
||||
//!
|
||||
//! <div class="warning">This guide assumes full familiarity with Asynchronous Backing and its
|
||||
//! terminology, as defined in <a href="https://docs.pezkuwichain.io/sdk/master/polkadot_sdk_docs/guides/async_backing_guide/index.html">the Pezkuwi SDK Docs</a>.
|
||||
//! </div>
|
||||
//!
|
||||
//! ## Quick introduction to Elastic Scaling
|
||||
//!
|
||||
//! [Elastic scaling](https://www.parity.io/blog/polkadot-web3-cloud) is a feature that enables teyrchains (rollups) to use multiple cores.
|
||||
//! Teyrchains can adjust their usage of core resources on the fly to increase TPS and decrease
|
||||
//! latency.
|
||||
//!
|
||||
//! ### When do you need Elastic Scaling?
|
||||
//!
|
||||
//! Depending on their use case, applications might have an increased need for the following:
|
||||
//! - compute (CPU weight)
|
||||
//! - bandwidth (proof size)
|
||||
//! - lower latency (block time)
|
||||
//!
|
||||
//! ### High throughput (TPS) and lower latency
|
||||
//!
|
||||
//! If the main bottleneck is the CPU, then your teyrchain needs to maximize the compute usage of
|
||||
//! each core while also achieving a lower latency.
|
||||
//! 3 cores provide the best balance between CPU, bandwidth and latency: up to 6s of execution,
|
||||
//! 5MB/s of DA bandwidth and fast block time of just 2 seconds.
|
||||
//!
|
||||
//! ### High bandwidth
|
||||
//!
|
||||
//! Useful for applications that are bottlenecked by bandwidth.
|
||||
//! By using 6 cores, applications can make use of up to 6s of compute, 10MB/s of bandwidth
|
||||
//! while also achieving 1 second block times.
|
||||
//!
|
||||
//! ### Ultra low latency
|
||||
//!
|
||||
//! When latency is the primary requirement, Elastic scaling is currently the only solution. The
|
||||
//! caveat is the efficiency of core time usage decreases as more cores are used.
|
||||
//!
|
||||
//! For example, using 12 cores enables fast transaction confirmations with 500ms blocks and up to
|
||||
//! 20 MB/s of DA bandwidth.
|
||||
//!
|
||||
//! ## Dependencies
|
||||
//!
|
||||
//! Prerequisites: Pezkuwi-SDK `2509` or newer.
|
||||
//!
|
||||
//! To ensure the security and reliability of your chain when using this feature you need the
|
||||
//! following:
|
||||
//! - An omni-node based collator. This has already become the default choice for collators.
|
||||
//! - UMP signal support.
|
||||
//! [RFC103](https://github.com/polkadot-fellows/RFCs/blob/main/text/0103-introduce-core-index-commitment.md).
|
||||
//! This is mandatory protection against PoV replay attacks.
|
||||
//! - Enabling the relay parent offset feature. This is required to ensure the teyrchain block times
|
||||
//! and transaction in-block confidence are not negatively affected by relay chain forks. Read
|
||||
//! [`crate::guides::handling_teyrchain_forks`] for more information.
|
||||
//! - Block production configuration adjustments.
|
||||
//!
|
||||
//! ### Upgrade to Pezkuwi Omni node
|
||||
//!
|
||||
//! Your collators need to run `pezkuwi-teyrchain` or `pezkuwi-omni-node` with the `--authoring
|
||||
//! slot-based` CLI argument.
|
||||
//! To avoid potential issues and get best performance it is recommeneded to always run the
|
||||
//! latest release on all of the collators.
|
||||
//!
|
||||
//! Further information about omni-node and how to upgrade is available:
|
||||
//! - [high level docs](https://docs.pezkuwichain.io/develop/toolkit/parachains/polkadot-omni-node/)
|
||||
//! - [`crate::reference_docs::omni_node`]
|
||||
//!
|
||||
//! ### UMP signals
|
||||
//!
|
||||
//! UMP signals are now enabled by default in the `teyrchain-system` pallet and are used for
|
||||
//! elastic scaling. You can find more technical details about UMP signals and their usage for
|
||||
//! elastic scaling
|
||||
//! [here](https://github.com/polkadot-fellows/RFCs/blob/main/text/0103-introduce-core-index-commitment.md).
|
||||
//!
|
||||
//! ### Enable the relay parent offset feature
|
||||
//!
|
||||
//! It is recommended to use an offset of `1`, which is sufficient to eliminate any issues
|
||||
//! with relay chain forks.
|
||||
//!
|
||||
//! Configure the relay parent offset like this:
|
||||
//! ```ignore
|
||||
//! /// Build with an offset of 1 behind the relay chain best block.
|
||||
//! const RELAY_PARENT_OFFSET: u32 = 1;
|
||||
//!
|
||||
//! impl cumulus_pallet_teyrchain_system::Config for Runtime {
|
||||
//! // ...
|
||||
//! type RelayParentOffset = ConstU32<RELAY_PARENT_OFFSET>;
|
||||
//! }
|
||||
//! ```
|
||||
//!
|
||||
//! Implement the runtime API to retrieve the offset on the client side.
|
||||
//! ```ignore
|
||||
//! impl cumulus_primitives_core::RelayParentOffsetApi<Block> for Runtime {
|
||||
//! fn relay_parent_offset() -> u32 {
|
||||
//! RELAY_PARENT_OFFSET
|
||||
//! }
|
||||
//! }
|
||||
//! ```
|
||||
//!
|
||||
//! ### Block production configuration
|
||||
//!
|
||||
//! This configuration directly controls the minimum block time and maximum number of cores
|
||||
//! the teyrchain can use.
|
||||
//!
|
||||
//! Example configuration for a 3 core teyrchain:
|
||||
//! ```ignore
|
||||
//! /// The upper limit of how many teyrchain blocks are processed by the relay chain per
|
||||
//! /// parent. Limits the number of blocks authored per slot. This determines the minimum
|
||||
//! /// block time of the teyrchain:
|
||||
//! /// `RELAY_CHAIN_SLOT_DURATION_MILLIS/BLOCK_PROCESSING_VELOCITY`
|
||||
//! const BLOCK_PROCESSING_VELOCITY: u32 = 3;
|
||||
//!
|
||||
//! /// Maximum number of blocks simultaneously accepted by the Runtime, not yet included
|
||||
//! /// into the relay chain.
|
||||
//! const UNINCLUDED_SEGMENT_CAPACITY: u32 = (2 + RELAY_PARENT_OFFSET) *
|
||||
//! BLOCK_PROCESSING_VELOCITY + 1;
|
||||
//!
|
||||
//! /// Relay chain slot duration, in milliseconds.
|
||||
//! const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000;
|
||||
//!
|
||||
//! type ConsensusHook = cumulus_pallet_aura_ext::FixedVelocityConsensusHook<
|
||||
//! Runtime,
|
||||
//! RELAY_CHAIN_SLOT_DURATION_MILLIS,
|
||||
//! BLOCK_PROCESSING_VELOCITY,
|
||||
//! UNINCLUDED_SEGMENT_CAPACITY,
|
||||
//! >;
|
||||
//!
|
||||
//! ```
|
||||
//!
|
||||
//! ### Teyrchain Slot Duration
|
||||
//!
|
||||
//! A common source of confusion is the correct configuration of the `SlotDuration` that is passed
|
||||
//! to `pallet-aura`.
|
||||
//! ```ignore
|
||||
//! impl pallet_aura::Config for Runtime {
|
||||
//! // ...
|
||||
//! type SlotDuration = ConstU64<SLOT_DURATION>;
|
||||
//! }
|
||||
//! ```
|
||||
//!
|
||||
//! The slot duration determines the length of each author's turn and is decoupled from the block
|
||||
//! production interval. During their slot, authors are allowed to produce multiple blocks. **The
|
||||
//! slot duration is required to be at least 6s (same as on the relay chain).**
|
||||
//!
|
||||
//! **Configuration recommendations:**
|
||||
//! - For new teyrchains starting from genesis: use a slot duration of 24 seconds
|
||||
//! - For existing live teyrchains: leave the slot duration unchanged
|
||||
//!
|
||||
//!
|
||||
//! ## Current limitations
|
||||
//!
|
||||
//! ### Maximum execution time per relay chain block.
|
||||
//!
|
||||
//! Since teyrchain block authoring is sequential, the next block can only be built after
|
||||
//! the previous one has been imported.
|
||||
//! At present, a core allows up to 2 seconds of execution per relay chain block.
|
||||
//!
|
||||
//! If we assume a 6s teyrchain slot, and each block takes the full 2 seconds to execute,
|
||||
//! the teyrchain will not be able to fully utilize the compute resources of all 3 cores.
|
||||
//!
|
||||
//! If the collator hardware is faster, it can author and import full blocks more quickly,
|
||||
//! making it possible to utilize even more than 3 cores efficiently.
|
||||
//!
|
||||
//! #### Why?
|
||||
//!
|
||||
//! Within a 6-second teyrchain slot, collators can author multiple teyrchain blocks.
|
||||
//! Before building the first block in a slot, the new block author must import the last
|
||||
//! block produced by the previous author.
|
||||
//! If the import of the last block is not completed before the next relay chain slot starts,
|
||||
//! the new author will build on its parent (assuming it was imported). This will create a fork
|
||||
//! which degrades the teyrchain block confidence and block times.
|
||||
//!
|
||||
//! This means that, on reference hardware, a teyrchain with a slot time of 6s can
|
||||
//! effectively utilize up to 4 seconds of execution per relay chain block, because it needs to
|
||||
//! ensure the next block author has enough time to import the last block.
|
||||
//! Hardware with higher single-core performance can enable a teyrchain to fully utilize more
|
||||
//! cores.
|
||||
//!
|
||||
//! ### Fixed factor scaling.
|
||||
//!
|
||||
//! For true elasticity, a teyrchain needs to acquire more cores when needed in an automated
|
||||
//! manner. This functionality is not yet available in the SDK, thus acquiring additional
|
||||
//! on-demand or bulk cores has to be managed externally.
|
||||
@@ -0,0 +1,88 @@
|
||||
//! # Enable metadata hash verification
|
||||
//!
|
||||
//! This guide will teach you how to enable the metadata hash verification in your runtime.
|
||||
//!
|
||||
//! ## What is metadata hash verification?
|
||||
//!
|
||||
//! Each FRAME based runtime exposes metadata about itself. This metadata is used by consumers of
|
||||
//! the runtime to interpret the state, to construct transactions etc. Part of this metadata are the
|
||||
//! type information. These type information can be used to e.g. decode storage entries or to decode
|
||||
//! a transaction. So, the metadata is quite useful for wallets to interact with a FRAME based
|
||||
//! chain. Online wallets can fetch the metadata directly from any node of the chain they are
|
||||
//! connected to, but offline wallets can not do this. So, for the offline wallet to have access to
|
||||
//! the metadata it needs to be transferred and stored on the device. The problem is that the
|
||||
//! metadata has a size of several hundreds of kilobytes, which takes quite a while to transfer to
|
||||
//! these offline wallets and the internal storage of these devices is also not big enough to store
|
||||
//! the metadata for one or more networks. The next problem is that the offline wallet/user can not
|
||||
//! trust the metadata to be correct. It is very important for the metadata to be correct or
|
||||
//! otherwise an attacker could change them in a way that the offline wallet decodes a transaction
|
||||
//! in a different way than what it will be decoded to on chain. So, the user may sign an incorrect
|
||||
//! transaction leading to unexpected behavior.
|
||||
//!
|
||||
//! The metadata hash verification circumvents the issues of the huge metadata and the need to trust
|
||||
//! some metadata blob to be correct. To generate a hash for the metadata, the metadata is chunked,
|
||||
//! these chunks are put into a merkle tree and then the root of this merkle tree is the "metadata
|
||||
//! hash". For a more technical explanation on how it works, see
|
||||
//! [RFC78](https://polkadot-fellows.github.io/RFCs/approved/0078-merkleized-metadata.html). At compile
|
||||
//! time the metadata hash is generated and "baked" into the runtime. This makes it extremely cheap
|
||||
//! for the runtime to verify on chain that the metadata hash is correct. By having the runtime
|
||||
//! verify the hash on chain, the user also doesn't need to trust the offchain metadata. If the
|
||||
//! metadata hash doesn't match the on chain metadata hash the transaction will be rejected. The
|
||||
//! metadata hash itself is added to the data of the transaction that is signed, this means the
|
||||
//! actual hash does not appear in the transaction. On chain the same procedure is repeated with the
|
||||
//! metadata hash that is known by the runtime and if the metadata hash doesn't match the signature
|
||||
//! verification will fail. As the metadata hash is actually the root of a merkle tree, the offline
|
||||
//! wallet can get proofs of individual types to decode a transaction. This means that the offline
|
||||
//! wallet does not require the entire metadata to be present on the device.
|
||||
//!
|
||||
//! ## Integrating metadata hash verification into your runtime
|
||||
//!
|
||||
//! The integration of the metadata hash verification is split into two parts, first the actual
|
||||
//! integration into the runtime and secondly the enabling of the metadata hash generation at
|
||||
//! compile time.
|
||||
//!
|
||||
//! ### Runtime integration
|
||||
//!
|
||||
//! From the runtime side only the
|
||||
//! [`CheckMetadataHash`](frame_metadata_hash_extension::CheckMetadataHash) needs to be added to the
|
||||
//! list of signed extension:
|
||||
#![doc = docify::embed!("../../templates/teyrchain/runtime/src/lib.rs", template_signed_extra)]
|
||||
//!
|
||||
//! > **Note:**
|
||||
//! >
|
||||
//! > Adding the signed extension changes the encoding of the transaction and adds one extra byte
|
||||
//! > per transaction!
|
||||
//!
|
||||
//! This signed extension will make sure to decode the requested `mode` and will add the metadata
|
||||
//! hash to the signed data depending on the requested `mode`. The `mode` gives the user/wallet
|
||||
//! control over deciding if the metadata hash should be verified or not. The metadata hash itself
|
||||
//! is drawn from the `RUNTIME_METADATA_HASH` environment variable. If the environment variable is
|
||||
//! not set, any transaction that requires the metadata hash is rejected with the error
|
||||
//! `CannotLookup`. This is a security measurement to prevent including invalid transactions.
|
||||
//!
|
||||
//! <div class="warning">
|
||||
//!
|
||||
//! The extension does not work with the native runtime, because the
|
||||
//! `RUNTIME_METADATA_HASH` environment variable is not set when building the
|
||||
//! `frame-metadata-hash-extension` crate.
|
||||
//!
|
||||
//! </div>
|
||||
//!
|
||||
//! ### Enable metadata hash generation
|
||||
//!
|
||||
//! The metadata hash generation needs to be enabled when building the wasm binary. The
|
||||
//! `substrate-wasm-builder` supports this out of the box:
|
||||
#![doc = docify::embed!("../../templates/teyrchain/runtime/build.rs", template_enable_metadata_hash)]
|
||||
//!
|
||||
//! > **Note:**
|
||||
//! >
|
||||
//! > The `metadata-hash` feature needs to be enabled for the `substrate-wasm-builder` to enable the
|
||||
//! > code for being able to generate the metadata hash. It is also recommended to put the metadata
|
||||
//! > hash generation behind a feature in the runtime as shown above. The reason behind is that it
|
||||
//! > adds a lot of code which increases the compile time and the generation itself also increases
|
||||
//! > the compile time. Thus, it is recommended to enable the feature only when the metadata hash is
|
||||
//! > required (e.g. for an on-chain build).
|
||||
//!
|
||||
//! The two parameters to `enable_metadata_hash` are the token symbol and the number of decimals of
|
||||
//! the primary token of the chain. These information are included for the wallets to show token
|
||||
//! related operations in a more user friendly way.
|
||||
@@ -0,0 +1,88 @@
|
||||
//! # Enable storage weight reclaiming
|
||||
//!
|
||||
//! This guide will teach you how to enable storage weight reclaiming for a teyrchain. The
|
||||
//! explanations in this guide assume a project structure similar to the one detailed in
|
||||
//! the [substrate documentation](crate::pezkuwi_sdk::substrate#anatomy-of-a-binary-crate). Full
|
||||
//! technical details are available in the original [pull request](https://github.com/paritytech/polkadot-sdk/pull/3002).
|
||||
//!
|
||||
//! # What is PoV reclaim?
|
||||
//! When a teyrchain submits a block to a relay chain like Pezkuwi or Kusama, it sends the block
|
||||
//! itself and a storage proof. Together they form the Proof-of-Validity (PoV). The PoV allows the
|
||||
//! relay chain to validate the teyrchain block by re-executing it. Relay chain
|
||||
//! validators distribute this PoV among themselves over the network. This distribution is costly
|
||||
//! and limits the size of the storage proof. The storage weight dimension of FRAME weights reflects
|
||||
//! this cost and limits the size of the storage proof. However, the storage weight determined
|
||||
//! during [benchmarking](crate::reference_docs::frame_benchmarking_weight) represents the worst
|
||||
//! case. In reality, runtime operations often consume less space in the storage proof. PoV reclaim
|
||||
//! offers a mechanism to reclaim the difference between the benchmarked worst-case and the real
|
||||
//! proof-size consumption.
|
||||
//!
|
||||
//!
|
||||
//! # How to enable PoV reclaim
|
||||
//! ## 1. Add the host function to your node
|
||||
//!
|
||||
//! To reclaim excess storage weight, a teyrchain runtime needs the
|
||||
//! ability to fetch the size of the storage proof from the node. The reclaim
|
||||
//! mechanism uses the
|
||||
//! [`storage_proof_size`](cumulus_primitives_proof_size_hostfunction::storage_proof_size)
|
||||
//! host function for this purpose. For convenience, cumulus provides
|
||||
//! [`TeyrchainHostFunctions`](cumulus_client_service::TeyrchainHostFunctions), a set of
|
||||
//! host functions typically used by cumulus-based teyrchains. In the binary crate of your
|
||||
//! teyrchain, find the instantiation of the [`WasmExecutor`](sc_executor::WasmExecutor) and set the
|
||||
//! correct generic type.
|
||||
//!
|
||||
//! This example from the teyrchain-template shows a type definition that includes the correct
|
||||
//! host functions.
|
||||
#![doc = docify::embed!("../../templates/teyrchain/node/src/service.rs", wasm_executor)]
|
||||
//!
|
||||
//! > **Note:**
|
||||
//! >
|
||||
//! > If you see error `runtime requires function imports which are not present on the host:
|
||||
//! > 'env:ext_storage_proof_size_storage_proof_size_version_1'`, it is likely
|
||||
//! > that this step in the guide was not set up correctly.
|
||||
//!
|
||||
//! ## 2. Enable storage proof recording during import
|
||||
//!
|
||||
//! The reclaim mechanism reads the size of the currently recorded storage proof multiple times
|
||||
//! during block authoring and block import. Proof recording during authoring is already enabled on
|
||||
//! teyrchains. You must also ensure that storage proof recording is enabled during block import.
|
||||
//! Find where your node builds the fundamental substrate components by calling
|
||||
//! [`new_full_parts`](sc_service::new_full_parts). Replace this
|
||||
//! with [`new_full_parts_record_import`](sc_service::new_full_parts_record_import) and
|
||||
//! pass `true` as the last parameter to enable import recording.
|
||||
#![doc = docify::embed!("../../templates/teyrchain/node/src/service.rs", component_instantiation)]
|
||||
//!
|
||||
//! > **Note:**
|
||||
//! >
|
||||
//! > If you see error `Storage root must match that calculated.` during block import, it is likely
|
||||
//! > that this step in the guide was not
|
||||
//! > set up correctly.
|
||||
//!
|
||||
//! ## 3. Add the TransactionExtension to your runtime
|
||||
//!
|
||||
//! In your runtime, you will find a list of TransactionExtensions.
|
||||
//! To enable the reclaiming,
|
||||
//! set [`StorageWeightReclaim`](cumulus_pallet_weight_reclaim::StorageWeightReclaim)
|
||||
//! as a wrapper of that list.
|
||||
//! It is necessary that this extension wraps all the other transaction extensions in order to catch
|
||||
//! the whole PoV size of the transactions.
|
||||
//! The extension will check the size of the storage proof before and after an extrinsic execution.
|
||||
//! It reclaims the difference between the calculated size and the benchmarked size.
|
||||
#![doc = docify::embed!("../../templates/teyrchain/runtime/src/lib.rs", template_signed_extra)]
|
||||
//!
|
||||
//! ## Optional: Verify that reclaim works
|
||||
//!
|
||||
//! Start your node with the log target `runtime::storage_reclaim` set to `trace` to enable full
|
||||
//! logging for `StorageWeightReclaim`. The following log is an example from a local testnet. To
|
||||
//! trigger the log, execute any extrinsic on the network.
|
||||
//!
|
||||
//! ```ignore
|
||||
//! ...
|
||||
//! 2024-04-22 17:31:48.014 TRACE runtime::storage_reclaim: [ferdie] Reclaiming storage weight. benchmarked: 3593, consumed: 265 unspent: 0
|
||||
//! ...
|
||||
//! ```
|
||||
//!
|
||||
//! In the above example we see a benchmarked size of 3593 bytes, while the extrinsic only consumed
|
||||
//! 265 bytes of proof size. This results in 3328 bytes of reclaim.
|
||||
#![deny(rustdoc::broken_intra_doc_links)]
|
||||
#![deny(rustdoc::private_intra_doc_links)]
|
||||
@@ -0,0 +1,90 @@
|
||||
//! # Teyrchain forks
|
||||
//!
|
||||
//! In this guide, we will examine how AURA-based teyrchains handle forks. AURA (Authority Round) is
|
||||
//! a consensus mechanism where block authors rotate at fixed time intervals. Each author gets a
|
||||
//! predetermined time slice during which they are allowed to author a block. On its own, this
|
||||
//! mechanism is fork-free.
|
||||
//!
|
||||
//! However, since the relay chain provides security and serves as the source of truth for
|
||||
//! teyrchains, the teyrchain is dependent on it. This relationship can introduce complexities that
|
||||
//! lead to forking scenarios.
|
||||
//!
|
||||
//! ## Background
|
||||
//! Each teyrchain block has a relay parent, which is a relay chain block that provides context to
|
||||
//! our teyrchain block. The constraints the relay chain imposes on our teyrchain can cause forks
|
||||
//! under certain conditions. With asynchronous-backing enabled chains, the node side is building
|
||||
//! blocks on all relay chain forks. This means that no matter which fork of the relay chain
|
||||
//! ultimately progressed, the teyrchain would have a block ready for that fork. The situation
|
||||
//! changes when teyrchains want to produce blocks at a faster cadence. In a scenario where a
|
||||
//! teyrchain might author on 3 cores with elastic scaling, it is not possible to author on all
|
||||
//! relay chain forks. The time constraints do not allow it. Building on two forks would result in 6
|
||||
//! blocks. The authoring of these blocks would consume more time than we have available before the
|
||||
//! next relay chain block arrives. This limitation requires a more fork-resistant approach to
|
||||
//! block-building.
|
||||
//!
|
||||
//! ## Impact of Forks
|
||||
//! When a relay chain fork occurs and the teyrchain builds on a fork that will not be extended in
|
||||
//! the future, the blocks built on that fork are lost and need to be rebuilt. This increases
|
||||
//! latency and reduces throughput, affecting the overall performance of the teyrchain.
|
||||
//!
|
||||
//! # Building on Older Relay Parents
|
||||
//! Cumulus offers a way to mitigate the occurrence of forks. Instead of picking a block at the tip
|
||||
//! of the relay chain to build blocks, the node side can pick a relay chain block that is older. By
|
||||
//! building on 12s old relay chain blocks, forks will already have settled and the teyrchain can
|
||||
//! build fork-free.
|
||||
//!
|
||||
//! ```text
|
||||
//! Without offset:
|
||||
//! Relay Chain: A --- B --- C --- D --- E
|
||||
//! \
|
||||
//! --- D' --- E'
|
||||
//! Teyrchain: X --- Y --- ? (builds on both D and D', wasting resources)
|
||||
//!
|
||||
//! With offset (2 blocks):
|
||||
//! Relay Chain: A --- B --- C --- D --- E
|
||||
//! \
|
||||
//! --- D' --- E'
|
||||
//! Teyrchain: X(A) - Y (B) - Z (on C, fork already resolved)
|
||||
//! ```
|
||||
//! **Note:** It is possible that relay chain forks extend over more than 1-2 blocks. However, it is
|
||||
//! unlikely.
|
||||
//! ## Tradeoffs
|
||||
//! Fork-free teyrchains come with a few tradeoffs:
|
||||
//! - The latency of incoming XCM messages will be delayed by `N * 6s`, where `N` is the number of
|
||||
//! relay chain blocks we want to offset by. For example, by building 2 relay chain blocks behind
|
||||
//! the tip, the XCM latency will be increased by 12 seconds.
|
||||
//! - The available PoV space will be slightly reduced. Assuming a 10mb PoV, teyrchains need to be
|
||||
//! ready to sacrifice around 0.5% of PoV space.
|
||||
//!
|
||||
//! ## Enabling Guide
|
||||
//! The decision whether the teyrchain should build on older relay parents is embedded into the
|
||||
//! runtime. After the changes are implemented, the runtime will enforce that no author can build
|
||||
//! with an offset smaller than the desired offset. If you wish to keep your current teyrchain
|
||||
//! behaviour and do not want aforementioned tradeoffs, set the offset to 0.
|
||||
//!
|
||||
//! **Note:** The APIs mentioned here are available in `pezkuwi-sdk` versions after `stable-2506`.
|
||||
//!
|
||||
//! 1. Define the relay parent offset your teyrchain should respect in the runtime.
|
||||
//! ```ignore
|
||||
//! const RELAY_PARENT_OFFSET: u32 = 2;
|
||||
//! ```
|
||||
//! 2. Pass this constant to the `teyrchain-system` pallet.
|
||||
//!
|
||||
//! ```ignore
|
||||
//! impl cumulus_pallet_teyrchain_system::Config for Runtime {
|
||||
//! // Other config items here
|
||||
//! ...
|
||||
//! type RelayParentOffset = ConstU32<RELAY_PARENT_OFFSET>;
|
||||
//! }
|
||||
//! ```
|
||||
//! 3. Implement the `RelayParentOffsetApi` runtime API for your runtime.
|
||||
//!
|
||||
//! ```ignore
|
||||
//! impl cumulus_primitives_core::RelayParentOffsetApi<Block> for Runtime {
|
||||
//! fn relay_parent_offset() -> u32 {
|
||||
//! RELAY_PARENT_OFFSET
|
||||
//! }
|
||||
//! }
|
||||
//! ```
|
||||
//! 4. Increase the `UNINCLUDED_SEGMENT_CAPACITY` for your runtime. It needs to be increased by
|
||||
//! `RELAY_PARENT_OFFSET * BLOCK_PROCESSING_VELOCITY`.
|
||||