diff --git a/.gitignore b/.gitignore index f582b758a..9e485924f 100644 --- a/.gitignore +++ b/.gitignore @@ -41,3 +41,4 @@ public/sitemap* coverage .env.local +.source diff --git a/content/authors.json b/content/authors.json new file mode 100644 index 000000000..41df08b51 --- /dev/null +++ b/content/authors.json @@ -0,0 +1,10 @@ +[ + { + "id": "unboxed", + "title": "Unboxed Software with updates from the Solana Foundation", + "github": "Unboxed-Software", + "twitter": "unboxedsoftware", + "website": "https://www.beunboxed.com/", + "image": "unboxed.png" + } +] diff --git a/content/cookbook/accounts/calculate-rent.mdx b/content/cookbook/accounts/calculate-rent.mdx new file mode 100644 index 000000000..80ede2379 --- /dev/null +++ b/content/cookbook/accounts/calculate-rent.mdx @@ -0,0 +1,47 @@ +--- +title: How to Calculate Account Creation Cost +description: + "Every time you create an account, that creation costs an amount of SOL. Learn + how to calculate how much an account costs at creation." +--- + +Keeping accounts alive on Solana incurs a storage cost called rent. For the +calculation, you need to consider the amount of data you intend to store in the +account. Rent can be reclaimed in full if the account is closed. + + + + + +```typescript title="calculate-rent.ts" +import { createSolanaRpc } from "@solana/web3.js"; + +const rpc = createSolanaRpc("https://api.devnet.solana.com"); +// 1.5k bytes +const space = 1500n; + +const lamports = await rpc.getMinimumBalanceForRentExemption(space).send(); +console.log("Minimum balance for rent exception:", lamports); +``` + + + + + +```typescript +import { Connection, clusterApiUrl } from "@solana/web3.js"; + +const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + +// length of data in bytes in the account to calculate rent for +const dataLength = 1500; +const rentExemptionAmount = + await connection.getMinimumBalanceForRentExemption(dataLength); +console.log({ + rentExemptionAmount, +}); +``` + + + + diff --git a/content/cookbook/accounts/close-account.mdx b/content/cookbook/accounts/close-account.mdx new file mode 100644 index 000000000..1171b4fa2 --- /dev/null +++ b/content/cookbook/accounts/close-account.mdx @@ -0,0 +1,44 @@ +--- +title: How to Close an Account +description: + "When an account is no longer needed, you can close the account to reclaim the + rent. Learn how to close accounts efficiently on Solana." +--- + +Closing accounts enables you to reclaim the SOL that was used to open the +account, but requires deleting of all information in the account. When an +account is closed, make sure that the data is zeroed out in the same instruction +to avoid people reopening the account in the same transaction and getting access +to the data. This is because the account is not actually closed until the +transaction is completed. 
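As a minimal sketch of that caveat (assuming a hypothetical helper that receives the
`AccountInfo` of the account being closed), the account's data can be overwritten with
zeros before its lamports are moved:

```rust
use solana_program::{account_info::AccountInfo, entrypoint::ProgramResult};

// Hypothetical helper: overwrite the account's data with zeros before closing it,
// so the bytes cannot be read if the account is revived later in the same transaction.
fn zero_account_data(account: &AccountInfo) -> ProgramResult {
    let mut data = account.try_borrow_mut_data()?;
    data.fill(0);
    Ok(())
}
```

The full example below also reclaims the lamports, reassigns the account to the System
Program, and shrinks its data to zero length.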
```rust title="close-account.rs" {18-25}
use solana_program::{
    account_info::next_account_info, account_info::AccountInfo, entrypoint,
    entrypoint::ProgramResult, pubkey::Pubkey, system_program,
};

entrypoint!(process_instruction);

fn process_instruction(
    _program_id: &Pubkey,
    accounts: &[AccountInfo],
    _instruction_data: &[u8],
) -> ProgramResult {
    let account_info_iter = &mut accounts.iter();

    let source_account_info = next_account_info(account_info_iter)?;
    let dest_account_info = next_account_info(account_info_iter)?;

    let dest_starting_lamports = dest_account_info.lamports();
    **dest_account_info.lamports.borrow_mut() = dest_starting_lamports
        .checked_add(source_account_info.lamports())
        .unwrap();
    **source_account_info.lamports.borrow_mut() = 0;

    source_account_info.assign(&system_program::ID);
    source_account_info.realloc(0, false)?;

    Ok(())
}
```

diff --git a/content/cookbook/accounts/create-account.mdx b/content/cookbook/accounts/create-account.mdx new file mode 100644 index 000000000..b846e38b8 --- /dev/null +++ b/content/cookbook/accounts/create-account.mdx @@ -0,0 +1,149 @@

---
title: How to Create an Account
description:
  "Accounts are the basic building blocks of anything on Solana. Learn how to
  create accounts on the Solana blockchain."
---

Creating an account requires using the System Program `createAccount`
instruction. The Solana runtime grants an account's owner program access to
write to its data or transfer its lamports. When creating an account, we have
to preallocate a fixed storage space in bytes (space) and enough lamports to
cover the rent.

```typescript title="create-account.ts"
import {
  pipe,
  createSolanaRpc,
  appendTransactionMessageInstructions,
  createSolanaRpcSubscriptions,
  createTransactionMessage,
  generateKeyPairSigner,
  getSignatureFromTransaction,
  sendAndConfirmTransactionFactory,
  setTransactionMessageFeePayerSigner,
  setTransactionMessageLifetimeUsingBlockhash,
  signTransactionMessageWithSigners,
} from "@solana/web3.js";
import { getSetComputeUnitPriceInstruction } from "@solana-program/compute-budget";
import {
  getCreateAccountInstruction,
  SYSTEM_PROGRAM_ADDRESS,
} from "@solana-program/system";

const rpc = createSolanaRpc("https://api.devnet.solana.com");
const rpcSubscriptions = createSolanaRpcSubscriptions(
  "wss://api.devnet.solana.com",
);

const sendAndConfirmTransaction = sendAndConfirmTransactionFactory({
  rpc,
  rpcSubscriptions,
});

const space = 0n; // any extra space in the account
const rentLamports = await rpc.getMinimumBalanceForRentExemption(space).send();
console.log("Minimum balance for rent exemption:", rentLamports);

// todo: load your own signer with SOL
const signer = await generateKeyPairSigner();

// generate a new keypair and address to create
const newAccountKeypair = await generateKeyPairSigner();
console.log("New account address:", newAccountKeypair.address);

const { value: latestBlockhash } = await rpc.getLatestBlockhash().send();

const transactionMessage = pipe(
  createTransactionMessage({ version: "legacy" }),
  tx => setTransactionMessageFeePayerSigner(signer, tx),
  tx => setTransactionMessageLifetimeUsingBlockhash(latestBlockhash, tx),
  tx =>
    appendTransactionMessageInstructions(
      [
        // add a priority fee
        getSetComputeUnitPriceInstruction({
          microLamports: 200_000,
        }),
        // create the new account
        getCreateAccountInstruction({
          lamports: rentLamports,
          newAccount:
newAccountKeypair, + payer: signer, + space: space, + // "wallet" accounts are owned by the system program + programAddress: SYSTEM_PROGRAM_ADDRESS, + }), + ], + tx, + ), +); + +const signedTransaction = + await signTransactionMessageWithSigners(transactionMessage); +const signature = getSignatureFromTransaction(signedTransaction); + +await sendAndConfirmTransaction(signedTransaction, { + commitment: "confirmed", +}); +console.log("Signature:", signature); +``` + + + + + +```typescript title="create-account.ts" +import { + SystemProgram, + Keypair, + Transaction, + sendAndConfirmTransaction, + Connection, + clusterApiUrl, + LAMPORTS_PER_SOL, +} from "@solana/web3.js"; + +const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); +const fromPubkey = Keypair.generate(); + +// Airdrop SOL for transferring lamports to the created account +const airdropSignature = await connection.requestAirdrop( + fromPubkey.publicKey, + LAMPORTS_PER_SOL, +); +await connection.confirmTransaction(airdropSignature); + +// amount of space to reserve for the account +const space = 0; + +// Seed the created account with lamports for rent exemption +const rentExemptionAmount = + await connection.getMinimumBalanceForRentExemption(space); + +const newAccountPubkey = Keypair.generate(); +const createAccountParams = { + fromPubkey: fromPubkey.publicKey, + newAccountPubkey: newAccountPubkey.publicKey, + lamports: rentExemptionAmount, + space, + programId: SystemProgram.programId, +}; + +const createAccountTransaction = new Transaction().add( + SystemProgram.createAccount(createAccountParams), +); + +await sendAndConfirmTransaction(connection, createAccountTransaction, [ + fromPubkey, + newAccountPubkey, +]); +``` + + + + diff --git a/content/cookbook/accounts/create-pda-account.mdx b/content/cookbook/accounts/create-pda-account.mdx new file mode 100644 index 000000000..7b9f0d8e4 --- /dev/null +++ b/content/cookbook/accounts/create-pda-account.mdx @@ -0,0 +1,151 @@ +--- +title: How to Create a PDA's Account +description: + "Program Derived Addresses, also known as PDAs, enable developers to extend + their program's functionality with program-owned accounts. Learn how to create + accounts at PDAs on Solana." +--- + +Accounts found at Program Derived Addresses (PDAs) can only be created on-chain. +The accounts have addresses that have an associated off-curve public key, but no +secret key. + +To generate a PDA, use `findProgramAddressSync` with your required seeds. +Generating with the same seeds will always generate the same PDA. + +## Generating a PDA + +```typescript title="generate-pda.ts" +import { PublicKey } from "@solana/web3.js"; + +const programId = new PublicKey("G1DCNUQTSGHehwdLCAmRyAG8hf51eCHrLNUqkgGKYASj"); + +let [pda, bump] = PublicKey.findProgramAddressSync( + [Buffer.from("test")], + programId, +); +console.log(`bump: ${bump}, pubkey: ${pda.toBase58()}`); +// you will find the result is different from `createProgramAddress`. 
+// It is expected because the real seed we used to calculate is ["test" + bump] +``` + +## Create an Account at a PDA + +### Program + +```rust title="create-pda.rs" {24-37} +use solana_program::{ + account_info::next_account_info, account_info::AccountInfo, entrypoint, + entrypoint::ProgramResult, program::invoke_signed, pubkey::Pubkey, system_instruction, sysvar::{rent::Rent, Sysvar} +}; + +entrypoint!(process_instruction); + +fn process_instruction( + program_id: &Pubkey, + accounts: &[AccountInfo], + instruction_data: &[u8], +) -> ProgramResult { + let account_info_iter = &mut accounts.iter(); + + let payer_account_info = next_account_info(account_info_iter)?; + let pda_account_info = next_account_info(account_info_iter)?; + let rent_sysvar_account_info = &Rent::from_account_info(next_account_info(account_info_iter)?)?; + + // find space and minimum rent required for account + let space = instruction_data[0]; + let bump = instruction_data[1]; + let rent_lamports = rent_sysvar_account_info.minimum_balance(space.into()); + + invoke_signed( + &system_instruction::create_account( + &payer_account_info.key, + &pda_account_info.key, + rent_lamports, + space.into(), + program_id + ), + &[ + payer_account_info.clone(), + pda_account_info.clone() + ], + &[&[&payer_account_info.key.as_ref(), &[bump]]] + )?; + + Ok(()) +} +``` + +## Client + +```typescript title="create-pda.ts" +import { + clusterApiUrl, + Connection, + Keypair, + Transaction, + SystemProgram, + PublicKey, + TransactionInstruction, + LAMPORTS_PER_SOL, + SYSVAR_RENT_PUBKEY, +} from "@solana/web3.js"; + +(async () => { + // program id + const programId = new PublicKey( + "7ZP42kRwUQ2zgbqXoaXzAFaiQnDyp6swNktTSv8mNQGN", + ); + + // connection + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + + // setup fee payer + const feePayer = Keypair.generate(); + const feePayerAirdropSignature = await connection.requestAirdrop( + feePayer.publicKey, + LAMPORTS_PER_SOL, + ); + await connection.confirmTransaction(feePayerAirdropSignature); + + // setup pda + let [pda, bump] = await PublicKey.findProgramAddress( + [feePayer.publicKey.toBuffer()], + programId, + ); + console.log(`bump: ${bump}, pubkey: ${pda.toBase58()}`); + + const data_size = 0; + + let tx = new Transaction().add( + new TransactionInstruction({ + keys: [ + { + pubkey: feePayer.publicKey, + isSigner: true, + isWritable: true, + }, + { + pubkey: pda, + isSigner: false, + isWritable: true, + }, + { + pubkey: SYSVAR_RENT_PUBKEY, + isSigner: false, + isWritable: false, + }, + { + pubkey: SystemProgram.programId, + isSigner: false, + isWritable: false, + }, + ], + data: Buffer.from(new Uint8Array([data_size, bump])), + programId: programId, + }), + ); + + console.log(`txhash: ${await connection.sendTransaction(tx, [feePayer])}`); +})(); +``` diff --git a/content/cookbook/accounts/get-account-balance.mdx b/content/cookbook/accounts/get-account-balance.mdx new file mode 100644 index 000000000..7f13fd3bb --- /dev/null +++ b/content/cookbook/accounts/get-account-balance.mdx @@ -0,0 +1,50 @@ +--- +title: How to Get Account Balance +description: + "Every account on Solana has a balance of SOL stored. Learn how to retrieve + that account balance on Solana." 
+--- + + + + + +```typescript title="get-account-balance.ts" +import { address, createSolanaRpc } from "@solana/web3.js"; + +const rpc = createSolanaRpc("https://api.devnet.solana.com"); +const LAMPORTS_PER_SOL = 1_000_000_000; // 1 billion lamports per SOL + +const wallet = address("nicktrLHhYzLmoVbuZQzHUTicd2sfP571orwo9jfc8c"); +const { value: balance } = await rpc.getBalance(wallet).send(); +console.log(`Balance: ${Number(balance) / LAMPORTS_PER_SOL} SOL`); +``` + +> As of `v2.0.0`, developers can use the default configurations within the main +> library (`@solana/web3.js`) or import any of its subpackages where better +> composition or more granular control over the imports is desired. See +> [Tree-Shakability](https://github.com/solana-labs/solana-web3.js?tab=readme-ov-file#tree-shakability) +> for more information. + + + + + +```typescript title="get-account-balance.ts" +import { + clusterApiUrl, + Connection, + PublicKey, + LAMPORTS_PER_SOL, +} from "@solana/web3.js"; + +const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); +const wallet = new PublicKey("nicktrLHhYzLmoVbuZQzHUTicd2sfP571orwo9jfc8c"); + +const balance = await connection.getBalance(wallet); +console.log(`Balance: ${balance / LAMPORTS_PER_SOL} SOL`); +``` + + + + diff --git a/content/cookbook/accounts/meta.json b/content/cookbook/accounts/meta.json new file mode 100644 index 000000000..afbed64bc --- /dev/null +++ b/content/cookbook/accounts/meta.json @@ -0,0 +1,12 @@ +{ + "title": "Accounts", + "pages": [ + "create-account", + "calculate-rent", + "create-pda-account", + "sign-with-pda", + "close-account", + "get-account-balance" + ], + "defaultOpen": true +} diff --git a/content/cookbook/accounts/sign-with-pda.mdx b/content/cookbook/accounts/sign-with-pda.mdx new file mode 100644 index 000000000..665d1e0f4 --- /dev/null +++ b/content/cookbook/accounts/sign-with-pda.mdx @@ -0,0 +1,52 @@ +--- +title: How to Sign with a PDA's Account +description: + "A main feature of accounts at Program Derived Addresses is the ability for + programs to sign using those accounts. Learn how to sign with PDA accounts on + Solana." +--- + +Program derived addresses (PDA) can be used to have accounts owned by programs +that can sign. This is useful if you want a program to own a token account and +you want the program to transfer tokens from one account to another. 
+ +```rust title="sign-with-pda.rs" {22-34} +use solana_program::{ + account_info::next_account_info, account_info::AccountInfo, entrypoint, + entrypoint::ProgramResult, program::invoke_signed, pubkey::Pubkey, system_instruction, +}; + +entrypoint!(process_instruction); + +fn process_instruction( + _program_id: &Pubkey, + accounts: &[AccountInfo], + instruction_data: &[u8], +) -> ProgramResult { + let account_info_iter = &mut accounts.iter(); + + let pda_account_info = next_account_info(account_info_iter)?; + let to_account_info = next_account_info(account_info_iter)?; + let system_program_account_info = next_account_info(account_info_iter)?; + + // pass bump seed for saving compute budget + let bump_seed = instruction_data[0]; + + invoke_signed( + &system_instruction::transfer( + &pda_account_info.key, + &to_account_info.key, + 100_000_000, // 0.1 SOL + ), + &[ + pda_account_info.clone(), + to_account_info.clone(), + system_program_account_info.clone(), + ], + &[&[b"escrow", &[bump_seed]]], + )?; + + Ok(()) +} + +``` diff --git a/content/cookbook/development/connect-environment.mdx b/content/cookbook/development/connect-environment.mdx new file mode 100644 index 000000000..0a36b8dbf --- /dev/null +++ b/content/cookbook/development/connect-environment.mdx @@ -0,0 +1,31 @@ +--- +title: Connecting to a Solana Environment +description: "Learn how to connect to a Solana environment." +--- + +When you are working on Solana development, you will need to connect to a +specific RPC API endpoint. Solana has 3 public development environments: + +- mainnet-beta https://api.mainnet-beta.solana.com +- devnet https://api.devnet.solana.com +- testnet https://api.testnet.solana.com + +```typescript title="connect-to-environment.ts" +import { clusterApiUrl, Connection } from "@solana/web3.js"; + +(async () => { + const connection = new Connection(clusterApiUrl("mainnet-beta"), "confirmed"); +})(); +``` + +Finally, you can also connect to a private cluster, either one local or running +remotely with the following: + +```ts +import { Connection } from "@solana/web3.js"; + +(async () => { + // This will connect you to your local validator + const connection = new Connection("http://127.0.0.1:8899", "confirmed"); +})(); +``` diff --git a/content/cookbook/development/load-keypair-from-file.mdx b/content/cookbook/development/load-keypair-from-file.mdx new file mode 100644 index 000000000..839b140d3 --- /dev/null +++ b/content/cookbook/development/load-keypair-from-file.mdx @@ -0,0 +1,85 @@ +--- +title: Load a local json file keypair +description: "Learn how to load a keypair from file." +--- + +When running your local project you probably want to use a file json keypair. +This can be very useful for all the cookbook examples as well. You can grind +yourself a keypair using `solana-keygen grind --starts-with a23:1` and then load +and use this one for your projects using the `loadKeypairFromFile` function. + +```typescript title="load-keypair-from-file.ts" +import { + airdropFactory, + createKeyPairSignerFromBytes, + createSolanaRpc, + createSolanaRpcSubscriptions, + devnet, + generateKeyPair, + getAddressFromPublicKey, + KeyPairSigner, + lamports, +} from "@solana/web3.js"; +import fs from "fs"; +import path from "path"; +import os from "os"; + +// The new library takes a brand-new approach to Solana key pairs and addresses, +// which will feel quite different from the classes PublicKey and Keypair from version 1.x. 
// All key operations now use the native Ed25519 implementation in JavaScript’s
// Web Crypto API.
async function createKeypair() {
  const newKeypair: CryptoKeyPair = await generateKeyPair();
  const publicAddress = await getAddressFromPublicKey(newKeypair.publicKey);

  console.log(`Public key: ${publicAddress}`);
}

export async function loadDefaultKeypair(): Promise<KeyPairSigner> {
  return await loadKeypairFromFile("~/.config/solana/id.json");
}

export async function loadDefaultKeypairWithAirdrop(
  cluster: string,
): Promise<KeyPairSigner> {
  const keypair = await loadDefaultKeypair();
  const rpc = createSolanaRpc(devnet(`https://api.${cluster}.solana.com`));
  const rpcSubscriptions = createSolanaRpcSubscriptions(
    devnet(`wss://api.${cluster}.solana.com`),
  );
  try {
    const result = await rpc.getBalance(keypair.address).send();

    console.log(`Balance: ${result.value} lamports`);
    if (result.value < lamports(500_000n)) {
      console.log(`Balance low, requesting airdrop`);
      const airdrop = airdropFactory({ rpc, rpcSubscriptions });
      await airdrop({
        commitment: "confirmed",
        lamports: lamports(1_000_000_000n),
        recipientAddress: keypair.address,
      });
    }
  } catch (err) {
    console.error("Error fetching balance:", err);
  }
  return keypair;
}

export async function loadKeypairFromFile(
  filePath: string,
): Promise<KeyPairSigner> {
  // This is here so you can also load the default keypair from the file system.
  const resolvedPath = path.resolve(
    filePath.startsWith("~") ? filePath.replace("~", os.homedir()) : filePath,
  );
  const loadedKeyBytes = Uint8Array.from(
    JSON.parse(fs.readFileSync(resolvedPath, "utf8")),
  );
  // Here you can also set the second parameter to true in case you need to extract your private key.
  const keypairSigner = await createKeyPairSignerFromBytes(loadedKeyBytes);
  return keypairSigner;
}

createKeypair();
```

diff --git a/content/cookbook/development/meta.json b/content/cookbook/development/meta.json new file mode 100644 index 000000000..1d7c97830 --- /dev/null +++ b/content/cookbook/development/meta.json @@ -0,0 +1,12 @@

{
  "title": "Development",
  "pages": [
    "start-local-validator",
    "connect-environment",
    "test-sol",
    "subscribing-events",
    "using-mainnet-accounts-programs",
    "load-keypair-from-file"
  ],
  "defaultOpen": true
}

diff --git a/content/cookbook/development/start-local-validator.mdx b/content/cookbook/development/start-local-validator.mdx new file mode 100644 index 000000000..b098b2144 --- /dev/null +++ b/content/cookbook/development/start-local-validator.mdx @@ -0,0 +1,25 @@

---
title: How to Start a Local Validator
description: "Learn how to start a local Solana validator."
---

Testing your program code locally can be a lot more reliable than testing on
devnet, and lets you iterate quickly before deploying there.
+ +You can setup your local-test-validator by installing the +[Solana CLI tool suite](/docs/intro/installation) and running the following +command: + +```shell +solana-test-validator +``` + +Benefits of using local-test-validator include: + +- No RPC rate-limits +- No airdrop limits +- Direct onchain program deployment (`--bpf-program ...`) +- Clone accounts from a public cluster, including programs (`--clone ...`) +- Configurable transaction history retention (`--limit-ledger-size ...`) +- Configurable epoch length (`--slots-per-epoch ...`) +- Jump to an arbitrary slot (`--warp-slot ...`) diff --git a/content/cookbook/development/subscribing-events.mdx b/content/cookbook/development/subscribing-events.mdx new file mode 100644 index 000000000..a9b1094bc --- /dev/null +++ b/content/cookbook/development/subscribing-events.mdx @@ -0,0 +1,43 @@ +--- +title: Subscribing to Events +description: Learn how to subscribe to events in the Solana network. +--- + +Websockets provide a pub/sub interface where you can listen for certain events. +Instead of pinging a typical HTTP endpoint at an interval to get frequent +updates, you can instead receive those updates only when they happen. + +Solana's web3 +[`Connection`](https://solana-labs.github.io/solana-web3.js/v1.x/classes/Connection.html) +under the hood generates a websocket endpoint and registers a websocket client +when you create a new `Connection` instance (see source code +[here](https://github.com/solana-labs/solana-web3.js/blob/45923ca00e4cc1ed079d8e55ecbee83e5b4dc174/src/connection.ts#L2100)). + +The `Connection` class exposes pub/sub methods - they all start with `on`, like +event emitters. When you call these listener methods, it registers a new +subscription to the websocket client of that `Connection` instance. The example +pub/sub method we use below is +[`onAccountChange`](https://solana-labs.github.io/solana-web3.js/v1.x/classes/Connection.html#onAccountChange). +The callback will provide the updated state data through arguments (see +[`AccountChangeCallback`](https://solana-labs.github.io/solana-web3.js/v1.x/types/AccountChangeCallback.html) +as an example). + +```typescript title="subscribe-to-events.ts" +import { clusterApiUrl, Connection, Keypair } from "@solana/web3.js"; + +(async () => { + // Establish new connect to devnet - websocket client connected to devnet will also be registered here + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + + // Create a test wallet to listen to + const wallet = Keypair.generate(); + + // Register a callback to listen to the wallet (ws subscription) + connection.onAccountChange( + wallet.publicKey, + (updatedAccountInfo, context) => + console.log("Updated account info: ", updatedAccountInfo), + "confirmed", + ); +})(); +``` diff --git a/content/cookbook/development/test-sol.mdx b/content/cookbook/development/test-sol.mdx new file mode 100644 index 000000000..27945c725 --- /dev/null +++ b/content/cookbook/development/test-sol.mdx @@ -0,0 +1,29 @@ +--- +title: Getting Test SOL +description: Learn how to get test SOL for development purposes. +--- + +When you're working locally, you need some SOL in order to send transactions. 
In +non-mainnet environments you can receive SOL by airdropping it to your address + +```typescript title="get-test-sol.ts" +import { Connection, Keypair, LAMPORTS_PER_SOL } from "@solana/web3.js"; + +(async () => { + const keypair = Keypair.generate(); + + const connection = new Connection("http://127.0.0.1:8899", "confirmed"); + + const signature = await connection.requestAirdrop( + keypair.publicKey, + LAMPORTS_PER_SOL, + ); + const { blockhash, lastValidBlockHeight } = + await connection.getLatestBlockhash(); + await connection.confirmTransaction({ + blockhash, + lastValidBlockHeight, + signature, + }); +})(); +``` diff --git a/content/cookbook/development/using-mainnet-accounts-programs.mdx b/content/cookbook/development/using-mainnet-accounts-programs.mdx new file mode 100644 index 000000000..19d030b22 --- /dev/null +++ b/content/cookbook/development/using-mainnet-accounts-programs.mdx @@ -0,0 +1,45 @@ +--- +title: Using Mainnet Accounts and Programs +description: + Learn how to use the Mainnet accounts and programs in your local development + environment. +--- + +Oftentimes, local tests rely on programs and accounts that are not available on +the local validator by default. +The Solana CLI allows to both: + +- Download Programs and Accounts +- Load Programs and Accounts to a local validator + +### How to load accounts from mainnet + +It is possible to download the JUP token mint account to file: + +```shell +# solana account -u --output --output-file
solana account -u m --output json-compact --output-file jup.json JUPyiwrYJFskUPiHa7hkeR8VUtAeFoSYbKedZNsDvCN
```

Loading it to your localnet is then done by passing the account's file and
destination address (on the local cluster) when starting the validator:

```shell
# solana-test-validator --account <address to load the account to> <path to account file> --reset
solana-test-validator --account JUPyiwrYJFskUPiHa7hkeR8VUtAeFoSYbKedZNsDvCN jup.json --reset
```

Similarly, it is possible to download the Openbook program:

```shell
# solana program dump -u <source cluster> <address of account to fetch> <destination file name/path>
solana program dump -u m srmqPvymJeFKQ4zGQed1GFppgkRHL9kaELCbyksJtPX openbook.so
```

Loading it to your localnet is then done by passing the program's file and
destination address (on the local cluster) when starting the validator:

```shell
# solana-test-validator --bpf-program <address to load the program to> <path to program file>
--reset +solana-test-validator --bpf-program srmqPvymJeFKQ4zGQed1GFppgkRHL9kaELCbyksJtPX openbook.so --reset +``` diff --git a/content/cookbook/index.mdx b/content/cookbook/index.mdx new file mode 100644 index 000000000..30b1d83be --- /dev/null +++ b/content/cookbook/index.mdx @@ -0,0 +1,109 @@ +--- +title: Solana Cookbook +seoTitle: Solana Cookbook - Code examples for Solana development +description: + "The Solana Cookbook is a collection of code snippets, useful examples, and + references for building on Solana." +--- + +The Solana Cookbook is a developer resource that provides examples and +references for building applications on Solana. Each example and reference will +focus on specific aspects of Solana development while providing additional +details and usage examples. + +## Development Guides + +Development guides help developers set up and interact with the Solana ecosystem +using various tools and clients. + +| Guide | Client | Description | +| ------------------------------------------------------------------------------------------------------- | ---------- | ------------------------------------------ | +| [How to Start a Local Validator](/developers/cookbook/development/start-local-validator) | Solana CLI | Set up and run a local Solana validator | +| [Connecting to a Solana Environment](/developers/cookbook/development/connect-environment) | web3.js | Connect to different Solana networks | +| [Getting Test SOL](/developers/cookbook/development/test-sol) | web3.js | Obtain SOL tokens for testing | +| [Subscribing to Events](/developers/cookbook/development/subscribing-events) | web3.js | Listen to Solana program events | +| [Using Mainnet Accounts and Programs](/developers/cookbook/development/using-mainnet-accounts-programs) | Solana CLI | Work with production accounts and programs | + +## Wallet Management + +Learn how to create, restore, and manage Solana wallets using various tools and +libraries. + +| Guide | Client | Description | +| ---------------------------------------------------------------------------------------------- | -------------- | ----------------------------------- | +| [How to Create a Keypair](/developers/cookbook/wallets/create-keypair) | web3.js | Generate new Solana keypairs | +| [How to Restore a Keypair](/developers/cookbook/wallets/restore-keypair) | web3.js | Recover existing keypairs | +| [How to Verify a Keypair](/developers/cookbook/wallets/verify-keypair) | web3.js | Validate keypair authenticity | +| [How to Validate a Public Key](/developers/cookbook/wallets/check-publickey) | web3.js | Check public key validity | +| [How to Generate Mnemonics for Keypairs](/developers/cookbook/wallets/generate-mnemonic) | bip39 | Create seed phrases | +| [How to Restore a Keypair from a Mnemonic](/developers/cookbook/wallets/restore-from-mnemonic) | web3.js, bip39 | Recover keypairs using seed phrases | +| [How to Generate a Vanity Address](/developers/cookbook/wallets/generate-vanity-address) | Solana CLI | Create custom addresses | +| [How to Sign and Verify a Message](/developers/cookbook/wallets/sign-message) | web3.js | Message signing and verification | +| [How to Connect a Wallet with React](/developers/cookbook/wallets/connect-wallet-react) | React, web3.js | Integrate wallets in React apps | + +## Transaction Operations + +Explore various transaction-related operations on the Solana blockchain. 
+ +| Guide | Client | Description | +| ------------------------------------------------------------------------------------------------ | ------- | ------------------------------ | +| [How to Send SOL](/developers/cookbook/transactions/send-sol) | web3.js | Transfer SOL between accounts | +| [How to Send Tokens](/developers/cookbook/transactions/send-tokens) | web3.js | Transfer SPL tokens | +| [How to Calculate Transaction Cost](/developers/cookbook/transactions/calculate-cost) | web3.js | Estimate transaction fees | +| [How to Add a Memo to a Transaction](/developers/cookbook/transactions/add-memo) | web3.js | Include memos in transactions | +| [How to Add Priority Fees to a Transaction](/developers/cookbook/transactions/add-priority-fees) | web3.js | Set transaction priorities | +| [How to Optimize Compute Requested](/developers/cookbook/transactions/optimize-compute) | web3.js | Improve transaction efficiency | +| [Offline Transactions](/developers/cookbook/transactions/offline-transactions) | web3.js | Handle offline operations | + +## Account Management + +Learn how to manage Solana accounts effectively. + +| Guide | Client | Description | +| -------------------------------------------------------------------------------------- | ------------- | -------------------------- | +| [How to Create an Account](/developers/cookbook/accounts/create-account) | web3.js | Create new Solana accounts | +| [How to Calculate Account Creation Cost](/developers/cookbook/accounts/calculate-rent) | web3.js | Estimate account costs | +| [How to Create a PDA's Account](/developers/cookbook/accounts/create-pda-account) | web3.js, Rust | Work with PDAs | +| [How to Sign with a PDA's Account](/developers/cookbook/accounts/sign-with-pda) | Rust | PDA signing operations | +| [How to Close an Account](/developers/cookbook/accounts/close-account) | Rust | Remove accounts | +| [How to Get Account Balance](/developers/cookbook/accounts/get-account-balance) | web3.js | Check account balances | + +## Program Development + +Develop Solana programs with these comprehensive guides. + +| Guide | Client | Description | +| -------------------------------------------------------------------------------------------- | ------ | ---------------------------- | +| [How to Transfer SOL in a Solana Program](/developers/cookbook/programs/transfer-sol) | Rust | Program-based SOL transfers | +| [How to Get Clock in a Program](/developers/cookbook/programs/clock) | Rust | Access program clock | +| [How to Change Account Size](/developers/cookbook/programs/change-account-size) | Rust | Modify account sizes | +| [How to Do Cross Program Invocation](/developers/cookbook/programs/cross-program-invocation) | Rust | CPI operations | +| [How to Create a Program Derived Address](/developers/cookbook/programs/create-pda) | Rust | Generate PDAs | +| [How to Read Accounts in a Program](/developers/cookbook/programs/read-accounts) | Rust | Account data access | +| [Reading Multiple Instructions](/developers/cookbook/programs/read-multiple-instructions) | Rust | Handle multiple instructions | +| [How to Verify Accounts in a Solana Program](/developers/cookbook/programs/verify-accounts) | Rust | Account verification | + +## Token Operations + +Comprehensive guides for working with tokens on Solana. 
+ +| Guide | Client | Description | +| --------------------------------------------------------------------------------------------------------- | ------------- | ------------------------------ | +| [How to Create a Token](/developers/cookbook/tokens/create-mint-account) | web3.js | Create new tokens | +| [How to Get a Token Mint](/developers/cookbook/tokens/get-token-mint) | web3.js | Access token mints | +| [How to Create a Token Account](/developers/cookbook/tokens/create-token-account) | web3.js | Set up token accounts | +| [How to Get a Token Account](/developers/cookbook/tokens/get-token-account) | web3.js | Retrieve token accounts | +| [How to Get a Token Account's Balance](/developers/cookbook/tokens/get-token-balance) | web3.js, rust | Check token balances | +| [How to Mint Tokens](/developers/cookbook/tokens/mint-tokens) | web3.js | Create new tokens | +| [How to Transfer Tokens](/developers/cookbook/tokens/transfer-tokens) | web3.js | Move tokens between accounts | +| [How to Burn Tokens](/developers/cookbook/tokens/burn-tokens) | web3.js | Remove tokens from circulation | +| [How to Close Token Accounts](/developers/cookbook/tokens/close-token-accounts) | web3.js | Clean up token accounts | +| [How to Set Authority on Token Accounts or Mints](/developers/cookbook/tokens/set-update-token-authority) | web3.js | Manage token permissions | +| [How to Delegate Token Accounts](/developers/cookbook/tokens/approve-token-delegate) | web3.js | Set up token delegation | +| [How to Revoke a Token Delegate](/developers/cookbook/tokens/revoke-token-delegate) | web3.js | Remove delegates | +| [How to Use Wrapped SOL](/developers/cookbook/tokens/manage-wrapped-sol) | web3.js | Work with wrapped SOL | +| [How to Get All Token Accounts by Authority](/developers/cookbook/tokens/get-all-token-accounts) | web3.js | List token accounts | +| [How to Create an NFT](/developers/cookbook/tokens/create-nft) | web3.js | Mint NFTs | +| [How to Fetch the NFT Metadata](/developers/cookbook/tokens/fetch-nft-metadata) | Javascript | Fetch NFT metadata | +| [How to Get the Owner of an NFT](/developers/cookbook/tokens/get-nft-owner) | web3.js | Find NFT ownership | +| [How to Get All NFTs from a Wallet](/developers/cookbook/tokens/fetch-all-nfts) | web3.js | List wallet NFTs | diff --git a/content/cookbook/meta.json b/content/cookbook/meta.json new file mode 100644 index 000000000..6b5d86329 --- /dev/null +++ b/content/cookbook/meta.json @@ -0,0 +1,11 @@ +{ + "title": "Solana Cookbook", + "pages": [ + "development", + "wallets", + "transactions", + "accounts", + "programs", + "tokens" + ] +} diff --git a/content/cookbook/programs/change-account-size.mdx b/content/cookbook/programs/change-account-size.mdx new file mode 100644 index 000000000..4e0786875 --- /dev/null +++ b/content/cookbook/programs/change-account-size.mdx @@ -0,0 +1,131 @@ +--- +title: How to change account size +description: "Learn how to change the size of an account in a Solana program." +--- + +You can change a program owned account's size with the use of `realloc`. +`realloc` can resize an account up to 10KB. When you use `realloc` to increase +the size of an account, you must transfer lamports in order to keep that account +rent-exempt. 
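Before the full example, here is a minimal sketch of that two-step pattern (top up
lamports, then `realloc`), using a hypothetical helper and assuming the resized account
is owned by the program, the payer is a writable signer, and the System Program account
is passed in with the instruction:

```rust
use solana_program::{
    account_info::AccountInfo, entrypoint::ProgramResult, program::invoke, rent::Rent,
    system_instruction, sysvar::Sysvar,
};

/// Grow `target` by `additional_bytes`, topping up lamports so it stays rent-exempt.
fn grow_account<'a>(
    payer: &AccountInfo<'a>,
    target: &AccountInfo<'a>,
    system_program: &AccountInfo<'a>,
    additional_bytes: usize,
) -> ProgramResult {
    let new_size = target.data_len() + additional_bytes;

    // Transfer enough lamports to meet the rent-exempt minimum for the new size
    let required = Rent::get()?.minimum_balance(new_size);
    let diff = required.saturating_sub(target.lamports());
    if diff > 0 {
        invoke(
            &system_instruction::transfer(payer.key, target.key, diff),
            &[payer.clone(), target.clone(), system_program.clone()],
        )?;
    }

    // Resize the account's data; the second argument controls whether the
    // newly added bytes are explicitly zero-initialized
    target.realloc(new_size, false)?;
    Ok(())
}
```

The whitelist example below applies the same steps (transfer the lamport difference,
then `realloc`) whenever a new key is added.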
+ +```rust title="realloc.rs" +use { + crate::{ + instruction::WhitelistInstruction, + state::WhiteListData, + }, + borsh::{BorshDeserialize, BorshSerialize}, + solana_program::{ + account_info::{next_account_info, AccountInfo}, + entrypoint::ProgramResult, + msg, + program::invoke_signed, + program::invoke, + program_error::ProgramError, + pubkey::Pubkey, + sysvar::Sysvar, + sysvar::rent::Rent, + system_instruction, + }, + std::convert::TryInto, +}; + +pub fn process_instruction( + _program_id: &Pubkey, + accounts: &[AccountInfo], + input: &[u8], +) -> ProgramResult { + // Length = BOOL + VEC + Pubkey * n (n = number of keys) + const INITIAL_ACCOUNT_LEN: usize = 1 + 4 + 0 ; + msg!("input: {:?}", input); + + let instruction = WhitelistInstruction::try_from_slice(input)?; + + let accounts_iter = &mut accounts.iter(); + + let funding_account = next_account_info(accounts_iter)?; + let pda_account = next_account_info(accounts_iter)?; + let system_program = next_account_info(accounts_iter)?; + + match instruction { + WhitelistInstruction::Initialize => { + msg!("Initialize"); + + let (pda, pda_bump) = Pubkey::find_program_address( + &[ + b"customaddress", + &funding_account.key.to_bytes(), + ], + _program_id, + ); + + let signers_seeds: &[&[u8]; 3] = &[ + b"customaddress", + &funding_account.key.to_bytes(), + &[pda_bump], + ]; + + if pda.ne(&pda_account.key) { + return Err(ProgramError::InvalidAccountData); + } + + let lamports_required = Rent::get()?.minimum_balance(INITIAL_ACCOUNT_LEN); + let create_pda_account_ix = system_instruction::create_account( + &funding_account.key, + &pda_account.key, + lamports_required, + INITIAL_ACCOUNT_LEN.try_into().unwrap(), + &_program_id, + ); + + invoke_signed( + &create_pda_account_ix, + &[ + funding_account.clone(), + pda_account.clone(), + system_program.clone(), + ], + &[signers_seeds], + )?; + + let mut pda_account_state = WhiteListData::try_from_slice(&pda_account.data.borrow())?; + + pda_account_state.is_initialized = true; + pda_account_state.white_list = Vec::new(); + pda_account_state.serialize(&mut &mut pda_account.data.borrow_mut()[..])?; + Ok(()) + } + WhitelistInstruction::AddKey { key } => { + msg!("AddKey"); + + let mut pda_account_state = WhiteListData::try_from_slice(&pda_account.data.borrow())?; + + if !pda_account_state.is_initialized { + return Err(ProgramError::InvalidAccountData); + } + + let new_size = pda_account.data.borrow().len() + 32; + + let rent = Rent::get()?; + let new_minimum_balance = rent.minimum_balance(new_size); + + let lamports_diff = new_minimum_balance.saturating_sub(pda_account.lamports()); + invoke( + &system_instruction::transfer(funding_account.key, pda_account.key, lamports_diff), + &[ + funding_account.clone(), + pda_account.clone(), + system_program.clone(), + ], + )?; + + pda_account.realloc(new_size, false)?; + + pda_account_state.white_list.push(key); + pda_account_state.serialize(&mut &mut pda_account.data.borrow_mut()[..])?; + + Ok(()) + } + } +} +``` diff --git a/content/cookbook/programs/clock.mdx b/content/cookbook/programs/clock.mdx new file mode 100644 index 000000000..239e35d1b --- /dev/null +++ b/content/cookbook/programs/clock.mdx @@ -0,0 +1,291 @@ +--- +title: How to get clock in a program +description: "Learn how to get the clock in a Solana program." +--- + +Getting a clock (ie, the current time) can be done in two ways: + +1. Passing `SYSVAR_CLOCK_PUBKEY` into an instruction +2. Accessing Clock directly inside an instruction. 
+ +It is nice to know both the methods, because some legacy programs still expect +the `SYSVAR_CLOCK_PUBKEY` as an account. + +## Passing Clock as an account inside an instruction + +Let's create an instruction which receives an account for initializing and the +sysvar pubkey + +```rust title="get-clock-sysvar.rs" +use borsh::{BorshDeserialize, BorshSerialize}; +use solana_program::{ + account_info::{next_account_info, AccountInfo}, + clock::Clock, + entrypoint, + entrypoint::ProgramResult, + msg, + pubkey::Pubkey, + sysvar::Sysvar, +}; + +entrypoint!(process_instruction); + +#[derive(BorshSerialize, BorshDeserialize, Debug)] +pub struct HelloState { + is_initialized: bool, +} + +// Accounts required +/// 1. [signer, writable] Payer +/// 2. [writable] Hello state account +/// 3. [] Clock sys var +pub fn process_instruction( + _program_id: &Pubkey, + accounts: &[AccountInfo], + _instruction_data: &[u8], +) -> ProgramResult { + let accounts_iter = &mut accounts.iter(); + // Payer account + let _payer_account = next_account_info(accounts_iter)?; + // Hello state account + let hello_state_account = next_account_info(accounts_iter)?; + // Clock sysvar + let sysvar_clock_pubkey = next_account_info(accounts_iter)?; + + let mut hello_state = HelloState::try_from_slice(&hello_state_account.data.borrow())?; + hello_state.is_initialized = true; + hello_state.serialize(&mut &mut hello_state_account.data.borrow_mut()[..])?; + msg!("Account initialized :)"); + + // Type casting [AccountInfo] to [Clock] + let clock = Clock::from_account_info(&sysvar_clock_pubkey)?; + // Getting timestamp + let current_timestamp = clock.unix_timestamp; + msg!("Current Timestamp: {}", current_timestamp); + + Ok(()) +} +``` + +Now we pass the clock's sysvar public address via the client + +```typescript title="clock-sysvar-client.ts" +import { + clusterApiUrl, + Connection, + Keypair, + LAMPORTS_PER_SOL, + PublicKey, + SystemProgram, + SYSVAR_CLOCK_PUBKEY, + Transaction, + TransactionInstruction, + sendAndConfirmTransaction, +} from "@solana/web3.js"; + +(async () => { + const programId = new PublicKey( + "77ezihTV6mTh2Uf3ggwbYF2NyGJJ5HHah1GrdowWJVD3", + ); + + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + const latestBlockHash = await connection.getLatestBlockhash(); + + // Airdropping 1 SOL + const feePayer = Keypair.generate(); + await connection.confirmTransaction( + { + blockhash: latestBlockHash.blockhash, + lastValidBlockHeight: latestBlockHash.lastValidBlockHeight, + signature: await connection.requestAirdrop( + feePayer.publicKey, + LAMPORTS_PER_SOL, + ), + }, + "confirmed", + ); + + // Hello state account + const helloAccount = Keypair.generate(); + + const accountSpace = 1; // because there exists just one boolean variable + const rentRequired = + await connection.getMinimumBalanceForRentExemption(accountSpace); + + // Allocating space for hello state account + const allocateHelloAccountIx = SystemProgram.createAccount({ + fromPubkey: feePayer.publicKey, + lamports: rentRequired, + newAccountPubkey: helloAccount.publicKey, + programId: programId, + space: accountSpace, + }); + + // Passing Clock Sys Var + const passClockIx = new TransactionInstruction({ + programId: programId, + keys: [ + { + isSigner: true, + isWritable: true, + pubkey: feePayer.publicKey, + }, + { + isSigner: false, + isWritable: true, + pubkey: helloAccount.publicKey, + }, + { + isSigner: false, + isWritable: false, + pubkey: SYSVAR_CLOCK_PUBKEY, + }, + ], + }); + + const transaction = new Transaction(); + 
transaction.add(allocateHelloAccountIx, passClockIx); + + const txHash = await sendAndConfirmTransaction(connection, transaction, [ + feePayer, + helloAccount, + ]); + + console.log(`Transaction succeeded. TxHash: ${txHash}`); +})(); +``` + +## Accessing Clock directly inside an instruction + +Let's create the same instruction, but without expecting the +`SYSVAR_CLOCK_PUBKEY` from the client side. + +```rust title="get-clock-directly.rs" +use borsh::{BorshDeserialize, BorshSerialize}; +use solana_program::{ + account_info::{next_account_info, AccountInfo}, + clock::Clock, + entrypoint, + entrypoint::ProgramResult, + msg, + pubkey::Pubkey, + sysvar::Sysvar, +}; + +entrypoint!(process_instruction); + +#[derive(BorshSerialize, BorshDeserialize, Debug)] +pub struct HelloState { + is_initialized: bool, +} + +// Accounts required +/// 1. [signer, writable] Payer +/// 2. [writable] Hello state account +pub fn process_instruction( + _program_id: &Pubkey, + accounts: &[AccountInfo], + _instruction_data: &[u8], +) -> ProgramResult { + let accounts_iter = &mut accounts.iter(); + // Payer account + let _payer_account = next_account_info(accounts_iter)?; + // Hello state account + let hello_state_account = next_account_info(accounts_iter)?; + + // Getting clock directly + let clock = Clock::get()?; + + let mut hello_state = HelloState::try_from_slice(&hello_state_account.data.borrow())?; + hello_state.is_initialized = true; + hello_state.serialize(&mut &mut hello_state_account.data.borrow_mut()[..])?; + msg!("Account initialized :)"); + + // Getting timestamp + let current_timestamp = clock.unix_timestamp; + msg!("Current Timestamp: {}", current_timestamp); + + Ok(()) +} +``` + +The client side instruction, now only needs to pass the state and payer +accounts. + +```typescript title="clock-directly-client.rs" +import { + clusterApiUrl, + Connection, + Keypair, + LAMPORTS_PER_SOL, + PublicKey, + SystemProgram, + Transaction, + TransactionInstruction, +} from "@solana/web3.js"; + +(async () => { + const programId = new PublicKey( + "4ZEdbCtb5UyCSiAMHV5eSHfyjq3QwbG3yXb6oHD7RYjk", + ); + + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + const latestBlockHash = await connection.getLatestBlockhash(); + + // Airdropping 1 SOL + const feePayer = Keypair.generate(); + await connection.confirmTransaction( + { + blockhash: latestBlockHash.blockhash, + lastValidBlockHeight: latestBlockHash.lastValidBlockHeight, + signature: await connection.requestAirdrop( + feePayer.publicKey, + LAMPORTS_PER_SOL, + ), + }, + "confirmed", + ); + + // Hello state account + const helloAccount = Keypair.generate(); + + const accountSpace = 1; // because there exists just one boolean variable + const rentRequired = + await connection.getMinimumBalanceForRentExemption(accountSpace); + + // Allocating space for hello state account + const allocateHelloAccountIx = SystemProgram.createAccount({ + fromPubkey: feePayer.publicKey, + lamports: rentRequired, + newAccountPubkey: helloAccount.publicKey, + programId: programId, + space: accountSpace, + }); + + const initIx = new TransactionInstruction({ + programId: programId, + keys: [ + { + isSigner: true, + isWritable: true, + pubkey: feePayer.publicKey, + }, + { + isSigner: false, + isWritable: true, + pubkey: helloAccount.publicKey, + }, + ], + }); + + const transaction = new Transaction(); + transaction.add(allocateHelloAccountIx, initIx); + + const txHash = await sendAndConfirmTransaction(connection, transaction, [ + feePayer, + helloAccount, + ]); + + 
console.log(`Transaction succeeded. TxHash: ${txHash}`);
})();
```

diff --git a/content/cookbook/programs/create-pda.mdx b/content/cookbook/programs/create-pda.mdx new file mode 100644 index 000000000..3a6a04753 --- /dev/null +++ b/content/cookbook/programs/create-pda.mdx @@ -0,0 +1,180 @@

---
title: How to create a Program Derived Address
description:
  "Learn how to create a Program Derived Address (PDA) in a Solana program."
---

A Program Derived Address is simply an account owned by the program, but with
no private key. Instead, its signature is obtained by a set of seeds and a bump
(a nonce which makes sure it's 'off curve', i.e. not a valid public key).
"**Generating**" a Program Address is different from "**creating**" it. One can
generate a PDA using `Pubkey::find_program_address`. Creating a PDA essentially
means initializing the address with space and setting its state. A normal
Keypair account can be created outside of our program and then fed to
initialize its state. Unfortunately, a PDA has to be created on chain, due to
the nature of not being able to sign on behalf of itself. Hence we use
`invoke_signed` to pass the seeds of the PDA, along with the funding account's
signature, which results in account creation of a PDA.

```rust title="create-pda.rs"
use borsh::{BorshDeserialize, BorshSerialize};
use solana_program::{
    account_info::{next_account_info, AccountInfo},
    entrypoint,
    entrypoint::ProgramResult,
    program::invoke_signed,
    program_error::ProgramError,
    pubkey::Pubkey,
    rent::Rent,
    system_instruction,
    sysvar::Sysvar,
};

entrypoint!(process_instruction);

#[derive(BorshSerialize, BorshDeserialize, Debug)]
pub struct HelloState {
    is_initialized: bool,
}

// Accounts required
/// 1. [signer, writable] Funding account
/// 2. [writable] PDA account
/// 3.
[] System Program +pub fn process_instruction( + program_id: &Pubkey, + accounts: &[AccountInfo], + instruction_data: &[u8], +) -> ProgramResult { + const ACCOUNT_DATA_LEN: usize = 1; + + let accounts_iter = &mut accounts.iter(); + // Getting required accounts + let funding_account = next_account_info(accounts_iter)?; + let pda_account = next_account_info(accounts_iter)?; + let system_program = next_account_info(accounts_iter)?; + + // Getting PDA Bump from instruction data + let (pda_bump, _) = instruction_data + .split_first() + .ok_or(ProgramError::InvalidInstructionData)?; + + // Checking if passed PDA and expected PDA are equal + let signers_seeds: &[&[u8]; 3] = &[ + b"customaddress", + &funding_account.key.to_bytes(), + &[*pda_bump], + ]; + let pda = Pubkey::create_program_address(signers_seeds, program_id)?; + + if pda.ne(&pda_account.key) { + return Err(ProgramError::InvalidAccountData); + } + + // Assessing required lamports and creating transaction instruction + let lamports_required = Rent::get()?.minimum_balance(ACCOUNT_DATA_LEN); + let create_pda_account_ix = system_instruction::create_account( + &funding_account.key, + &pda_account.key, + lamports_required, + ACCOUNT_DATA_LEN.try_into().unwrap(), + &program_id, + ); + // Invoking the instruction but with PDAs as additional signer + invoke_signed( + &create_pda_account_ix, + &[ + funding_account.clone(), + pda_account.clone(), + system_program.clone(), + ], + &[signers_seeds], + )?; + + // Setting state for PDA + let mut pda_account_state = HelloState::try_from_slice(&pda_account.data.borrow())?; + pda_account_state.is_initialized = true; + pda_account_state.serialize(&mut &mut pda_account.data.borrow_mut()[..])?; + + Ok(()) +} +``` + +One can send the required accounts via client as follows + +```typescript title="create-pda-client.ts" +import { + clusterApiUrl, + Connection, + Keypair, + LAMPORTS_PER_SOL, + PublicKey, + sendAndConfirmTransaction, + SystemProgram, + Transaction, + TransactionInstruction, +} from "@solana/web3.js"; + +const PAYER_KEYPAIR = Keypair.generate(); + +(async () => { + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + const latestBlockHash = await connection.getLatestBlockhash(); + const programId = new PublicKey( + "6eW5nnSosr2LpkUGCdznsjRGDhVb26tLmiM1P8RV1QQp", + ); + + // Airdrop to Payer + await connection.confirmTransaction( + { + blockhash: latestBlockHash.blockhash, + lastValidBlockHeight: latestBlockHash.lastValidBlockHeight, + signature: await connection.requestAirdrop( + PAYER_KEYPAIR.publicKey, + LAMPORTS_PER_SOL, + ), + }, + "confirmed", + ); + + const [pda, bump] = await PublicKey.findProgramAddress( + [Buffer.from("customaddress"), PAYER_KEYPAIR.publicKey.toBuffer()], + programId, + ); + + console.log(`PDA Pubkey: ${pda.toString()}`); + + const createPDAIx = new TransactionInstruction({ + programId: programId, + data: Buffer.from(Uint8Array.of(bump)), + keys: [ + { + isSigner: true, + isWritable: true, + pubkey: PAYER_KEYPAIR.publicKey, + }, + { + isSigner: false, + isWritable: true, + pubkey: pda, + }, + { + isSigner: false, + isWritable: false, + pubkey: SystemProgram.programId, + }, + ], + }); + + const transaction = new Transaction(); + transaction.add(createPDAIx); + + const txHash = await sendAndConfirmTransaction(connection, transaction, [ + PAYER_KEYPAIR, + ]); + console.log(`Created PDA successfully. 
Tx Hash: ${txHash}`); +})(); +``` diff --git a/content/cookbook/programs/cross-program-invocation.mdx b/content/cookbook/programs/cross-program-invocation.mdx new file mode 100644 index 000000000..5ac9934dd --- /dev/null +++ b/content/cookbook/programs/cross-program-invocation.mdx @@ -0,0 +1,478 @@ +--- +title: How to do Cross Program Invocation +description: "Learn how to do Cross Program Invocation in Solana programs." +--- + +A cross program invocation, is simply put calling another program's instruction +inside our program. One best example to put forth is Uniswap's `swap` +functionality. The `UniswapV2Router` contract, calls the necessary logic to +swap, and calls the `ERC20` contract's transfer function to swap from one person +to another. The same way, we can call a program's instruction to have multitude +of purposes. + +Lets have a look at our first example which is the +`SPL Token Program's transfer` instruction. The required accounts we would need +for a transfer to happen are + +1. The Source Token Account (The account which we are holding our tokens) +2. The Destination Token Account (The account which we would be transferring our + tokens to) +3. The Source Token Account's Holder (Our wallet address which we would be + signing for) + +```rust title="cpi-transfer.rs" +use solana_program::{ + account_info::{next_account_info, AccountInfo}, + entrypoint, + entrypoint::ProgramResult, + msg, + program::invoke, + program_error::ProgramError, + pubkey::Pubkey, +}; +use spl_token::instruction::transfer; + +entrypoint!(process_instruction); + +// Accounts required +/// 1. [writable] Source Token Account +/// 2. [writable] Destination Token Account +/// 3. [signer] Source Token Account holder's PubKey +/// 4. [] Token Program +pub fn process_instruction( + _program_id: &Pubkey, + accounts: &[AccountInfo], + instruction_data: &[u8], +) -> ProgramResult { + let accounts_iter = &mut accounts.iter(); + + // Accounts required for token transfer + + // 1. Token account we hold + let source_token_account = next_account_info(accounts_iter)?; + // 2. Token account to send to + let destination_token_account = next_account_info(accounts_iter)?; + // 3. Our wallet address + let source_token_account_holder = next_account_info(accounts_iter)?; + // 4. Token Program + let token_program = next_account_info(accounts_iter)?; + + // Parsing the token transfer amount from instruction data + // a. Getting the 0th to 8th index of the u8 byte array + // b. Converting the obtained non zero u8 to a proper u8 (as little endian integers) + // c. 
Converting the little endian integers to a u64 number + let token_transfer_amount = instruction_data + .get(..8) + .and_then(|slice| slice.try_into().ok()) + .map(u64::from_le_bytes) + .ok_or(ProgramError::InvalidAccountData)?; + + msg!( + "Transferring {} tokens from {} to {}", + token_transfer_amount, + source_token_account.key.to_string(), + destination_token_account.key.to_string() + ); + + // Creating a new TransactionInstruction + /* + Internal representation of the instruction's return value (Result) + + Ok(Instruction { + program_id: *token_program_id, // PASSED FROM USER + accounts, + data, + }) + */ + + let transfer_tokens_instruction = transfer( + &token_program.key, + &source_token_account.key, + &destination_token_account.key, + &source_token_account_holder.key, + &[&source_token_account_holder.key], + token_transfer_amount, + )?; + + let required_accounts_for_transfer = [ + source_token_account.clone(), + destination_token_account.clone(), + source_token_account_holder.clone(), + ]; + + // Passing the TransactionInstruction to send + invoke( + &transfer_tokens_instruction, + &required_accounts_for_transfer, + )?; + + msg!("Transfer successful"); + + Ok(()) +} +``` + +The corresponding client instruction would be as follows. For knowing the mint +and token creation instructions, please refer to the full code nearby. + +```typescript title="cpi-transfer-client.ts" +import { + clusterApiUrl, + Connection, + Keypair, + LAMPORTS_PER_SOL, + PublicKey, + SystemProgram, + sendAndConfirmTransaction + Transaction, + TransactionInstruction, +} from "@solana/web3.js"; +import { + AccountLayout, + MintLayout, + Token, + TOKEN_PROGRAM_ID, + u64, +} from "@solana/spl-token"; + +import * as BN from "bn.js"; + +// Users +const PAYER_KEYPAIR = Keypair.generate(); +const RECEIVER_KEYPAIR = Keypair.generate().publicKey; + +// Mint and token accounts +const TOKEN_MINT_ACCOUNT = Keypair.generate(); +const SOURCE_TOKEN_ACCOUNT = Keypair.generate(); +const DESTINATION_TOKEN_ACCOUNT = Keypair.generate(); + +// Numbers +const DEFAULT_DECIMALS_COUNT = 9; +const TOKEN_TRANSFER_AMOUNT = 50 * 10 ** DEFAULT_DECIMALS_COUNT; +const TOKEN_TRANSFER_AMOUNT_BUFFER = Buffer.from( + Uint8Array.of(...new BN(TOKEN_TRANSFER_AMOUNT).toArray("le", 8)) +); + +(async () => { + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + const programId = new PublicKey( + "EfYK91eN3AqTwY1C34W6a33qGAtQ8HJYVhNv7cV4uMZj" + ); + + const mintDataSpace = MintLayout.span; + const mintRentRequired = await connection.getMinimumBalanceForRentExemption( + mintDataSpace + ); + + const tokenDataSpace = AccountLayout.span; + const tokenRentRequired = await connection.getMinimumBalanceForRentExemption( + tokenDataSpace + ); + + // Airdropping 1 SOL + const feePayer = Keypair.generate(); + await connection.confirmTransaction({ + blockhash: latestBlockHash.blockhash, + lastValidBlockHeight: latestBlockHash.lastValidBlockHeight, + signature: await connection.requestAirdrop(feePayer.publicKey, LAMPORTS_PER_SOL), + }, + 'confirmed', + ); + + + // Allocating space and rent for mint account + const createMintAccountIx = SystemProgram.createAccount({ + fromPubkey: PAYER_KEYPAIR.publicKey, + lamports: mintRentRequired, + newAccountPubkey: TOKEN_MINT_ACCOUNT.publicKey, + programId: TOKEN_PROGRAM_ID, + space: mintDataSpace, + }); + + // Initializing mint with decimals and authority + const initializeMintIx = Token.createInitMintInstruction( + TOKEN_PROGRAM_ID, + TOKEN_MINT_ACCOUNT.publicKey, + DEFAULT_DECIMALS_COUNT, + 
PAYER_KEYPAIR.publicKey, // mintAuthority + PAYER_KEYPAIR.publicKey // freezeAuthority + ); + + // Allocating space and rent for source token account + const createSourceTokenAccountIx = SystemProgram.createAccount({ + fromPubkey: PAYER_KEYPAIR.publicKey, + newAccountPubkey: SOURCE_TOKEN_ACCOUNT.publicKey, + lamports: tokenRentRequired, + programId: TOKEN_PROGRAM_ID, + space: tokenDataSpace, + }); + + // Initializing token account with mint and owner + const initializeSourceTokenAccountIx = Token.createInitAccountInstruction( + TOKEN_PROGRAM_ID, + TOKEN_MINT_ACCOUNT.publicKey, + SOURCE_TOKEN_ACCOUNT.publicKey, + PAYER_KEYPAIR.publicKey + ); + + // Minting tokens to the source token account for transferring later to destination account + const mintTokensIx = Token.createMintToInstruction( + TOKEN_PROGRAM_ID, + TOKEN_MINT_ACCOUNT.publicKey, + SOURCE_TOKEN_ACCOUNT.publicKey, + PAYER_KEYPAIR.publicKey, + [PAYER_KEYPAIR], + TOKEN_TRANSFER_AMOUNT + ); + + // Allocating space and rent for destination token account + const createDestinationTokenAccountIx = SystemProgram.createAccount({ + fromPubkey: PAYER_KEYPAIR.publicKey, + newAccountPubkey: DESTINATION_TOKEN_ACCOUNT.publicKey, + lamports: tokenRentRequired, + programId: TOKEN_PROGRAM_ID, + space: tokenDataSpace, + }); + + // Initializing token account with mint and owner + const initializeDestinationTokenAccountIx = + Token.createInitAccountInstruction( + TOKEN_PROGRAM_ID, + TOKEN_MINT_ACCOUNT.publicKey, + DESTINATION_TOKEN_ACCOUNT.publicKey, + RECEIVER_KEYPAIR + ); + + // Our program's CPI instruction (transfer) + const transferTokensIx = new TransactionInstruction({ + programId: programId, + data: TOKEN_TRANSFER_AMOUNT_BUFFER, + keys: [ + { + isSigner: false, + isWritable: true, + pubkey: SOURCE_TOKEN_ACCOUNT.publicKey, + }, + { + isSigner: false, + isWritable: true, + pubkey: DESTINATION_TOKEN_ACCOUNT.publicKey, + }, + { + isSigner: true, + isWritable: true, + pubkey: PAYER_KEYPAIR.publicKey, + }, + { + isSigner: false, + isWritable: false, + pubkey: TOKEN_PROGRAM_ID, + }, + ], + }); + + const transaction = new Transaction(); + // Adding up all the above instructions + transaction.add( + createMintAccountIx, + initializeMintIx, + createSourceTokenAccountIx, + initializeSourceTokenAccountIx, + mintTokensIx, + createDestinationTokenAccountIx, + initializeDestinationTokenAccountIx, + transferTokensIx + ); + + const txHash = await sendAndConfirmTransaction(connection, transaction, [ + PAYER_KEYPAIR, + TOKEN_MINT_ACCOUNT, + SOURCE_TOKEN_ACCOUNT, + DESTINATION_TOKEN_ACCOUNT, + ]); + + console.log(`Token transfer CPI success: ${txHash}`); +})(); +``` + +Now let's take a look at another example, which is +`System Program's create_account` instruction. There is a slight difference +between the above mentioned instruction and this. There, we never had to pass +the `token_program` as one of the accounts inside the `invoke` function. +However, there are exceptions where you are required to pass the invoking +instruction's `program_id`. In our case it would be the `System Program's` +program_id. ("11111111111111111111111111111111"). So now the required accounts +would be + +1. The payer account who funds the rent +2. The account which is going to be created +3. 
System Program account + +```rust title="cpi-create-account.rs" +use solana_program::{ + account_info::{next_account_info, AccountInfo}, + entrypoint, + entrypoint::ProgramResult, + msg, + program::invoke, + program_error::ProgramError, + pubkey::Pubkey, + rent::Rent, + system_instruction::create_account, + sysvar::Sysvar, +}; + +entrypoint!(process_instruction); + +// Accounts required +/// 1. [signer, writable] Payer Account +/// 2. [signer, writable] General State Account +/// 3. [] System Program +pub fn process_instruction( + program_id: &Pubkey, + accounts: &[AccountInfo], + instruction_data: &[u8], +) -> ProgramResult { + let accounts_iter = &mut accounts.iter(); + + // Accounts required for token transfer + + // 1. Payer account for the state account creation + let payer_account = next_account_info(accounts_iter)?; + // 2. Token account we hold + let general_state_account = next_account_info(accounts_iter)?; + // 3. System Program + let system_program = next_account_info(accounts_iter)?; + + msg!( + "Creating account for {}", + general_state_account.key.to_string() + ); + + // Parsing the token transfer amount from instruction data + // a. Getting the 0th to 8th index of the u8 byte array + // b. Converting the obtained non zero u8 to a proper u8 (as little endian integers) + // c. Converting the little endian integers to a u64 number + let account_span = instruction_data + .get(..8) + .and_then(|slice| slice.try_into().ok()) + .map(u64::from_le_bytes) + .ok_or(ProgramError::InvalidAccountData)?; + + let lamports_required = (Rent::get()?).minimum_balance(account_span as usize); + + // Creating a new TransactionInstruction + /* + Internal representation of the instruction's return value (Instruction) + + Instruction::new_with_bincode( + system_program::id(), // NOT PASSED FROM USER + &SystemInstruction::CreateAccount { + lamports, + space, + owner: *owner, + }, + account_metas, + ) + */ + + let create_account_instruction = create_account( + &payer_account.key, + &general_state_account.key, + lamports_required, + account_span, + program_id, + ); + + let required_accounts_for_create = [ + payer_account.clone(), + general_state_account.clone(), + system_program.clone(), + ]; + + // Passing the TransactionInstruction to send (with the issued program_id) + invoke(&create_account_instruction, &required_accounts_for_create)?; + + msg!("Transfer successful"); + + Ok(()) +} +``` + +The respective client side code will look as follows + +```typescript title="cpi-create-account-client.ts" +import { clusterApiUrl, Connection, Keypair } from "@solana/web3.js"; +import { LAMPORTS_PER_SOL, PublicKey, SystemProgram } from "@solana/web3.js"; +import { + Transaction, + TransactionInstruction, + sendAndConfirmTransaction, +} from "@solana/web3.js"; + +import * as BN from "bn.js"; + +// Users +const PAYER_KEYPAIR = Keypair.generate(); +const GENERAL_STATE_KEYPAIR = Keypair.generate(); + +const ACCOUNT_SPACE_BUFFER = Buffer.from( + Uint8Array.of(...new BN(100).toArray("le", 8)), +); + +(async () => { + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + const latestBlockHash = await connection.getLatestBlockhash(); + const programId = new PublicKey( + "DkuQ5wsndkzXfgqDB6Lgf4sDjBi4gkLSak1dM5Mn2RuQ", + ); + + // Airdropping 1 SOL + const feePayer = Keypair.generate(); + await connection.confirmTransaction( + { + blockhash: latestBlockHash.blockhash, + lastValidBlockHeight: latestBlockHash.lastValidBlockHeight, + signature: await connection.requestAirdrop( + feePayer.publicKey, + 
LAMPORTS_PER_SOL,
+      ),
+    },
+    "confirmed",
+  );
+
+  // Our program's CPI instruction (create_account)
+  const createAccountIx = new TransactionInstruction({
+    programId: programId,
+    data: ACCOUNT_SPACE_BUFFER,
+    keys: [
+      {
+        isSigner: true,
+        isWritable: true,
+        pubkey: PAYER_KEYPAIR.publicKey,
+      },
+      {
+        isSigner: true,
+        isWritable: true,
+        pubkey: GENERAL_STATE_KEYPAIR.publicKey,
+      },
+      {
+        isSigner: false,
+        isWritable: false,
+        pubkey: SystemProgram.programId,
+      },
+    ],
+  });
+
+  const transaction = new Transaction();
+  // Adding up all the above instructions
+  transaction.add(createAccountIx);
+
+  const txHash = await sendAndConfirmTransaction(connection, transaction, [
+    PAYER_KEYPAIR,
+    GENERAL_STATE_KEYPAIR,
+  ]);
+
+  console.log(`Create Account CPI Success: ${txHash}`);
+})();
+```
diff --git a/content/cookbook/programs/meta.json b/content/cookbook/programs/meta.json
new file mode 100644
index 000000000..216bd9cb2
--- /dev/null
+++ b/content/cookbook/programs/meta.json
@@ -0,0 +1,14 @@
+{
+  "title": "Writing Programs",
+  "pages": [
+    "transfer-sol",
+    "clock",
+    "change-account-size",
+    "cross-program-invocation",
+    "create-pda",
+    "read-accounts",
+    "read-multiple-instructions",
+    "verify-accounts"
+  ],
+  "defaultOpen": true
+}
diff --git a/content/cookbook/programs/read-accounts.mdx b/content/cookbook/programs/read-accounts.mdx
new file mode 100644
index 000000000..7817f3d55
--- /dev/null
+++ b/content/cookbook/programs/read-accounts.mdx
@@ -0,0 +1,54 @@
+---
+title: How to read accounts in a program
+description: "Learn how to read accounts in a Solana program."
+---
+
+Almost all instructions in Solana require at least 2 - 3 accounts, and the
+instruction handler documents the order in which it expects those accounts to
+be passed. Parsing them is straightforward if we take advantage of Rust's
+`iter()` method instead of indexing into the accounts slice manually. The
+`next_account_info` function simply takes the next element of the accounts
+iterator and returns that account. Let's look at a simple instruction that
+expects several accounts and parses each of them.
+
+```rust title="read-accounts.rs"
+use borsh::{BorshDeserialize, BorshSerialize};
+use solana_program::{
+    account_info::{next_account_info, AccountInfo},
+    entrypoint,
+    entrypoint::ProgramResult,
+    pubkey::Pubkey,
+};
+
+entrypoint!(process_instruction);
+
+#[derive(BorshSerialize, BorshDeserialize, Debug)]
+pub struct HelloState {
+    is_initialized: bool,
+}
+
+// Accounts required
+/// 1. [signer] Payer
+/// 2. [writable] Hello state account
+/// 3. [] Rent account
+/// 4.
[] System Program +pub fn process_instruction( + _program_id: &Pubkey, + accounts: &[AccountInfo], + _instruction_data: &[u8], +) -> ProgramResult { + // Fetching all the accounts as a iterator (facilitating for loops and iterations) + let accounts_iter = &mut accounts.iter(); + // Payer account + let payer_account = next_account_info(accounts_iter)?; + // Hello state account + let hello_state_account = next_account_info(accounts_iter)?; + // Rent account + let rent_account = next_account_info(accounts_iter)?; + // System Program + let system_program = next_account_info(accounts_iter)?; + + Ok(()) +} +``` diff --git a/content/cookbook/programs/read-multiple-instructions.mdx b/content/cookbook/programs/read-multiple-instructions.mdx new file mode 100644 index 000000000..3b2c1e2be --- /dev/null +++ b/content/cookbook/programs/read-multiple-instructions.mdx @@ -0,0 +1,76 @@ +--- +title: Reading Multiple Instructions +description: + "Learn how to read multiple instructions from a transaction in a Solana + program." +--- + +Solana allows us to take a peek at all of the instructions in the current +transaction. We can store them in a variable and iterate over them. We can do +many things with this, like checking for suspicious transactions. + +```rust title="read-multiple-instructions.rs" +use anchor_lang::{ + prelude::*, + solana_program::{ + sysvar, + serialize_utils::{read_pubkey,read_u16} + } +}; + +declare_id!("8DJXJRV8DBFjJDYyU9cTHBVK1F1CTCi6JUBDVfyBxqsT"); + +#[program] +pub mod cookbook { + use super::*; + + pub fn read_multiple_instruction<'info>(ctx: Context, creator_bump: u8) -> Result<()> { + let instruction_sysvar_account = &ctx.accounts.instruction_sysvar_account; + + let instruction_sysvar_account_info = instruction_sysvar_account.to_account_info(); + + let id = "8DJXJRV8DBFjJDYyU9cTHBVK1F1CTCi6JUBDVfyBxqsT"; + + let instruction_sysvar = instruction_sysvar_account_info.data.borrow(); + + let mut idx = 0; + + let num_instructions = read_u16(&mut idx, &instruction_sysvar) + .map_err(|_| MyError::NoInstructionFound)?; + + for index in 0..num_instructions { + let mut current = 2 + (index * 2) as usize; + let start = read_u16(&mut current, &instruction_sysvar).unwrap(); + + current = start as usize; + let num_accounts = read_u16(&mut current, &instruction_sysvar).unwrap(); + current += (num_accounts as usize) * (1 + 32); + let program_id = read_pubkey(&mut current, &instruction_sysvar).unwrap(); + + if program_id != id + { + msg!("Transaction had ix with program id {}", program_id); + return Err(MyError::SuspiciousTransaction.into()); + } + } + + Ok(()) + } + +} + +#[derive(Accounts)] +#[instruction(creator_bump:u8)] +pub struct ReadMultipleInstruction<'info> { + #[account(address = sysvar::instructions::id())] + instruction_sysvar_account: UncheckedAccount<'info> +} + +#[error_code] +pub enum MyError { + #[msg("No instructions found")] + NoInstructionFound, + #[msg("Suspicious transaction detected")] + SuspiciousTransaction +} +``` diff --git a/content/cookbook/programs/transfer-sol.mdx b/content/cookbook/programs/transfer-sol.mdx new file mode 100644 index 000000000..a9f4ec742 --- /dev/null +++ b/content/cookbook/programs/transfer-sol.mdx @@ -0,0 +1,47 @@ +--- +title: How to Transfer SOL in a Solana Program +description: "Learn how to transfer SOL in a Solana program." +--- + +Your Solana Program can transfer lamports from one account to another without +'invoking' the System program. 
The fundamental rule is that your program can
+transfer lamports from any account **owned** by your program to any account at
+all.
+
+The recipient account _does not have to be_ an account owned by your program.
+
+```rust title="transfer-sol.rs"
+/// Transfers lamports from one account (must be program owned)
+/// to another account. The recipient can be any account
+fn transfer_service_fee_lamports(
+    from_account: &AccountInfo,
+    to_account: &AccountInfo,
+    amount_of_lamports: u64,
+) -> ProgramResult {
+    // Does the from account have enough lamports to transfer?
+    if **from_account.try_borrow_lamports()? < amount_of_lamports {
+        return Err(CustomError::InsufficientFundsForTransaction.into());
+    }
+    // Debit from_account and credit to_account
+    **from_account.try_borrow_mut_lamports()? -= amount_of_lamports;
+    **to_account.try_borrow_mut_lamports()? += amount_of_lamports;
+    Ok(())
+}
+
+/// Primary function handler associated with instruction sent
+/// to your program
+fn instruction_handler(accounts: &[AccountInfo]) -> ProgramResult {
+    // Get the 'from' and 'to' accounts
+    let account_info_iter = &mut accounts.iter();
+    let from_account = next_account_info(account_info_iter)?;
+    let to_service_account = next_account_info(account_info_iter)?;
+
+    // Extract a service 'fee' of 5 lamports for performing this instruction
+    transfer_service_fee_lamports(from_account, to_service_account, 5u64)?;
+
+    // Perform the primary instruction
+    // ... etc.
+
+    Ok(())
+}
+```
diff --git a/content/cookbook/programs/verify-accounts.mdx b/content/cookbook/programs/verify-accounts.mdx
new file mode 100644
index 000000000..8c58c07a6
--- /dev/null
+++ b/content/cookbook/programs/verify-accounts.mdx
@@ -0,0 +1,99 @@
+---
+title: How to verify accounts in a Solana program
+description: "Learn how to verify accounts in a Solana program."
+---
+
+Since programs in Solana are stateless, we as program creators have to make
+sure the accounts passed in are validated as much as possible, to avoid any
+malicious account entry. The basic checks you can do are:
+
+1. Check if the expected signer account has actually signed
+2. Check if the expected state accounts are marked as writable
+3. Check if the expected state account's owner is the invoked program id
+4. If initializing the state for the first time, check whether the account has
+   already been initialized.
+5. Check that any cross-program ids passed (whenever needed) are as expected.
+
+A basic instruction which initializes a hello state account, including the
+above mentioned checks, is defined below
+
+```rust title="verify-accounts.rs"
+use borsh::{BorshDeserialize, BorshSerialize};
+use solana_program::{
+    account_info::{next_account_info, AccountInfo},
+    clock::Clock,
+    entrypoint,
+    entrypoint::ProgramResult,
+    msg,
+    program_error::ProgramError,
+    pubkey::Pubkey,
+    rent::Rent,
+    system_program::ID as SYSTEM_PROGRAM_ID,
+    sysvar::Sysvar,
+};
+
+entrypoint!(process_instruction);
+
+#[derive(BorshSerialize, BorshDeserialize, Debug)]
+pub struct HelloState {
+    is_initialized: bool,
+}
+
+// Accounts required
+/// 1. [signer] Payer
+/// 2. [writable] Hello state account
+/// 3.
[] System Program +pub fn process_instruction( + program_id: &Pubkey, + accounts: &[AccountInfo], + _instruction_data: &[u8], +) -> ProgramResult { + let accounts_iter = &mut accounts.iter(); + // Payer account + let payer_account = next_account_info(accounts_iter)?; + // Hello state account + let hello_state_account = next_account_info(accounts_iter)?; + // System Program + let system_program = next_account_info(accounts_iter)?; + + let rent = Rent::get()?; + + // Checking if payer account is the signer + if !payer_account.is_signer { + return Err(ProgramError::MissingRequiredSignature); + } + + // Checking if hello state account is rent exempt + if !rent.is_exempt(hello_state_account.lamports(), 1) { + return Err(ProgramError::AccountNotRentExempt); + } + + // Checking if hello state account is writable + if !hello_state_account.is_writable { + return Err(ProgramError::InvalidAccountData); + } + + // Checking if hello state account's owner is the current program + if hello_state_account.owner.ne(&program_id) { + return Err(ProgramError::IllegalOwner); + } + + // Checking if the system program is valid + if system_program.key.ne(&SYSTEM_PROGRAM_ID) { + return Err(ProgramError::IncorrectProgramId); + } + + let mut hello_state = HelloState::try_from_slice(&hello_state_account.data.borrow())?; + + // Checking if the state has already been initialized + if hello_state.is_initialized { + return Err(ProgramError::AccountAlreadyInitialized); + } + + hello_state.is_initialized = true; + hello_state.serialize(&mut &mut hello_state_account.data.borrow_mut()[..])?; + msg!("Account initialized :)"); + + Ok(()) +} +``` diff --git a/content/cookbook/tokens/approve-token-delegate.mdx b/content/cookbook/tokens/approve-token-delegate.mdx new file mode 100644 index 000000000..8763abb07 --- /dev/null +++ b/content/cookbook/tokens/approve-token-delegate.mdx @@ -0,0 +1,91 @@ +--- +title: How to Delegate Token Accounts +description: "Learn how to delegate authority of tokens to someone else" +--- + +You can set a delegate with an allowed amount. After you setting, the delegate +is like an another owner of your token account. + + + A token account can only delegate to one account at a time. 
+ + +```typescript title="token-approve.ts" +import { + clusterApiUrl, + Connection, + PublicKey, + Keypair, + Transaction, + sendAndConfirmTransaction, +} from "@solana/web3.js"; +import { + approveChecked, + createApproveCheckedInstruction, +} from "@solana/spl-token"; +import bs58 from "bs58"; + +(async () => { + // connection + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + + // 5YNmS1R9nNSCDzb5a7mMJ1dwK9uHeAAF4CmPEwKgVWr8 + const feePayer = Keypair.fromSecretKey( + bs58.decode( + "588FU4PktJWfGfxtzpAAXywSNt74AvtroVzGfKkVN1LwRuvHwKGr851uH8czM5qm4iqLbs1kKoMKtMJG4ATR7Ld2", + ), + ); + + // G2FAbFQPFa5qKXCetoFZQEvF9BVvCKbvUZvodpVidnoY + const alice = Keypair.fromSecretKey( + bs58.decode( + "4NMwxzmYj2uvHuq8xoqhY8RXg63KSVJM1DXkpbmkUY7YQWuoyQgFnnzn6yo3CMnqZasnNPNuAT2TLwQsCaKkUddp", + ), + ); + + const randomGuy = Keypair.generate(); + + const mintPubkey = new PublicKey( + "8mAKLjGGmjKTnmcXeyr3pr7iX13xXVjJJiL6RujDbSPV", + ); + const tokenAccountPubkey = new PublicKey( + "GMxZfDmpR1b3vdJYXHzdF5noVLQogZuUAsDHHQ3ytPfV", + ); + + // 1) use build-in function + { + let txhash = await approveChecked( + connection, // connection + feePayer, // fee payer + mintPubkey, // mint + tokenAccountPubkey, // token account + randomGuy.publicKey, // delegate + alice, // owner of token account + 1e8, // amount, if your decimals is 8, 10^8 for 1 token + 8, // decimals + ); + console.log(`txhash: ${txhash}`); + } + // or + + // 2) compose by yourself + { + let tx = new Transaction().add( + createApproveCheckedInstruction( + tokenAccountPubkey, // token account + mintPubkey, // mint + randomGuy.publicKey, // delegate + alice.publicKey, // owner of token account + 1e8, // amount, if your decimals is 8, 10^8 for 1 token + 8, // decimals + ), + ); + console.log( + `txhash: ${await sendAndConfirmTransaction(connection, tx, [ + feePayer, + alice /* fee payer + owner */, + ])}`, + ); + } +})(); +``` diff --git a/content/cookbook/tokens/burn-tokens.mdx b/content/cookbook/tokens/burn-tokens.mdx new file mode 100644 index 000000000..d070cfad1 --- /dev/null +++ b/content/cookbook/tokens/burn-tokens.mdx @@ -0,0 +1,81 @@ +--- +title: How to Burn Tokens +description: "Learn how to burn Tokens on Solana" +--- + +You can burn tokens if you are the token account authority. 
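+If you want to burn everything that is left in the account, one approach is to
+read the current balance first and burn exactly that amount. A minimal sketch
+(assuming the same `connection`, `feePayer`, `alice`, `mintPubkey` and
+`tokenAccountPubkey` as in the full example below):
+
+```typescript title="burn-remaining-balance.ts"
+import { burnChecked, getAccount, getMint } from "@solana/spl-token";
+
+// read the token account and the mint to get the balance and decimals
+const tokenAccount = await getAccount(connection, tokenAccountPubkey);
+const mint = await getMint(connection, mintPubkey);
+
+if (tokenAccount.amount > 0n) {
+  const txhash = await burnChecked(
+    connection, // connection
+    feePayer, // payer
+    tokenAccountPubkey, // token account
+    mintPubkey, // mint
+    alice, // owner of the token account
+    tokenAccount.amount, // burn the full remaining balance
+    mint.decimals, // decimals read from the mint
+  );
+  console.log(`burn txhash: ${txhash}`);
+}
+```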
+ +```typescript title="burn-token.ts" +import { + clusterApiUrl, + Connection, + PublicKey, + Keypair, + Transaction, + sendAndConfirmTransaction, +} from "@solana/web3.js"; +import { burnChecked, createBurnCheckedInstruction } from "@solana/spl-token"; +import bs58 from "bs58"; + +(async () => { + // connection + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + + // 5YNmS1R9nNSCDzb5a7mMJ1dwK9uHeAAF4CmPEwKgVWr8 + const feePayer = Keypair.fromSecretKey( + bs58.decode( + "588FU4PktJWfGfxtzpAAXywSNt74AvtroVzGfKkVN1LwRuvHwKGr851uH8czM5qm4iqLbs1kKoMKtMJG4ATR7Ld2", + ), + ); + + // G2FAbFQPFa5qKXCetoFZQEvF9BVvCKbvUZvodpVidnoY + const alice = Keypair.fromSecretKey( + bs58.decode( + "4NMwxzmYj2uvHuq8xoqhY8RXg63KSVJM1DXkpbmkUY7YQWuoyQgFnnzn6yo3CMnqZasnNPNuAT2TLwQsCaKkUddp", + ), + ); + + const mintPubkey = new PublicKey( + "8mAKLjGGmjKTnmcXeyr3pr7iX13xXVjJJiL6RujDbSPV", + ); + + const tokenAccountPubkey = new PublicKey( + "2XYiFjmU1pCXmC2QfEAghk6S7UADseupkNQdnRBXszD5", + ); + + // 1) use build-in function + { + let txhash = await burnChecked( + connection, // connection + feePayer, // payer + tokenAccountPubkey, // token account + mintPubkey, // mint + alice, // owner + 1e8, // amount, if your decimals is 8, 10^8 for 1 token + 8, + ); + console.log(`txhash: ${txhash}`); + } + + // or + + // 2) compose by yourself + { + let tx = new Transaction().add( + createBurnCheckedInstruction( + tokenAccountPubkey, // token account + mintPubkey, // mint + alice.publicKey, // owner of token account + 1e8, // amount, if your decimals is 8, 10^8 for 1 token + 8, // decimals + ), + ); + console.log( + `txhash: ${await sendAndConfirmTransaction(connection, tx, [ + feePayer, + alice /* fee payer + token authority */, + ])}`, + ); + } +})(); +``` diff --git a/content/cookbook/tokens/close-token-accounts.mdx b/content/cookbook/tokens/close-token-accounts.mdx new file mode 100644 index 000000000..2cd050e0c --- /dev/null +++ b/content/cookbook/tokens/close-token-accounts.mdx @@ -0,0 +1,79 @@ +--- +title: How to Close Token Accounts +description: + "Learn how to close token accounts on Solana, including cases such as Wrapped + SOL and empty accounts." +--- + +You can close a token account if you don't want to use it anymore. There are two +situations: + +1. Wrapped SOL - Closing converts Wrapped SOL to SOL +2. Other Tokens - You can close it only if token account's balance is 0. 
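+For the second case it can be worth checking the balance up front, since the
+close instruction fails for a non-empty, non-wrapped token account. A minimal
+sketch (assuming the same `connection`, `feePayer`, `alice` and
+`tokenAccountPubkey` as in the full example below):
+
+```typescript title="close-if-empty.ts"
+import { closeAccount, getAccount } from "@solana/spl-token";
+
+const tokenAccount = await getAccount(connection, tokenAccountPubkey);
+
+if (tokenAccount.amount === 0n) {
+  const txhash = await closeAccount(
+    connection, // connection
+    feePayer, // payer
+    tokenAccountPubkey, // token account to close
+    alice.publicKey, // destination for the reclaimed rent
+    alice, // owner of the token account
+  );
+  console.log(`close txhash: ${txhash}`);
+} else {
+  console.log("token account still holds tokens, transfer or burn them first");
+}
```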
+ +```typescript title="close-token-account.ts" +import { + clusterApiUrl, + Connection, + PublicKey, + Keypair, + Transaction, + sendAndConfirmTransaction, +} from "@solana/web3.js"; +import { closeAccount, createCloseAccountInstruction } from "@solana/spl-token"; +import bs58 from "bs58"; + +(async () => { + // connection + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + + // 5YNmS1R9nNSCDzb5a7mMJ1dwK9uHeAAF4CmPEwKgVWr8 + const feePayer = Keypair.fromSecretKey( + bs58.decode( + "588FU4PktJWfGfxtzpAAXywSNt74AvtroVzGfKkVN1LwRuvHwKGr851uH8czM5qm4iqLbs1kKoMKtMJG4ATR7Ld2", + ), + ); + + // G2FAbFQPFa5qKXCetoFZQEvF9BVvCKbvUZvodpVidnoY + const alice = Keypair.fromSecretKey( + bs58.decode( + "4NMwxzmYj2uvHuq8xoqhY8RXg63KSVJM1DXkpbmkUY7YQWuoyQgFnnzn6yo3CMnqZasnNPNuAT2TLwQsCaKkUddp", + ), + ); + + const tokenAccountPubkey = new PublicKey( + "2XYiFjmU1pCXmC2QfEAghk6S7UADseupkNQdnRBXszD5", + ); + + // 1) use build-in function + { + let txhash = await closeAccount( + connection, // connection + feePayer, // payer + tokenAccountPubkey, // token account which you want to close + alice.publicKey, // destination + alice, // owner of token account + ); + console.log(`txhash: ${txhash}`); + } + + // or + + // 2) compose by yourself + { + let tx = new Transaction().add( + createCloseAccountInstruction( + tokenAccountPubkey, // token account which you want to close + alice.publicKey, // destination + alice.publicKey, // owner of token account + ), + ); + console.log( + `txhash: ${await sendAndConfirmTransaction(connection, tx, [ + feePayer, + alice /* fee payer + owner */, + ])}`, + ); + } +})(); +``` diff --git a/content/cookbook/tokens/create-mint-account.mdx b/content/cookbook/tokens/create-mint-account.mdx new file mode 100644 index 000000000..ffb7d5d70 --- /dev/null +++ b/content/cookbook/tokens/create-mint-account.mdx @@ -0,0 +1,91 @@ +--- +title: How to Create a Token +description: "Learn how to create tokens on Solana." +altRoutes: + - /developers/cookbook/tokens +--- + +Creating tokens is done by creating what is called a "mint account". This mint +account is later used to mint tokens to a user's token account. + +```typescript title="create-mint-account.ts" +import { + clusterApiUrl, + Connection, + Keypair, + sendAndConfirmTransaction, + SystemProgram, + Transaction, +} from "@solana/web3.js"; +import { + createInitializeMintInstruction, + TOKEN_PROGRAM_ID, + MINT_SIZE, + getMinimumBalanceForRentExemptMint, + createMint, +} from "@solana/spl-token"; +import bs58 from "bs58"; + +(async () => { + // connection + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + const recentBlockhash = await connection.getLatestBlockhash(); + + // 5YNmS1R9nNSCDzb5a7mMJ1dwK9uHeAAF4CmPEwKgVWr8 + const feePayer = Keypair.fromSecretKey( + bs58.decode( + "588FU4PktJWfGfxtzpAAXywSNt74AvtroVzGfKkVN1LwRuvHwKGr851uH8czM5qm4iqLbs1kKoMKtMJG4ATR7Ld2", + ), + ); + + // G2FAbFQPFa5qKXCetoFZQEvF9BVvCKbvUZvodpVidnoY + const alice = Keypair.fromSecretKey( + bs58.decode( + "4NMwxzmYj2uvHuq8xoqhY8RXg63KSVJM1DXkpbmkUY7YQWuoyQgFnnzn6yo3CMnqZasnNPNuAT2TLwQsCaKkUddp", + ), + ); + + // 1) use build-in function + let mintPubkey = await createMint( + connection, // connection + feePayer, // fee payer + alice.publicKey, // mint authority + alice.publicKey, // freeze authority (you can use `null` to disable it. 
when you disable it, you can't turn it on again) + 8, // decimals + ); + console.log(`mint: ${mintPubkey.toBase58()}`); + + // or + + // 2) compose by yourself + const mint = Keypair.generate(); + console.log(`mint: ${mint.publicKey.toBase58()}`); + + const transaction = new Transaction().add( + // create mint account + SystemProgram.createAccount({ + fromPubkey: feePayer.publicKey, + newAccountPubkey: mint.publicKey, + space: MINT_SIZE, + lamports: await getMinimumBalanceForRentExemptMint(connection), + programId: TOKEN_PROGRAM_ID, + }), + // init mint account + createInitializeMintInstruction( + mint.publicKey, // mint pubkey + 8, // decimals + alice.publicKey, // mint authority + alice.publicKey, // freeze authority (you can use `null` to disable it. when you disable it, you can't turn it on again) + ), + ); + + // Send transaction + const transactionSignature = await sendAndConfirmTransaction( + connection, + transaction, + [feePayer, mint], // Signers + ); + + console.log(`txhash: ${transactionSignature}`); +})(); +``` diff --git a/content/cookbook/tokens/create-nft.mdx b/content/cookbook/tokens/create-nft.mdx new file mode 100644 index 000000000..8e01a87f0 --- /dev/null +++ b/content/cookbook/tokens/create-nft.mdx @@ -0,0 +1,183 @@ +--- +title: How to create an NFT +description: "Learn how to create an NFT on Solana, using Arweave and Metaplex." +--- + +To create an NFT you have to: + +1. Upload the image to IPFS like Arweave +2. Upload the JSON metadata to Arweave or similar storage service. +3. Call metaplex to create an account for the NFT + +### Upload to Arweave + +```typescript title="upload-to-arweave.ts" +import fs from "node:fs"; +import Arweave from "arweave"; + +(async () => { + const arweave = Arweave.init({ + host: "localhost", + port: 1984, + protocol: "http", + timeout: 20000, + logging: false, + }); + + const host = arweave.getConfig().api.host; + const port = arweave.getConfig().api.port; + const protocol = arweave.getConfig().api.protocol; + + // Upload image to Arweave + const data = fs.readFileSync("./code/nfts/upload-arweave/lowres-dog.png"); + + const transaction = await arweave.createTransaction({ + data: data, + }); + + transaction.addTag("Content-Type", "image/png"); + + // Instead of generating a new wallet, you can use an existing one from your file system + // useful in production environments + // const wallet = JSON.parse(fs.readFileSync("./code/nfts/upload-arweave/wallet.json", "utf-8")) + const wallet = await arweave.wallets.generate(); + const address = await arweave.wallets.getAddress(wallet); + console.log("address:, ", address); + + await arweave.api.get(`/mint/${encodeURI(addr)}/10000000000000000`); + await arweave.transactions.sign(transaction, wallet); + + const response = await arweave.transactions.post(transaction); + console.log(response); + + const id = transaction.id; + const imageUrl = id ? 
`${protocol}://${host}:${port}/${id}` : null; + console.log("imageUrl", imageUrl); + + // Upload metadata to Arweave + + const metadata = { + name: "Custom NFT #1", + symbol: "CNFT", + description: "A description about my custom NFT #1", + seller_fee_basis_points: 500, + external_url: "https://www.customnft.com/", + attributes: [ + { + trait_type: "NFT type", + value: "Custom", + }, + ], + collection: { + name: "Test Collection", + family: "Custom NFTs", + }, + properties: { + files: [ + { + uri: imageUrl, + type: "image/png", + }, + ], + category: "image", + maxSupply: 0, + creators: [ + { + address: "CBBUMHRmbVUck99mTCip5sHP16kzGj3QTYB8K3XxwmQx", + share: 100, + }, + ], + }, + image: imageUrl, + }; + + const metadataString = JSON.stringify(metadata); + + const metadataTransaction = await arweave.createTransaction({ + data: metadataString, + }); + + metadataTransaction.addTag("Content-Type", "application/json"); + + await arweave.transactions.sign(metadataTransaction, wallet); + + console.log("metadata txid", metadataTransaction.id); + + const txnResult = await arweave.transactions.post(metadataTransaction); + + console.log(txnResult); +})(); +``` + +### Mint the NFT + +```typescript title="mint-nft.ts" +import { createUmi } from "@metaplex-foundation/umi-bundle-defaults"; +import { + generateSigner, + percentAmount, + keypairIdentity, +} from "@metaplex-foundation/umi"; +import { clusterApiUrl } from "@solana/web3.js"; +import { + createNft, + fetchDigitalAsset, + mplTokenMetadata, +} from "@metaplex-foundation/mpl-token-metadata"; +import "dotenv/config"; + +(async () => { + try { + console.log("Loading keypair from environment..."); + const privateKey = JSON.parse(process.env.SOLANA_PRIVATE_KEY || "[]"); + if (privateKey.length === 0) { + throw new Error("SOLANA_PRIVATE_KEY is not set in .env file"); + } + + console.log("Creating Umi instance..."); + const umi = createUmi(clusterApiUrl("devnet")); + + const keypair = umi.eddsa.createKeypairFromSecretKey( + new Uint8Array(privateKey), + ); + + // Use keypairIdentity to set the keypair as the signer + const signer = keypairIdentity(keypair); + umi.use(signer); + umi.use(mplTokenMetadata()); + + console.log("Keypair loaded. 
Public key:", keypair.publicKey); + + console.log("Generating new mint address..."); + const mint = generateSigner(umi); + + console.log("Creating NFT..."); + const { signature } = await createNft(umi, { + mint, + name: "My NFT", + // Replace this with your Arweave metadata URI + uri: "https://ffaaqinzhkt4ukhbohixfliubnvpjgyedi3f2iccrq4efh3s.arweave.net/KUAIIbk6p8oo4XHRcq0U__C2r0mwQaNl0gQow4Qp9yk", + maxSupply: 1, + sellerFeeBasisPoints: percentAmount(0), + creators: [ + { + address: keypair.publicKey, + share: 100, + verified: true, + }, + ], + }).sendAndConfirm(umi); + + console.log("NFT created successfully!"); + console.log("Mint address:", mint.publicKey); + console.log("Transaction signature:", signature); + + console.log("Fetching digital asset..."); + const asset = await fetchDigitalAsset(umi, mint.publicKey); + console.log("Digital Asset:", asset); + } catch (error) { + console.error("Error:", error); + console.error("Stack trace:", error.stack); + } +})(); +``` diff --git a/content/cookbook/tokens/create-token-account.mdx b/content/cookbook/tokens/create-token-account.mdx new file mode 100644 index 000000000..041148ccb --- /dev/null +++ b/content/cookbook/tokens/create-token-account.mdx @@ -0,0 +1,99 @@ +--- +title: How to Create a Token Account +description: + "Learn to create Solana token accounts, which hold tokens for users." +--- + +A token account is required for a user to hold tokens. + +A user will have at least one token account for every type of token they own. + +Associated Token Accounts are deterministically created accounts for every +keypair. ATAs are the recommended method of managing token accounts. + +```typescript title="ata.ts" +import { + clusterApiUrl, + Connection, + PublicKey, + Keypair, + Transaction, + sendAndConfirmTransaction, +} from "@solana/web3.js"; +import { + createAssociatedTokenAccount, + getAssociatedTokenAddress, + createAssociatedTokenAccountInstruction, +} from "@solana/spl-token"; +import bs58 from "bs58"; + +(async () => { + // connection + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + + // 5YNmS1R9nNSCDzb5a7mMJ1dwK9uHeAAF4CmPEwKgVWr8 + const feePayer = Keypair.fromSecretKey( + bs58.decode( + "588FU4PktJWfGfxtzpAAXywSNt74AvtroVzGfKkVN1LwRuvHwKGr851uH8czM5qm4iqLbs1kKoMKtMJG4ATR7Ld2", + ), + ); + + // G2FAbFQPFa5qKXCetoFZQEvF9BVvCKbvUZvodpVidnoY + const alice = Keypair.fromSecretKey( + bs58.decode( + "4NMwxzmYj2uvHuq8xoqhY8RXg63KSVJM1DXkpbmkUY7YQWuoyQgFnnzn6yo3CMnqZasnNPNuAT2TLwQsCaKkUddp", + ), + ); + + const mintPubkey = new PublicKey( + "2SKpuBU9ksneBZD4nqbZkw75NE11HsSHsGRtW2BZh5aQ", + ); + + // 1) use build-in function + { + let ata = await createAssociatedTokenAccount( + connection, // connection + feePayer, // fee payer + mintPubkey, // mint + alice.publicKey, // owner, + ); + console.log(`ATA: ${ata.toBase58()}`); + } + + // or + + // 2) composed by yourself + { + // calculate ATA + let ata = await getAssociatedTokenAddress( + mintPubkey, // mint + alice.publicKey, // owner + ); + console.log(`ATA: ${ata.toBase58()}`); + + // if your wallet is off-curve, you should use + // let ata = await getAssociatedTokenAddress( + // mintPubkey, // mint + // alice.publicKey // owner + // true, // allowOwnerOffCurve + // ); + + let transaction = new Transaction().add( + createAssociatedTokenAccountInstruction( + feePayer.publicKey, // payer + ata, // ata + alice.publicKey, // owner + mintPubkey, // mint + ), + ); + + const signature = await sendAndConfirmTransaction( + connection, + transaction, + [feePayer], // 
Signers + ); + + console.log(`txhash: ${await signature}`); + } +})(); +``` diff --git a/content/cookbook/tokens/fetch-all-nfts.mdx b/content/cookbook/tokens/fetch-all-nfts.mdx new file mode 100644 index 000000000..34bf9d545 --- /dev/null +++ b/content/cookbook/tokens/fetch-all-nfts.mdx @@ -0,0 +1,49 @@ +--- +title: How to get all NFTs from a wallet? +description: + "Learn how to fetch all non-fungible tokens (NFTs) from a wallet on Solana." +--- + +```typescript title="get-nfts-by-wallet.ts" +import { createUmi } from "@metaplex-foundation/umi-bundle-defaults"; +import { publicKey } from "@metaplex-foundation/umi"; +import { fetchAllDigitalAssetWithTokenByOwner } from "@metaplex-foundation/mpl-token-metadata"; +import { clusterApiUrl } from "@solana/web3.js"; + +BigInt.prototype.toJSON = function () { + return this.toString(); +}; + +(async () => { + try { + // Create a UMI instance + const umi = createUmi(clusterApiUrl("devnet")); + + // The owner's public key + const ownerPublicKey = publicKey( + "2R4bHmSBHkHAskerTHE6GE1Fxbn31kaD5gHqpsPySVd7", + ); + + console.log("Fetching NFTs..."); + const allNFTs = await fetchAllDigitalAssetWithTokenByOwner( + umi, + ownerPublicKey, + ); + + console.log(`Found ${allNFTs.length} NFTs for the owner:`); + allNFTs.forEach((nft, index) => { + console.log(`\nNFT #${index + 1}:`); + console.log("Mint Address:", nft.publicKey); + console.log("Name:", nft.metadata.name); + console.log("Symbol:", nft.metadata.symbol); + console.log("URI:", nft.metadata.uri); + }); + + // If you need the full NFT data + console.log("\nFull NFT data:"); + console.log(JSON.stringify(allNFTs, null, 2)); + } catch (error) { + console.error("Error:", error); + } +})(); +``` diff --git a/content/cookbook/tokens/fetch-nft-metadata.mdx b/content/cookbook/tokens/fetch-nft-metadata.mdx new file mode 100644 index 000000000..fb44434b0 --- /dev/null +++ b/content/cookbook/tokens/fetch-nft-metadata.mdx @@ -0,0 +1,58 @@ +--- +title: How to Fetch the NFT Metadata +description: + "Learn how to fetch the metadata of a non-fungible token (NFT) on Solana." 
+--- + +```typescript title="get-nft-metadata.ts" +import { createUmi } from "@metaplex-foundation/umi-bundle-defaults"; +import { + createSignerFromKeypair, + generateSigner, + signerIdentity, +} from "@metaplex-foundation/umi"; +import { + fetchDigitalAsset, + mplTokenMetadata, +} from "@metaplex-foundation/mpl-token-metadata"; +import { PublicKey } from "@metaplex-foundation/js"; + +(async () => { + try { + // Create a UMI instance + const umi = createUmi("https://api.mainnet-beta.solana.com"); + + // Use the mplTokenMetadata plugin + umi.use(mplTokenMetadata()); + + // Generate a new keypair (you can replace this with your own keypair if needed) + const keypair = generateSigner(umi); + umi.use(signerIdentity(createSignerFromKeypair(umi, keypair))); + + // The mint address of the NFT you want to fetch + const mintAddress = new PublicKey( + "Ay1U9DWphDgc7hq58Yj1yHabt91zTzvV2YJbAWkPNbaK", + ); + + console.log("Fetching NFT metadata..."); + const asset = await fetchDigitalAsset(umi, mintAddress); + + console.log("NFT Metadata:"); + + // If you want to access specific metadata fields: + console.log("\nName:", asset.metadata.name); + console.log("Symbol:", asset.metadata.symbol); + console.log("URI:", asset.metadata.uri); + + // Fetch and log the JSON metadata + if (asset.metadata.uri) { + const response = await fetch(asset.metadata.uri); + const jsonMetadata = await response.json(); + console.log("\nJSON Metadata:"); + console.log(JSON.stringify(jsonMetadata, null, 2)); + } + } catch (error) { + console.error("Error:", error); + } +})(); +``` diff --git a/content/cookbook/tokens/get-all-token-accounts.mdx b/content/cookbook/tokens/get-all-token-accounts.mdx new file mode 100644 index 000000000..c902f8066 --- /dev/null +++ b/content/cookbook/tokens/get-all-token-accounts.mdx @@ -0,0 +1,73 @@ +--- +title: How to Get All Token Accounts by Authority +description: + "Learn how to retrieve Solana token accounts by owner, including all accounts + or filtered by mint." +--- + +You can fetch token accounts by owner. There are two ways to do it. + +1. Get All Token Account + +```typescript title="get-token-account-by-owner-all.ts" +import { clusterApiUrl, Connection, PublicKey } from "@solana/web3.js"; +import { TOKEN_PROGRAM_ID } from "@solana/spl-token"; + +(async () => { + // connection + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + + const owner = new PublicKey("G2FAbFQPFa5qKXCetoFZQEvF9BVvCKbvUZvodpVidnoY"); + let response = await connection.getParsedTokenAccountsByOwner(owner, { + programId: TOKEN_PROGRAM_ID, + }); + + response.value.forEach(accountInfo => { + console.log(`pubkey: ${accountInfo.pubkey.toBase58()}`); + console.log(`mint: ${accountInfo.account.data["parsed"]["info"]["mint"]}`); + console.log( + `owner: ${accountInfo.account.data["parsed"]["info"]["owner"]}`, + ); + console.log( + `decimals: ${accountInfo.account.data["parsed"]["info"]["tokenAmount"]["decimals"]}`, + ); + console.log( + `amount: ${accountInfo.account.data["parsed"]["info"]["tokenAmount"]["amount"]}`, + ); + console.log("===================="); + }); +})(); +``` + +2. 
Filter By Mint + +```typescript title="get-account-by-owner-by-mint.ts" +import { clusterApiUrl, Connection, PublicKey } from "@solana/web3.js"; + +(async () => { + // connection + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + + const owner = new PublicKey("G2FAbFQPFa5qKXCetoFZQEvF9BVvCKbvUZvodpVidnoY"); + const mint = new PublicKey("54dQ8cfHsW1YfKYpmdVZhWpb9iSi6Pac82Nf7sg3bVb"); + + let response = await connection.getParsedTokenAccountsByOwner(owner, { + mint: mint, + }); + + response.value.forEach(accountInfo => { + console.log(`pubkey: ${accountInfo.pubkey.toBase58()}`); + console.log(`mint: ${accountInfo.account.data["parsed"]["info"]["mint"]}`); + console.log( + `owner: ${accountInfo.account.data["parsed"]["info"]["owner"]}`, + ); + console.log( + `decimals: ${accountInfo.account.data["parsed"]["info"]["tokenAmount"]["decimals"]}`, + ); + console.log( + `amount: ${accountInfo.account.data["parsed"]["info"]["tokenAmount"]["amount"]}`, + ); + console.log("===================="); + }); +})(); +``` diff --git a/content/cookbook/tokens/get-nft-owner.mdx b/content/cookbook/tokens/get-nft-owner.mdx new file mode 100644 index 000000000..6b3bba2f7 --- /dev/null +++ b/content/cookbook/tokens/get-nft-owner.mdx @@ -0,0 +1,34 @@ +--- +title: How to get the owner of an NFT +description: + "Learn how to get the owner of a non-fungible token (NFT) on Solana." +--- + +If you have the mint key of an NFT, you can find its current owner by +sneak-peeking at the largest token account for that mint key. + +Remember that NFTs have a supply of 1, and they are indivisible, meaning that +only one token account will hold that token at any point in time, whilst all +other token accounts for that mint key will have a balance of 0. + +Once the largest token account is identified, we can retrieve its owner. + +```typescript title="get-nft-owner.ts" +import { Connection, PublicKey } from "@solana/web3.js"; + +(async () => { + const connection = new Connection("https://api.mainnet-beta.solana.com"); + const tokenMint = "9ARngHhVaCtH5JFieRdSS5Y8cdZk2TMF4tfGSWFB9iSK"; + + const largestAccounts = await connection.getTokenLargestAccounts( + new PublicKey(tokenMint), + ); + const largestAccountInfo = await connection.getParsedAccountInfo( + largestAccounts.value[0].address, + ); + console.log(largestAccountInfo?.value?.data); + + const owner = largestAccountInfo?.value?.data?.parsed.info.owner; + console.log("NFT owner :", owner); +})(); +``` diff --git a/content/cookbook/tokens/get-token-account.mdx b/content/cookbook/tokens/get-token-account.mdx new file mode 100644 index 000000000..a7c25834c --- /dev/null +++ b/content/cookbook/tokens/get-token-account.mdx @@ -0,0 +1,46 @@ +--- +title: How to Get a Token Account +description: + "Learn how to retrieve Solana token account details, including owner, mint, + and balance" +--- + +Every token account has information on the token such as the owner, mint, +amount(balance). 
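+If you only know the owner's wallet and the mint, you can derive the associated
+token account address first and then fetch it. A minimal sketch (the wallet and
+mint addresses below are placeholders):
+
+```typescript title="get-ata-account.ts"
+import { clusterApiUrl, Connection, PublicKey } from "@solana/web3.js";
+import { getAccount, getAssociatedTokenAddress } from "@solana/spl-token";
+
+(async () => {
+  const connection = new Connection(clusterApiUrl("devnet"), "confirmed");
+
+  // placeholder owner wallet and mint
+  const owner = new PublicKey("G2FAbFQPFa5qKXCetoFZQEvF9BVvCKbvUZvodpVidnoY");
+  const mint = new PublicKey("8mAKLjGGmjKTnmcXeyr3pr7iX13xXVjJJiL6RujDbSPV");
+
+  // derive the associated token account address for this owner and mint
+  const ata = await getAssociatedTokenAddress(mint, owner);
+
+  // fetch and decode the token account
+  const tokenAccount = await getAccount(connection, ata);
+  console.log(`owner: ${tokenAccount.owner.toBase58()}`);
+  console.log(`mint: ${tokenAccount.mint.toBase58()}`);
+  console.log(`amount: ${tokenAccount.amount}`);
+})();
+```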
+ +```typescript title="get-token-account.ts" +import { clusterApiUrl, Connection, PublicKey } from "@solana/web3.js"; +import { getAccount } from "@solana/spl-token"; + +(async () => { + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + + const tokenAccountPubkey = new PublicKey( + "2XYiFjmU1pCXmC2QfEAghk6S7UADseupkNQdnRBXszD5", + ); + + let tokenAccount = await getAccount(connection, tokenAccountPubkey); + console.log(tokenAccount); + /* + { + address: PublicKey { + _bn: + }, + mint: PublicKey { + _bn: + }, + owner: PublicKey { + _bn: + }, + amount: 0n, + delegate: null, + delegatedAmount: 0n, + isInitialized: true, + isFrozen: false, + isNative: false, + rentExemptReserve: null, + closeAuthority: null + } + */ +})(); +``` diff --git a/content/cookbook/tokens/get-token-balance.mdx b/content/cookbook/tokens/get-token-balance.mdx new file mode 100644 index 000000000..74f0e75cb --- /dev/null +++ b/content/cookbook/tokens/get-token-balance.mdx @@ -0,0 +1,48 @@ +--- +title: How to get a token account's balance +description: + "Learn how to quickly retrieve a Solana token account's balance with a single + call. Includes code examples in both TypeScript and Rust." +--- + +The token account has the token balance, which can be retrieved with a single +call + +```typescript title="get-token-balance.ts" +import { clusterApiUrl, Connection, PublicKey } from "@solana/web3.js"; + +(async () => { + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + + const tokenAccount = new PublicKey( + "FWZedVtyKQtP4CXhT7XDnLidRADrJknmZGA2qNjpTPg8", + ); + + let tokenAmount = await connection.getTokenAccountBalance(tokenAccount); + console.log(`amount: ${tokenAmount.value.amount}`); + console.log(`decimals: ${tokenAmount.value.decimals}`); +})(); +``` + +```rust title="get-token-balance.rs" +use solana_client::rpc_client::RpcClient; +use solana_program::pubkey::Pubkey; +use solana_sdk::commitment_config::CommitmentConfig; +use std::str::FromStr; + +fn main() { + let rpc_url = String::from("https://api.devnet.solana.com"); + let connection = RpcClient::new_with_commitment(rpc_url, CommitmentConfig::confirmed()); + + let token_account = Pubkey::from_str("FWZedVtyKQtP4CXhT7XDnLidRADrJknmZGA2qNjpTPg8").unwrap(); + let balance = connection + .get_token_account_balance(&token_account) + .unwrap(); + + println!("amount: {}, decimals: {}", balance.amount, balance.decimals); +} +``` + + + A token account can only hold one kind of mint. When you specify a token account, you also specific a mint too. + diff --git a/content/cookbook/tokens/get-token-mint.mdx b/content/cookbook/tokens/get-token-mint.mdx new file mode 100644 index 000000000..83dbb0865 --- /dev/null +++ b/content/cookbook/tokens/get-token-mint.mdx @@ -0,0 +1,42 @@ +--- +title: How to Get a Token Mint +description: + "Learn how to retrieve Solana token mint information, including supply, + authority, and decimals." +--- + +In order to get the current supply, authority, or decimals a token has, you will +need to get the account info for the token mint. 
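+Note that `supply` is returned as a raw `bigint` in the mint's base units. To
+display it in whole tokens, divide by `10 ** decimals`. A small sketch building
+on the `mintAccount` fetched in the example below:
+
+```typescript title="format-mint-supply.ts"
+// raw supply is expressed in the mint's smallest unit
+const rawSupply = mintAccount.supply; // bigint
+const decimals = mintAccount.decimals;
+
+// convert to a human readable amount (fine for display purposes)
+const uiSupply = Number(rawSupply) / 10 ** decimals;
+console.log(`supply: ${uiSupply} tokens (${rawSupply} base units)`);
+```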
+ +```typescript title="get-mint-account.ts" +import { clusterApiUrl, Connection, PublicKey } from "@solana/web3.js"; +import { getMint } from "@solana/spl-token"; + +(async () => { + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + + const mintAccountPublicKey = new PublicKey( + "8mAKLjGGmjKTnmcXeyr3pr7iX13xXVjJJiL6RujDbSPV", + ); + + let mintAccount = await getMint(connection, mintAccountPublicKey); + + console.log(mintAccount); + /* + { + address: PublicKey { + _bn: + }, + mintAuthority: PublicKey { + _bn: + }, + supply: 0n, + decimals: 8, + isInitialized: true, + freezeAuthority: PublicKey { + _bn: + } + } + */ +})(); +``` diff --git a/content/cookbook/tokens/manage-wrapped-sol.mdx b/content/cookbook/tokens/manage-wrapped-sol.mdx new file mode 100644 index 000000000..b3f50a297 --- /dev/null +++ b/content/cookbook/tokens/manage-wrapped-sol.mdx @@ -0,0 +1,174 @@ +--- +title: How to Use Wrapped SOL +description: + "Learn how to use wrapped SOL on Solana, including creating token accounts and + adding balance through SOL transfers or token transfers." +--- + +Wrapped SOL just like any other token mint. The difference is using `syncNative` +and creating token accounts specifically on the `NATIVE_MINT` address. + +## Create Token Account + +Like creating +[SPL token accounts](/developers/cookbook/tokens/create-token-account) but +replace mint with `NATIVE_MINT` + +```js +import { NATIVE_MINT } from "@solana/spl-token"; +``` + +## Add Balance + +There are two ways to add balance for Wrapped SOL + +### 1. By SOL Transfer + +```typescript title="add-balance-by-sol.ts" +import { + clusterApiUrl, + Connection, + Keypair, + Transaction, + SystemProgram, + sendAndConfirmTransaction, +} from "@solana/web3.js"; +import { + NATIVE_MINT, + getAssociatedTokenAddress, + createSyncNativeInstruction, +} from "@solana/spl-token"; +import bs58 from "bs58"; + +(async () => { + // connection + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + + // 5YNmS1R9nNSCDzb5a7mMJ1dwK9uHeAAF4CmPEwKgVWr8 + const feePayer = Keypair.fromSecretKey( + bs58.decode( + "588FU4PktJWfGfxtzpAAXywSNt74AvtroVzGfKkVN1LwRuvHwKGr851uH8czM5qm4iqLbs1kKoMKtMJG4ATR7Ld2", + ), + ); + + // G2FAbFQPFa5qKXCetoFZQEvF9BVvCKbvUZvodpVidnoY + const alice = Keypair.fromSecretKey( + bs58.decode( + "4NMwxzmYj2uvHuq8xoqhY8RXg63KSVJM1DXkpbmkUY7YQWuoyQgFnnzn6yo3CMnqZasnNPNuAT2TLwQsCaKkUddp", + ), + ); + + // remember to create ATA first + let ata = await getAssociatedTokenAddress( + NATIVE_MINT, // mint + alice.publicKey, // owner + ); + + let amount = 1 * 1e9; /* Wrapped SOL's decimals is 9 */ + + let tx = new Transaction().add( + // transfer SOL + SystemProgram.transfer({ + fromPubkey: alice.publicKey, + toPubkey: ata, + lamports: amount, + }), + // sync wrapped SOL balance + createSyncNativeInstruction(ata), + ); + console.log( + `txhash: ${await sendAndConfirmTransaction(connection, tx, [feePayer, alice])}`, + ); +})(); +``` + +### 2. 
By Token Transfer + +```typescript title="add-balance-by-token.ts" +import { + clusterApiUrl, + Connection, + Keypair, + Transaction, + SystemProgram, + sendAndConfirmTransaction, +} from "@solana/web3.js"; +import { + TOKEN_PROGRAM_ID, + NATIVE_MINT, + getMinimumBalanceForRentExemptAccount, + getAssociatedTokenAddress, + ACCOUNT_SIZE, + createInitializeAccountInstruction, + createTransferInstruction, + createCloseAccountInstruction, +} from "@solana/spl-token"; +import bs58 from "bs58"; + +(async () => { + // connection + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + + // 5YNmS1R9nNSCDzb5a7mMJ1dwK9uHeAAF4CmPEwKgVWr8 + const feePayer = Keypair.fromSecretKey( + bs58.decode( + "588FU4PktJWfGfxtzpAAXywSNt74AvtroVzGfKkVN1LwRuvHwKGr851uH8czM5qm4iqLbs1kKoMKtMJG4ATR7Ld2", + ), + ); + + // G2FAbFQPFa5qKXCetoFZQEvF9BVvCKbvUZvodpVidnoY + const alice = Keypair.fromSecretKey( + bs58.decode( + "4NMwxzmYj2uvHuq8xoqhY8RXg63KSVJM1DXkpbmkUY7YQWuoyQgFnnzn6yo3CMnqZasnNPNuAT2TLwQsCaKkUddp", + ), + ); + + // remember to create ATA first + let ata = await getAssociatedTokenAddress( + NATIVE_MINT, // mint + alice.publicKey, // owner + ); + + let auxAccount = Keypair.generate(); + let amount = 1 * 1e9; /* Wrapped SOL's decimals is 9 */ + + let tx = new Transaction().add( + // create token account + SystemProgram.createAccount({ + fromPubkey: alice.publicKey, + newAccountPubkey: auxAccount.publicKey, + space: ACCOUNT_SIZE, + lamports: + (await getMinimumBalanceForRentExemptAccount(connection)) + amount, // rent + amount + programId: TOKEN_PROGRAM_ID, + }), + // init token account + createInitializeAccountInstruction( + auxAccount.publicKey, + NATIVE_MINT, + alice.publicKey, + ), + // transfer WSOL + createTransferInstruction( + auxAccount.publicKey, + ata, + alice.publicKey, + amount, + ), + // close aux account + createCloseAccountInstruction( + auxAccount.publicKey, + alice.publicKey, + alice.publicKey, + ), + ); + + console.log( + `txhash: ${await sendAndConfirmTransaction(connection, tx, [ + feePayer, + auxAccount, + alice, + ])}`, + ); +})(); +``` diff --git a/content/cookbook/tokens/meta.json b/content/cookbook/tokens/meta.json new file mode 100644 index 000000000..d8c8d8814 --- /dev/null +++ b/content/cookbook/tokens/meta.json @@ -0,0 +1,24 @@ +{ + "title": "Tokens", + "pages": [ + "create-mint-account", + "get-token-mint", + "create-token-account", + "get-token-account", + "get-token-balance", + "mint-tokens", + "transfer-tokens", + "burn-tokens", + "close-token-accounts", + "set-update-token-authority", + "approve-token-delegate", + "revoke-token-delegate", + "manage-wrapped-sol", + "get-all-token-accounts", + "create-nft", + "fetch-nft-metadata", + "get-nft-owner", + "fetch-all-nfts" + ], + "defaultOpen": true +} diff --git a/content/cookbook/tokens/mint-tokens.mdx b/content/cookbook/tokens/mint-tokens.mdx new file mode 100644 index 000000000..d403b0744 --- /dev/null +++ b/content/cookbook/tokens/mint-tokens.mdx @@ -0,0 +1,100 @@ +--- +title: How to Mint Tokens +description: + "Learn how to mint tokens on Solana, increasing supply and transferring new + tokens to a specific account." +--- + +When you mint tokens, you increase the supply and transfer the new tokens to a +specific token account. 
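+If the recipient does not have a token account yet, a common pattern is to
+create (or fetch) their associated token account first and mint straight into
+it. A minimal sketch (assuming the same `connection`, `feePayer`, `alice` as
+mint authority and `mintPubkey` as in the full example below; the recipient
+address is a placeholder):
+
+```typescript title="mint-to-ata.ts"
+import { PublicKey } from "@solana/web3.js";
+import {
+  getOrCreateAssociatedTokenAccount,
+  mintToChecked,
+} from "@solana/spl-token";
+
+const recipient = new PublicKey("G2FAbFQPFa5qKXCetoFZQEvF9BVvCKbvUZvodpVidnoY");
+
+// create the recipient's associated token account if it does not exist yet
+const recipientAta = await getOrCreateAssociatedTokenAccount(
+  connection, // connection
+  feePayer, // payer of the account creation
+  mintPubkey, // mint
+  recipient, // owner of the new token account
+);
+
+const txhash = await mintToChecked(
+  connection, // connection
+  feePayer, // fee payer
+  mintPubkey, // mint
+  recipientAta.address, // receiver (token account)
+  alice, // mint authority
+  1e8, // amount, 10^8 for 1 token if decimals is 8
+  8, // decimals
+);
+console.log(`txhash: ${txhash}`);
+```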
+ +```typescript title="mint-tokens.ts" +import { + clusterApiUrl, + Connection, + PublicKey, + Keypair, + Transaction, + sendAndConfirmTransaction, +} from "@solana/web3.js"; +import { + createMintToCheckedInstruction, + mintToChecked, +} from "@solana/spl-token"; +import bs58 from "bs58"; + +(async () => { + // connection + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + + // 5YNmS1R9nNSCDzb5a7mMJ1dwK9uHeAAF4CmPEwKgVWr8 + const feePayer = Keypair.fromSecretKey( + bs58.decode( + "588FU4PktJWfGfxtzpAAXywSNt74AvtroVzGfKkVN1LwRuvHwKGr851uH8czM5qm4iqLbs1kKoMKtMJG4ATR7Ld2", + ), + ); + + // G2FAbFQPFa5qKXCetoFZQEvF9BVvCKbvUZvodpVidnoY + const alice = Keypair.fromSecretKey( + bs58.decode( + "4NMwxzmYj2uvHuq8xoqhY8RXg63KSVJM1DXkpbmkUY7YQWuoyQgFnnzn6yo3CMnqZasnNPNuAT2TLwQsCaKkUddp", + ), + ); + + const mintPubkey = new PublicKey( + "8mAKLjGGmjKTnmcXeyr3pr7iX13xXVjJJiL6RujDbSPV", + ); + + const tokenAccountPubkey = new PublicKey( + "2XYiFjmU1pCXmC2QfEAghk6S7UADseupkNQdnRBXszD5", + ); + + // 1) use build-in function + { + let txhash = await mintToChecked( + connection, // connection + feePayer, // fee payer + mintPubkey, // mint + tokenAccountPubkey, // receiver (should be a token account) + alice, // mint authority + 1e8, // amount. if your decimals is 8, you mint 10^8 for 1 token. + 8, // decimals + ); + console.log(`txhash: ${txhash}`); + + // if alice is a multisig account + // let txhash = await mintToChecked( + // connection, // connection + // feePayer, // fee payer + // mintPubkey, // mint + // tokenAccountPubkey, // receiver (should be a token account) + // alice.publicKey, // !! mint authority pubkey !! + // 1e8, // amount. if your decimals is 8, you mint 10^8 for 1 token. + // 8, // decimals + // [signer1, signer2 ...], + // ); + } + + // or + + // 2) compose by yourself + { + let tx = new Transaction().add( + createMintToCheckedInstruction( + mintPubkey, // mint + tokenAccountPubkey, // receiver (should be a token account) + alice.publicKey, // mint authority + 1e8, // amount. if your decimals is 8, you mint 10^8 for 1 token. + 8, // decimals + // [signer1, signer2 ...], // only multisig account will use + ), + ); + console.log( + `txhash: ${await sendAndConfirmTransaction(connection, tx, [ + feePayer, + alice /* fee payer + mint authority */, + ])}`, + ); + } +})(); +``` diff --git a/content/cookbook/tokens/revoke-token-delegate.mdx b/content/cookbook/tokens/revoke-token-delegate.mdx new file mode 100644 index 000000000..877f87303 --- /dev/null +++ b/content/cookbook/tokens/revoke-token-delegate.mdx @@ -0,0 +1,73 @@ +--- +title: How to Revoke a Token Delegate +description: + "Learn how to revoke a token delegate on Solana, resetting delegate + permissions and amounts." +--- + +Revoke will set delegate to null and set delegated amount to 0. 
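+You can confirm the result afterwards by re-reading the token account and
+checking its delegate fields. A small sketch (assuming the same `connection`
+and `tokenAccountPubkey` as in the example below):
+
+```typescript title="check-delegate-cleared.ts"
+import { getAccount } from "@solana/spl-token";
+
+const tokenAccount = await getAccount(connection, tokenAccountPubkey);
+// after a successful revoke, delegate is null and delegatedAmount is 0
+console.log(`delegate: ${tokenAccount.delegate}`); // null
+console.log(`delegated amount: ${tokenAccount.delegatedAmount}`); // 0n
+```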
+ +```typescript title="revoke-token-delegate.ts" +import { + clusterApiUrl, + Connection, + PublicKey, + Keypair, + Transaction, + sendAndConfirmTransaction, +} from "@solana/web3.js"; +import { createRevokeInstruction, revoke } from "@solana/spl-token"; +import bs58 from "bs58"; + +(async () => { + // connection + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + + // 5YNmS1R9nNSCDzb5a7mMJ1dwK9uHeAAF4CmPEwKgVWr8 + const feePayer = Keypair.fromSecretKey( + bs58.decode( + "588FU4PktJWfGfxtzpAAXywSNt74AvtroVzGfKkVN1LwRuvHwKGr851uH8czM5qm4iqLbs1kKoMKtMJG4ATR7Ld2", + ), + ); + + // G2FAbFQPFa5qKXCetoFZQEvF9BVvCKbvUZvodpVidnoY + const alice = Keypair.fromSecretKey( + bs58.decode( + "4NMwxzmYj2uvHuq8xoqhY8RXg63KSVJM1DXkpbmkUY7YQWuoyQgFnnzn6yo3CMnqZasnNPNuAT2TLwQsCaKkUddp", + ), + ); + + const tokenAccountPubkey = new PublicKey( + "DRS5CSgPQp4uvPPcUA34tckfYFNUPNBJi77fVbnSfQHr", + ); + + // 1) use build-in function + { + let txhash = await revoke( + connection, // connection + feePayer, // payer + tokenAccountPubkey, // token account + alice, // owner of token account + ); + console.log(`txhash: ${txhash}`); + } + + // or + + // 2) compose by yourself + { + let tx = new Transaction().add( + createRevokeInstruction( + tokenAccountPubkey, // token account + alice.publicKey, // owner of token account + ), + ); + console.log( + `txhash: ${await sendAndConfirmTransaction(connection, tx, [ + feePayer, + alice /* fee payer + origin auth */, + ])}`, + ); + } +})(); +``` diff --git a/content/cookbook/tokens/set-update-token-authority.mdx b/content/cookbook/tokens/set-update-token-authority.mdx new file mode 100644 index 000000000..8ae713a2a --- /dev/null +++ b/content/cookbook/tokens/set-update-token-authority.mdx @@ -0,0 +1,99 @@ +--- +title: How to Set Authority on Token Accounts or Mints +description: + "Learn how to set authorities on Solana token accounts and mints. Crucial for + controlling who can modify or manage tokens." +--- + +You can set/update authority. There are 4 types: + +1. MintTokens (mint account) +2. FreezeAccount (mint account) +3. AccountOwner (token account) +4. 
CloseAccount (token account) + +```typescript title="set-authority.ts" +import { + clusterApiUrl, + Connection, + PublicKey, + Keypair, + Transaction, + sendAndConfirmTransaction, +} from "@solana/web3.js"; +import { + AuthorityType, + createSetAuthorityInstruction, + setAuthority, +} from "@solana/spl-token"; +import bs58 from "bs58"; + +(async () => { + // connection + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + + // 5YNmS1R9nNSCDzb5a7mMJ1dwK9uHeAAF4CmPEwKgVWr8 + const feePayer = Keypair.fromSecretKey( + bs58.decode( + "588FU4PktJWfGfxtzpAAXywSNt74AvtroVzGfKkVN1LwRuvHwKGr851uH8czM5qm4iqLbs1kKoMKtMJG4ATR7Ld2", + ), + ); + + // G2FAbFQPFa5qKXCetoFZQEvF9BVvCKbvUZvodpVidnoY + const alice = Keypair.fromSecretKey( + bs58.decode( + "4NMwxzmYj2uvHuq8xoqhY8RXg63KSVJM1DXkpbmkUY7YQWuoyQgFnnzn6yo3CMnqZasnNPNuAT2TLwQsCaKkUddp", + ), + ); + + const randomGuy = Keypair.generate(); + console.log(`random guy: ${randomGuy.publicKey.toBase58()}`); + + const mintPubkey = new PublicKey( + "8mAKLjGGmjKTnmcXeyr3pr7iX13xXVjJJiL6RujDbSPV", + ); + + // authority type + + // 1) for mint account + // AuthorityType.MintTokens + // AuthorityType.FreezeAccount + + // 2) for token account + // AuthorityType.AccountOwner + // AuthorityType.CloseAccount + + // 1) use build-in function + { + let txhash = await setAuthority( + connection, // connection + feePayer, // payer + mintPubkey, // mint account || token account + alice, // current authority + AuthorityType.MintTokens, // authority type + randomGuy.publicKey, // new authority (you can pass `null` to close it) + ); + console.log(`txhash: ${txhash}`); + } + + // or + + // 2) compose by yourself + { + let tx = new Transaction().add( + createSetAuthorityInstruction( + mintPubkey, // mint account || token account + alice.publicKey, // current auth + AuthorityType.MintTokens, // authority type + feePayer.publicKey, // new auth (you can pass `null` to close it) + ), + ); + console.log( + `txhash: ${await sendAndConfirmTransaction(connection, tx, [ + feePayer, + alice /* fee payer + origin auth */, + ])}`, + ); + } +})(); +``` diff --git a/content/cookbook/tokens/transfer-tokens.mdx b/content/cookbook/tokens/transfer-tokens.mdx new file mode 100644 index 000000000..ccfb8f364 --- /dev/null +++ b/content/cookbook/tokens/transfer-tokens.mdx @@ -0,0 +1,90 @@ +--- +title: How to Transfer Tokens +description: "Transfer tokens between Solana accounts" +--- + +You can transfer tokens from one token account to another token account. 
+ +```typescript title="transfer-token.ts" +import { + clusterApiUrl, + Connection, + PublicKey, + Keypair, + Transaction, + sendAndConfirmTransaction, +} from "@solana/web3.js"; +import { + createTransferCheckedInstruction, + TOKEN_PROGRAM_ID, + transferChecked, +} from "@solana/spl-token"; +import bs58 from "bs58"; + +(async () => { + // connection + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + + // 5YNmS1R9nNSCDzb5a7mMJ1dwK9uHeAAF4CmPEwKgVWr8 + const feePayer = Keypair.fromSecretKey( + bs58.decode( + "588FU4PktJWfGfxtzpAAXywSNt74AvtroVzGfKkVN1LwRuvHwKGr851uH8czM5qm4iqLbs1kKoMKtMJG4ATR7Ld2", + ), + ); + + // G2FAbFQPFa5qKXCetoFZQEvF9BVvCKbvUZvodpVidnoY + const alice = Keypair.fromSecretKey( + bs58.decode( + "4NMwxzmYj2uvHuq8xoqhY8RXg63KSVJM1DXkpbmkUY7YQWuoyQgFnnzn6yo3CMnqZasnNPNuAT2TLwQsCaKkUddp", + ), + ); + + const mintPubkey = new PublicKey( + "8mAKLjGGmjKTnmcXeyr3pr7iX13xXVjJJiL6RujDbSPV", + ); + + const tokenAccountXPubkey = new PublicKey( + "2XYiFjmU1pCXmC2QfEAghk6S7UADseupkNQdnRBXszD5", + ); + const tokenAccountYPubkey = new PublicKey( + "GMxZfDmpR1b3vdJYXHzdF5noVLQogZuUAsDHHQ3ytPfV", + ); + + // 1) use build-in function + { + let txhash = await transferChecked( + connection, // connection + feePayer, // payer + tokenAccountXPubkey, // from (should be a token account) + mintPubkey, // mint + tokenAccountYPubkey, // to (should be a token account) + alice, // from's owner + 1e8, // amount, if your decimals is 8, send 10^8 for 1 token + 8, // decimals + ); + console.log(`txhash: ${txhash}`); + } + + // or + + // 2) compose by yourself + { + let tx = new Transaction().add( + createTransferCheckedInstruction( + tokenAccountXPubkey, // from (should be a token account) + mintPubkey, // mint + tokenAccountYPubkey, // to (should be a token account) + alice.publicKey, // from's owner + 1e8, // amount, if your decimals is 8, send 10^8 for 1 token + 8, // decimals + ), + ); + console.log( + `txhash: ${await sendAndConfirmTransaction(connection, tx, [ + feePayer, + alice /* fee payer + owner */, + ])}`, + ); + } +})(); +``` diff --git a/content/cookbook/transactions/add-memo.mdx b/content/cookbook/transactions/add-memo.mdx new file mode 100644 index 000000000..506eeec17 --- /dev/null +++ b/content/cookbook/transactions/add-memo.mdx @@ -0,0 +1,185 @@ +--- +title: How to Add a Memo to a Transaction +description: + "Transactions come with metadata information about what was transacted. Learn + how to add a memo to your transactions on Solana." +--- + +Any transaction can add a message making use of the memo program. In web3.js@1 +the programID from the Memo Program has to be added manually +`MemoSq4gqABAXKb96qnH8TysNcWxMyWCqXgDLGmfcHr` in V2 you can use +`getAddMemoInstruction`. 
+ + + + + +```typescript title="add-memo.ts" {61-72} +import { + airdropFactory, + appendTransactionMessageInstructions, + createSolanaRpc, + createSolanaRpcSubscriptions, + createTransactionMessage, + devnet, + generateKeyPairSigner, + getComputeUnitEstimateForTransactionMessageFactory, + getSignatureFromTransaction, + lamports, + pipe, + prependTransactionMessageInstructions, + sendAndConfirmTransactionFactory, + setTransactionMessageFeePayerSigner, + setTransactionMessageLifetimeUsingBlockhash, + signTransactionMessageWithSigners, + type Transaction, +} from "@solana/web3.js"; +import { + getSetComputeUnitLimitInstruction, + getSetComputeUnitPriceInstruction, +} from "@solana-program/compute-budget"; +import { getAddMemoInstruction } from "@solana-program/memo"; + +async function writeMemo(message: string) { + // Create an RPC. + const CLUSTER = "devnet"; + const rpc = createSolanaRpc(devnet(`https://api.${CLUSTER}.solana.com`)); + const rpcSubscriptions = createSolanaRpcSubscriptions( + devnet(`wss://api.${CLUSTER}.solana.com`), + ); + + // Create an airdrop function. + const airdrop = airdropFactory({ rpc, rpcSubscriptions }); + + // Create a utility that estimates a transaction message's compute consumption. + const getComputeUnitEstimate = + getComputeUnitEstimateForTransactionMessageFactory({ rpc }); + + // Create a transaction sending function. + const sendAndConfirmTransaction = sendAndConfirmTransactionFactory({ + rpc, + rpcSubscriptions, + }); + + // Create and fund an account. + const keypairSigner = await generateKeyPairSigner(); + console.log("Created an account with address", keypairSigner.address); + console.log("Requesting airdrop"); + await airdrop({ + commitment: "confirmed", + lamports: lamports(1000_000n), + recipientAddress: keypairSigner.address, + }); + console.log("Airdrop confirmed"); + + // Create a memo transaction. + console.log("Creating a memo transaction"); + const { value: latestBlockhash } = await rpc.getLatestBlockhash().send(); + const transactionMessage = pipe( + createTransactionMessage({ version: "legacy" }), + m => setTransactionMessageFeePayerSigner(keypairSigner, m), + m => setTransactionMessageLifetimeUsingBlockhash(latestBlockhash, m), + m => + appendTransactionMessageInstructions( + [ + getSetComputeUnitPriceInstruction({ microLamports: 5000n }), + getAddMemoInstruction({ memo: message }), + ], + m, + ), + ); + + // Figure out how many compute units to budget for this transaction + // so that you can right-size the compute budget to maximize the + // chance that it will be selected for inclusion into a block. + console.log("Estimating the compute consumption of the transaction"); + const estimatedComputeUnits = + await getComputeUnitEstimate(transactionMessage); + console.log( + `Transaction is estimated to consume ${estimatedComputeUnits} compute units`, + ); + const budgetedTransactionMessage = prependTransactionMessageInstructions( + [getSetComputeUnitLimitInstruction({ units: estimatedComputeUnits })], + transactionMessage, + ); + + // Sign and send the transaction. 
+ console.log("Signing and sending the transaction"); + const signedTx = await signTransactionMessageWithSigners( + budgetedTransactionMessage, + ); + const signature = getSignatureFromTransaction(signedTx); + console.log( + "Sending transaction https://explorer.solana.com/tx/" + + signature + + "/?cluster=" + + CLUSTER, + ); + await sendAndConfirmTransaction(signedTx, { commitment: "confirmed" }); + console.log("Transaction confirmed"); +} + +writeMemo("Hello, Solana!"); +``` + + + + + +```typescript title="add-memo.ts" {38-46} +import { + Connection, + Keypair, + SystemProgram, + LAMPORTS_PER_SOL, + PublicKey, + Transaction, + TransactionInstruction, + sendAndConfirmTransaction, +} from "@solana/web3.js"; + +(async () => { + const fromKeypair = Keypair.generate(); + const toKeypair = Keypair.generate(); + + const connection = new Connection( + "https://api.devnet.solana.com", + "confirmed", + ); + + const airdropSignature = await connection.requestAirdrop( + fromKeypair.publicKey, + LAMPORTS_PER_SOL, + ); + + await connection.confirmTransaction(airdropSignature); + + const lamportsToSend = 10; + + const transferTransaction = new Transaction().add( + SystemProgram.transfer({ + fromPubkey: fromKeypair.publicKey, + toPubkey: toKeypair.publicKey, + lamports: lamportsToSend, + }), + ); + + transferTransaction.add( + new TransactionInstruction({ + keys: [ + { pubkey: fromKeypair.publicKey, isSigner: true, isWritable: true }, + ], + data: Buffer.from("Memo message to send in this transaction", "utf-8"), + programId: new PublicKey("MemoSq4gqABAXKb96qnH8TysNcWxMyWCqXgDLGmfcHr"), + }), + ); + + await sendAndConfirmTransaction(connection, transferTransaction, [ + fromKeypair, + ]); +})(); +``` + + + + diff --git a/content/cookbook/transactions/add-priority-fees.mdx b/content/cookbook/transactions/add-priority-fees.mdx new file mode 100644 index 000000000..4904923a8 --- /dev/null +++ b/content/cookbook/transactions/add-priority-fees.mdx @@ -0,0 +1,212 @@ +--- +title: How to Add Priority Fees to a Transaction +description: + "Transactions executed in order they are prioritized on Solana. Learn how to + increase your transaction priority with priority fees on Solana." +--- + +Transaction (TX) priority is achieved by paying a Prioritization Fee in addition +to the Base Fee. By default the compute budget is the product of 200,000 Compute +Units (CU) \* number of instructions, with a max of 1.4M CU. The Base Fee is +5,000 Lamports per signature. A microLamport is 0.000001 Lamports. + +> You can find a detailed guide here on +> [how to use priority fees](/developers/guides/advanced/how-to-use-priority-fees). + +The total compute budget or Prioritization Fee for a single TX can be changed by +adding instructions from the ComputeBudgetProgram. + +`ComputeBudgetProgram.setComputeUnitPrice({ microLamports: number })` will add a +Prioritization Fee above the Base Fee (5,000 Lamports). The value provided in +microLamports will be multiplied by the CU budget to determine the +Prioritization Fee in Lamports. For example, if your CU budget is 1M CU, and you +add 1 microLamport/CU, the Prioritization Fee will be 1 Lamport (1M \* +0.000001). The total fee will then be 5001 Lamports. + +Use `ComputeBudgetProgram.setComputeUnitLimit({ units: number })` to set the new +compute budget. The value provided will replace the default value. Transactions +should request the minimum amount of CU required for execution to maximize +throughput, or minimize fees. 
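
As a quick sanity check on the arithmetic above, here is a minimal, illustrative TypeScript sketch. The numbers are example values only and are not part of the recipes below:

```typescript
// Illustrative fee arithmetic only -- example values, not a recipe.
const BASE_FEE_LAMPORTS_PER_SIGNATURE = 5_000;
const signatures = 1;

const computeUnitLimit = 1_000_000; // what you request via setComputeUnitLimit
const computeUnitPriceMicroLamports = 1; // what you set via setComputeUnitPrice

// 1 microLamport = 0.000001 lamports, so divide by 1,000,000
const prioritizationFeeLamports =
  (computeUnitLimit * computeUnitPriceMicroLamports) / 1_000_000; // 1

const totalFeeLamports =
  signatures * BASE_FEE_LAMPORTS_PER_SIGNATURE + prioritizationFeeLamports; // 5,001

console.log({ prioritizationFeeLamports, totalFeeLamports });
```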
+ + + + + +```typescript title="add-priority-fees.ts" {61-72} {37-38} {77-87} +import { + airdropFactory, + appendTransactionMessageInstructions, + createSolanaRpc, + createSolanaRpcSubscriptions, + createTransactionMessage, + devnet, + generateKeyPairSigner, + getComputeUnitEstimateForTransactionMessageFactory, + getSignatureFromTransaction, + lamports, + pipe, + prependTransactionMessageInstructions, + sendAndConfirmTransactionFactory, + setTransactionMessageFeePayerSigner, + setTransactionMessageLifetimeUsingBlockhash, + signTransactionMessageWithSigners, +} from "@solana/web3.js"; +import { + getSetComputeUnitLimitInstruction, + getSetComputeUnitPriceInstruction, +} from "@solana-program/compute-budget"; +import { getAddMemoInstruction } from "@solana-program/memo"; + +async function writeMemoWithPriorityFees(message: string) { + // Create an RPC. + const CLUSTER = "devnet"; + const rpc = createSolanaRpc(devnet(`https://api.${CLUSTER}.solana.com`)); + const rpcSubscriptions = createSolanaRpcSubscriptions( + devnet(`wss://api.${CLUSTER}.solana.com`), + ); + + // Create an airdrop function. + const airdrop = airdropFactory({ rpc, rpcSubscriptions }); + + // Create a utility that estimates a transaction message's compute consumption. + const getComputeUnitEstimate = + getComputeUnitEstimateForTransactionMessageFactory({ rpc }); + + // Create a transaction sending function. + const sendAndConfirmTransaction = sendAndConfirmTransactionFactory({ + rpc, + rpcSubscriptions, + }); + + // Create and fund an account. + const keypairSigner = await generateKeyPairSigner(); + console.log("Created an account with address", keypairSigner.address); + console.log("Requesting airdrop"); + await airdrop({ + commitment: "confirmed", + lamports: lamports(1000_000n), + recipientAddress: keypairSigner.address, + }); + console.log("Airdrop confirmed"); + + // Create a memo transaction. + console.log("Creating a memo transaction"); + const { value: latestBlockhash } = await rpc.getLatestBlockhash().send(); + const transactionMessage = pipe( + createTransactionMessage({ version: "legacy" }), + m => setTransactionMessageFeePayerSigner(keypairSigner, m), + m => setTransactionMessageLifetimeUsingBlockhash(latestBlockhash, m), + m => + appendTransactionMessageInstructions( + [ + getSetComputeUnitPriceInstruction({ microLamports: 5000n }), + getAddMemoInstruction({ memo: message }), + ], + m, + ), + ); + + // Figure out how many compute units to budget for this transaction + // so that you can right-size the compute budget to maximize the + // chance that it will be selected for inclusion into a block. + console.log("Estimating the compute consumption of the transaction"); + var estimatedComputeUnits = await getComputeUnitEstimate(transactionMessage); + // While these estimates are quite accurate they are not perfect. So you may want to add a + // buffer if you expect that the transaction may consume more compute units than estimated. + // Its not possible to exactly know what the transaction will consume when + // you send it in the future. The state may change. You can add a buffer to the estimate to account for this. 
+ // estimatedComputeUnits += 1000; + // estimatedComputeUnits *= 1.1; + // You can read more about the issue here: https://github.com/solana-labs/solana-web3.js/tree/master/packages/library#getcomputeunitestimatefortransactionmessagefactoryrpc + + console.log( + `Transaction is estimated to consume ${estimatedComputeUnits} compute units`, + ); + const budgetedTransactionMessage = prependTransactionMessageInstructions( + [getSetComputeUnitLimitInstruction({ units: estimatedComputeUnits })], + transactionMessage, + ); + + // Sign and send the transaction. + console.log("Signing and sending the transaction"); + const signedTx = await signTransactionMessageWithSigners( + budgetedTransactionMessage, + ); + const signature = getSignatureFromTransaction(signedTx); + console.log( + "Sending transaction https://explorer.solana.com/tx/" + + signature + + "/?cluster=" + + CLUSTER, + ); + await sendAndConfirmTransaction(signedTx, { commitment: "confirmed" }); + console.log("Transaction confirmed"); +} + +writeMemoWithPriorityFees("Hello, priority fees!"); +``` + + + + + +```typescript title="add-priority-fees.ts" {25-28, 30-33} +import { BN } from "@coral-xyz/anchor"; +import { + Keypair, + Connection, + LAMPORTS_PER_SOL, + sendAndConfirmTransaction, + ComputeBudgetProgram, + SystemProgram, + Transaction, +} from "@solana/web3.js"; + +(async () => { + const payer = Keypair.generate(); + const toAccount = Keypair.generate().publicKey; + + const connection = new Connection("http://127.0.0.1:8899", "confirmed"); + + const airdropSignature = await connection.requestAirdrop( + payer.publicKey, + LAMPORTS_PER_SOL, + ); + + await connection.confirmTransaction(airdropSignature); + + // request a specific compute unit budget + const modifyComputeUnits = ComputeBudgetProgram.setComputeUnitLimit({ + units: 1000000, + }); + + // set the desired priority fee + const addPriorityFee = ComputeBudgetProgram.setComputeUnitPrice({ + microLamports: 1, + }); + + // Total fee will be 5,001 Lamports for 1M CU + const transaction = new Transaction() + .add(modifyComputeUnits) + .add(addPriorityFee) + .add( + SystemProgram.transfer({ + fromPubkey: payer.publicKey, + toPubkey: toAccount, + lamports: 10000000, + }), + ); + + const signature = await sendAndConfirmTransaction(connection, transaction, [ + payer, + ]); + console.log(signature); + + const result = await connection.getParsedTransaction(signature); + console.log(result); +})(); +``` + + + + diff --git a/content/cookbook/transactions/calculate-cost.mdx b/content/cookbook/transactions/calculate-cost.mdx new file mode 100644 index 000000000..73c54080e --- /dev/null +++ b/content/cookbook/transactions/calculate-cost.mdx @@ -0,0 +1,215 @@ +--- +title: How to Calculate Transaction Cost +description: + "Every transaction costs an amount of fees to execute on Solana. Learn how to + calculate transaction fees on Solana." +--- + +The number of signatures a transaction requires are used to calculate the +transaction cost. As long as you are not creating an account, this will be the +base transaction cost. To find out more about costs to create an account, check +out [calculating rent costs](/developers/cookbook/accounts/calculate-rent). 
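
For a quick back-of-the-envelope estimate, the base fee is 5,000 lamports per required signature; the full examples below instead use `getFeeForMessage` so the cluster calculates the fee for you. A minimal, illustrative sketch with example values:

```typescript
// Illustrative only: base fee = number of required signatures * 5,000 lamports.
const LAMPORTS_PER_SIGNATURE = 5_000;
const requiredSignatures = 1; // e.g. a simple transfer signed only by the fee payer
const baseFeeLamports = requiredSignatures * LAMPORTS_PER_SIGNATURE;
console.log(`Estimated base fee: ${baseFeeLamports} lamports`); // 5000
```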
+ + + + + +```typescript title="calculate-cost.ts" {101-118} +import { + airdropFactory, + appendTransactionMessageInstructions, + compileTransactionMessage, + createSignerFromKeyPair, + createSolanaRpc, + createSolanaRpcSubscriptions, + createTransactionMessage, + devnet, + generateKeyPairSigner, + getBase64Decoder, + getCompiledTransactionMessageEncoder, + getComputeUnitEstimateForTransactionMessageFactory, + getSignatureFromTransaction, + lamports, + pipe, + prependTransactionMessageInstructions, + sendAndConfirmTransactionFactory, + setTransactionMessageFeePayerSigner, + setTransactionMessageLifetimeUsingBlockhash, + signTransactionMessageWithSigners, + type TransactionMessageBytesBase64, +} from "@solana/web3.js"; +import { + getSetComputeUnitLimitInstruction, + getSetComputeUnitPriceInstruction, +} from "@solana-program/compute-budget"; +import { getAddMemoInstruction } from "@solana-program/memo"; +import { loadDefaultKeypairWithAirdrop } from "./CreateKeypair"; + +async function calculateCost(message: string) { + // Create an RPC. + const CLUSTER = "devnet"; + const rpc = createSolanaRpc(devnet(`https://api.${CLUSTER}.solana.com`)); + const rpcSubscriptions = createSolanaRpcSubscriptions( + devnet(`wss://api.${CLUSTER}.solana.com`), + ); + + // Create a utility that estimates a transaction message's compute consumption. + const getComputeUnitEstimate = + getComputeUnitEstimateForTransactionMessageFactory({ rpc }); + + // Create a transaction sending function. + const sendAndConfirmTransaction = sendAndConfirmTransactionFactory({ + rpc, + rpcSubscriptions, + }); + + // Create an airdrop function. + const airdrop = airdropFactory({ rpc, rpcSubscriptions }); + + // Create and fund an account. + const signer = await generateKeyPairSigner(); + console.log("Created an account with address", signer.address); + console.log("Requesting airdrop"); + await airdrop({ + commitment: "confirmed", + lamports: lamports(1000_000n), + recipientAddress: signer.address, + }); + console.log("Airdrop confirmed"); + + // Create a memo transaction. + console.log("Creating a memo transaction"); + const { value: latestBlockhash } = await rpc.getLatestBlockhash().send(); + const transactionMessage = pipe( + createTransactionMessage({ version: "legacy" }), + m => setTransactionMessageFeePayerSigner(signer, m), + m => setTransactionMessageLifetimeUsingBlockhash(latestBlockhash, m), + m => + appendTransactionMessageInstructions( + [ + getSetComputeUnitPriceInstruction({ microLamports: 5000n }), + getAddMemoInstruction({ memo: message }), + ], + m, + ), + ); + + // Figure out how many compute units to budget for this transaction + // so that you can right-size the compute budget to maximize the + // chance that it will be selected for inclusion into a block. + console.log("Estimating the compute consumption of the transaction"); + const estimatedComputeUnits = + await getComputeUnitEstimate(transactionMessage); + console.log( + `Transaction is estimated to consume ${estimatedComputeUnits} compute units`, + ); + + const budgetedTransactionMessage = prependTransactionMessageInstructions( + [getSetComputeUnitLimitInstruction({ units: estimatedComputeUnits })], + transactionMessage, + ); + + const base64EncodedMessage = pipe( + // Start with the message you want the fee for. + budgetedTransactionMessage, + + // Compile it. + compileTransactionMessage, + + // Convert the compiled message into a byte array. + getCompiledTransactionMessageEncoder().encode, + + // Encode that byte array as a base64 string. 
+ getBase64Decoder().decode, + ) as TransactionMessageBytesBase64; + + const transactionCost = await rpc + .getFeeForMessage(base64EncodedMessage) + .send(); + + console.log( + "Transaction is estimated to cost " + transactionCost.value + " lamports", + ); + + // Sign and send the transaction. + console.log("Signing and sending the transaction"); + const signedTx = await signTransactionMessageWithSigners( + budgetedTransactionMessage, + ); + const signature = getSignatureFromTransaction(signedTx); + console.log( + "Sending transaction https://explorer.solana.com/tx/" + + signature + + "/?cluster=" + + CLUSTER, + ); + await sendAndConfirmTransaction(signedTx, { commitment: "confirmed" }); + console.log("Transaction confirmed"); + // Transaction is estimated to consume 6236 compute units + // Transaction is estimated to cost 5032 lamports +} + +calculateCost("Hello, Fees!"); +``` + + + + +```typescript title="calculate-cost.ts {108-111}" +import { + clusterApiUrl, + Connection, + Keypair, + Message, + SystemProgram, + SYSTEM_INSTRUCTION_LAYOUTS, + Transaction, +} from "@solana/web3.js"; +import bs58 from "bs58"; + +(async () => { + // Connect to cluster + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + + const payer = Keypair.generate(); + const recipient = Keypair.generate(); + + const type = SYSTEM_INSTRUCTION_LAYOUTS.Transfer; + const data = Buffer.alloc(type.layout.span); + const layoutFields = Object.assign({ instruction: type.index }); + type.layout.encode(layoutFields, data); + + const recentBlockhash = await connection.getLatestBlockhash(); + + const messageParams = { + accountKeys: [ + payer.publicKey.toString(), + recipient.publicKey.toString(), + SystemProgram.programId.toString(), + ], + header: { + numReadonlySignedAccounts: 0, + numReadonlyUnsignedAccounts: 1, + numRequiredSignatures: 1, + }, + instructions: [ + { + accounts: [0, 1], + data: bs58.encode(data), + programIdIndex: 2, + }, + ], + recentBlockhash: recentBlockhash.blockhash, + }; + + const message = new Message(messageParams); + + const fees = await connection.getFeeForMessage(message); + console.log(`Estimated SOL transfer cost: ${fees.value} lamports`); + // Estimated SOL transfer cost: 5000 lamports +})(); +``` + + + + diff --git a/content/cookbook/transactions/meta.json b/content/cookbook/transactions/meta.json new file mode 100644 index 000000000..fd410e732 --- /dev/null +++ b/content/cookbook/transactions/meta.json @@ -0,0 +1,13 @@ +{ + "title": "Transactions", + "pages": [ + "send-sol", + "send-tokens", + "calculate-cost", + "add-memo", + "add-priority-fees", + "optimize-compute", + "offline-transactions" + ], + "defaultOpen": true +} diff --git a/content/cookbook/transactions/offline-transactions.mdx b/content/cookbook/transactions/offline-transactions.mdx new file mode 100644 index 000000000..b955e7f5d --- /dev/null +++ b/content/cookbook/transactions/offline-transactions.mdx @@ -0,0 +1,388 @@ +--- +title: Offline Transactions +description: Learn how to create and sign transactions offline. +--- + +## Sign Transaction + +To create an offline transaction, you have to sign the transaction and then +anyone can broadcast it on the network. + +```typescript title="sign-transaction.ts" +import { + clusterApiUrl, + Connection, + Keypair, + Transaction, + SystemProgram, + LAMPORTS_PER_SOL, + Message, +} from "@solana/web3.js"; +import * as nacl from "tweetnacl"; +import * as bs58 from "bs58"; + +// to complete a offline transaction, I will separate them into four steps +// 1. 
Create Transaction +// 2. Sign Transaction +// 3. Recover Transaction +// 4. Send Transaction + +(async () => { + // create connection + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + + // create a example tx, alice transfer to bob and feePayer is `feePayer` + // alice and feePayer are signer in this tx + const feePayer = Keypair.generate(); + await connection.confirmTransaction( + await connection.requestAirdrop(feePayer.publicKey, LAMPORTS_PER_SOL), + ); + const alice = Keypair.generate(); + await connection.confirmTransaction( + await connection.requestAirdrop(alice.publicKey, LAMPORTS_PER_SOL), + ); + const bob = Keypair.generate(); + + // 1. Create Transaction + let tx = new Transaction().add( + SystemProgram.transfer({ + fromPubkey: alice.publicKey, + toPubkey: bob.publicKey, + lamports: 0.1 * LAMPORTS_PER_SOL, + }), + ); + tx.recentBlockhash = (await connection.getRecentBlockhash()).blockhash; + tx.feePayer = feePayer.publicKey; + let realDataNeedToSign = tx.serializeMessage(); // the real data singer need to sign. + + // 2. Sign Transaction + // use any lib you like, the main idea is to use ed25519 to sign it. + // the return signature should be 64 bytes. + let feePayerSignature = nacl.sign.detached( + realDataNeedToSign, + feePayer.secretKey, + ); + let aliceSignature = nacl.sign.detached(realDataNeedToSign, alice.secretKey); + + // 3. Recover Transaction + + // you can verify signatures before you recovering the transaction + let verifyFeePayerSignatureResult = nacl.sign.detached.verify( + realDataNeedToSign, + feePayerSignature, + feePayer.publicKey.toBytes(), // you should use the raw pubkey (32 bytes) to verify + ); + console.log(`verify feePayer signature: ${verifyFeePayerSignatureResult}`); + + let verifyAliceSignatureResult = nacl.sign.detached.verify( + realDataNeedToSign, + aliceSignature, + alice.publicKey.toBytes(), + ); + console.log(`verify alice signature: ${verifyAliceSignatureResult}`); + + // there are two ways you can recover the tx + // 3.a Recover Transaction (use populate then addSignature) + { + let recoverTx = Transaction.populate(Message.from(realDataNeedToSign)); + recoverTx.addSignature(feePayer.publicKey, Buffer.from(feePayerSignature)); + recoverTx.addSignature(alice.publicKey, Buffer.from(aliceSignature)); + + // 4. Send transaction + console.log( + `txhash: ${await connection.sendRawTransaction(recoverTx.serialize())}`, + ); + } + + // or + + // 3.b. Recover Transaction (use populate with signature) + { + let recoverTx = Transaction.populate(Message.from(realDataNeedToSign), [ + bs58.encode(feePayerSignature), + bs58.encode(aliceSignature), + ]); + + // 4. Send transaction + console.log( + `txhash: ${await connection.sendRawTransaction(recoverTx.serialize())}`, + ); + } + + // if this process takes too long, your recent blockhash will expire (after 150 blocks). + // you can use `durable nonce` to get rid of it. +})(); +``` + +## Partial Sign Transaction + +When a transaction requires multiple signatures, you can partially sign it. The +other signers can then sign and broadcast it on the network. 
+ +Some examples of when this is useful: + +- Send an SPL token in return for payment +- Sign a transaction so that you can later verify its authenticity +- Call custom programs in a transaction that require your signature + +In this example Bob sends Alice an SPL token in return for her payment: + +```typescript title="partial-sign-transaction.ts" +import { + createTransferCheckedInstruction, + getAssociatedTokenAddress, + getMint, + getOrCreateAssociatedTokenAccount, +} from "@solana/spl-token"; +import { + clusterApiUrl, + Connection, + Keypair, + LAMPORTS_PER_SOL, + PublicKey, + SystemProgram, + Transaction, +} from "@solana/web3.js"; +import base58 from "bs58"; + +/* The transaction: + * - sends 0.01 SOL from Alice to Bob + * - sends 1 token from Bob to Alice + * - is partially signed by Bob, so Alice can approve + send it + */ + +(async () => { + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + + const alicePublicKey = new PublicKey( + "5YNmS1R9nNSCDzb5a7mMJ1dwK9uHeAAF4CmPEwKgVWr8", + ); + const bobKeypair = Keypair.fromSecretKey( + base58.decode( + "4NMwxzmYj2uvHuq8xoqhY8RXg63KSVJM1DXkpbmkUY7YQWuoyQgFnnzn6yo3CMnqZasnNPNuAT2TLwQsCaKkUddp", + ), + ); + const tokenAddress = new PublicKey( + "Gh9ZwEmdLJ8DscKNTkTqPbNwLNNBjuSzaG9Vp2KGtKJr", + ); + const bobTokenAddress = await getAssociatedTokenAddress( + tokenAddress, + bobKeypair.publicKey, + ); + + // Alice may not have a token account, so Bob creates one if not + const aliceTokenAccount = await getOrCreateAssociatedTokenAccount( + connection, + bobKeypair, // Bob pays the fee to create it + tokenAddress, // which token the account is for + alicePublicKey, // who the token account is for + ); + + // Get the details about the token mint + const tokenMint = await getMint(connection, tokenAddress); + + // Get a recent blockhash to include in the transaction + const { blockhash } = await connection.getLatestBlockhash("finalized"); + + const transaction = new Transaction({ + recentBlockhash: blockhash, + // Alice pays the transaction fee + feePayer: alicePublicKey, + }); + + // Transfer 0.01 SOL from Alice -> Bob + transaction.add( + SystemProgram.transfer({ + fromPubkey: alicePublicKey, + toPubkey: bobKeypair.publicKey, + lamports: 0.01 * LAMPORTS_PER_SOL, + }), + ); + + // Transfer 1 token from Bob -> Alice + transaction.add( + createTransferCheckedInstruction( + bobTokenAddress, // source + tokenAddress, // mint + aliceTokenAccount.address, // destination + bobKeypair.publicKey, // owner of source account + 1 * 10 ** tokenMint.decimals, // amount to transfer + tokenMint.decimals, // decimals of token + ), + ); + + // Partial sign as Bob + transaction.partialSign(bobKeypair); + + // Serialize the transaction and convert to base64 to return it + const serializedTransaction = transaction.serialize({ + // We will need Alice to deserialize and sign the transaction + requireAllSignatures: false, + }); + const transactionBase64 = serializedTransaction.toString("base64"); + return transactionBase64; + + // The caller of this can convert it back to a transaction object: + const recoveredTransaction = Transaction.from( + Buffer.from(transactionBase64, "base64"), + ); +})(); +``` + +## Durable Nonce + +`recentBlockhash` is an important value for a transaction. Your transaction +will +be rejected if you use an expired blockhash (older than 150 blocks). Instead of +a recent blockhash, you can use a durable nonce, which never expires. To use a +durable nonce, your transaction must: + +1. 
use a `nonce` stored in `nonce account` as a recent blockhash +2. put `nonce advance` operation in the first instruction + +### Create Nonce Account + +```typescript title="create-nonce-account.ts" +import { + clusterApiUrl, + Connection, + Keypair, + Transaction, + NONCE_ACCOUNT_LENGTH, + SystemProgram, + LAMPORTS_PER_SOL, +} from "@solana/web3.js"; + +(async () => { + // Setup our connection and wallet + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + const feePayer = Keypair.generate(); + + // Fund our wallet with 1 SOL + const airdropSignature = await connection.requestAirdrop( + feePayer.publicKey, + LAMPORTS_PER_SOL, + ); + await connection.confirmTransaction(airdropSignature); + + // you can use any keypair as nonce account authority, + // this uses the default Solana keypair file (id.json) as the nonce account authority + const nonceAccountAuth = await getKeypairFromFile(); + + let nonceAccount = Keypair.generate(); + console.log(`nonce account: ${nonceAccount.publicKey.toBase58()}`); + + let tx = new Transaction().add( + // create nonce account + SystemProgram.createAccount({ + fromPubkey: feePayer.publicKey, + newAccountPubkey: nonceAccount.publicKey, + lamports: + await connection.getMinimumBalanceForRentExemption( + NONCE_ACCOUNT_LENGTH, + ), + space: NONCE_ACCOUNT_LENGTH, + programId: SystemProgram.programId, + }), + // init nonce account + SystemProgram.nonceInitialize({ + noncePubkey: nonceAccount.publicKey, // nonce account pubkey + authorizedPubkey: nonceAccountAuth.publicKey, // nonce account authority (for advance and close) + }), + ); + + console.log( + `txhash: ${await sendAndConfirmTransaction(connection, tx, [feePayer, nonceAccount])}`, + ); +})(); +``` + +### Get Nonce Account + +```typescript title="get-nonce-account.ts" +import { + clusterApiUrl, + Connection, + PublicKey, + Keypair, + NonceAccount, +} from "@solana/web3.js"; + +(async () => { + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + + const nonceAccountPubkey = new PublicKey( + "7H18z3v3rZEoKiwY3kh8DLn9eFT6nFCQ2m4kiC7RZ3a4", + ); + + let accountInfo = await connection.getAccountInfo(nonceAccountPubkey); + let nonceAccount = NonceAccount.fromAccountData(accountInfo.data); + console.log(`nonce: ${nonceAccount.nonce}`); + console.log(`authority: ${nonceAccount.authorizedPubkey.toBase58()}`); + console.log(`fee calculator: ${JSON.stringify(nonceAccount.feeCalculator)}`); +})(); +``` + +### Use Nonce Account + +```typescript title="use-nonce-account.ts" +import { + clusterApiUrl, + Connection, + PublicKey, + Keypair, + Transaction, + SystemProgram, + NonceAccount, + LAMPORTS_PER_SOL, +} from "@solana/web3.js"; +import * as bs58 from "bs58"; +import { getKeypairFromFile } from "@solana-developers/helpers"; + +(async () => { + // Setup our connection and wallet + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + const feePayer = Keypair.generate(); + + // Fund our wallet with 1 SOL + const airdropSignature = await connection.requestAirdrop( + feePayer.publicKey, + LAMPORTS_PER_SOL, + ); + await connection.confirmTransaction(airdropSignature); + + // you can use any keypair as nonce account authority, + // but nonceAccountAuth must be the same as the one used in nonce account creation + // load default solana keypair for nonce account authority + const nonceAccountAuth = await getKeypairFromFile(); + + const nonceAccountPubkey = new PublicKey( + "7H18z3v3rZEoKiwY3kh8DLn9eFT6nFCQ2m4kiC7RZ3a4", + ); + let nonceAccountInfo = await 
connection.getAccountInfo(nonceAccountPubkey); + let nonceAccount = NonceAccount.fromAccountData(nonceAccountInfo.data); + + let tx = new Transaction().add( + // nonce advance must be the first instruction + SystemProgram.nonceAdvance({ + noncePubkey: nonceAccountPubkey, + authorizedPubkey: nonceAccountAuth.publicKey, + }), + // after that, you do what you really want to do, here we append a transfer instruction as an example. + SystemProgram.transfer({ + fromPubkey: feePayer.publicKey, + toPubkey: nonceAccountAuth.publicKey, + lamports: 1, + }), + ); + // assign `nonce` as recentBlockhash + tx.recentBlockhash = nonceAccount.nonce; + tx.feePayer = feePayer.publicKey; + tx.sign( + feePayer, + nonceAccountAuth, + ); /* fee payer + nonce account authority + ... */ + + console.log(`txhash: ${await connection.sendRawTransaction(tx.serialize())}`); +})(); +``` diff --git a/content/cookbook/transactions/optimize-compute.mdx b/content/cookbook/transactions/optimize-compute.mdx new file mode 100644 index 000000000..0d2311a67 --- /dev/null +++ b/content/cookbook/transactions/optimize-compute.mdx @@ -0,0 +1,53 @@ +--- +title: How to Optimize Compute Requested +--- + +Optimizing the Compute Requested on a transaction is important to ensure that +the transaction is both processed in a timely manner as well as to avoid paying +too much in priority fees. + +For more information about requesting optimal compute, +[check out the full guide](/developers/guides/advanced/how-to-request-optimal-compute). +You can also find more information about +[using priority fees](/developers/guides/advanced/how-to-use-priority-fees) in +this detailed guide. + +```typescript title="optimize-compute.ts" +// import { ... } from "@solana/web3.js" + +async function buildOptimalTransaction( + connection: Connection, + instructions: Array, + signer: Signer, + lookupTables: Array, +) { + const [microLamports, units, recentBlockhash] = await Promise.all([ + 100 /* Get optimal priority fees - https://solana.com/developers/guides/advanced/how-to-use-priority-fees*/, + getSimulationComputeUnits( + connection, + instructions, + signer.publicKey, + lookupTables, + ), + connection.getLatestBlockhash(), + ]); + + instructions.unshift( + ComputeBudgetProgram.setComputeUnitPrice({ microLamports }), + ); + if (units) { + // probably should add some margin of error to units + instructions.unshift(ComputeBudgetProgram.setComputeUnitLimit({ units })); + } + return { + transaction: new VersionedTransaction( + new TransactionMessage({ + instructions, + recentBlockhash: recentBlockhash.blockhash, + payerKey: signer.publicKey, + }).compileToV0Message(lookupTables), + ), + recentBlockhash, + }; +} +``` diff --git a/content/cookbook/transactions/send-sol.mdx b/content/cookbook/transactions/send-sol.mdx new file mode 100644 index 000000000..6eb2d923d --- /dev/null +++ b/content/cookbook/transactions/send-sol.mdx @@ -0,0 +1,177 @@ +--- +title: How to Send SOL +description: + "The most common action on Solana is sending SOL. Learn how to send SOL on + Solana." +--- + +To send SOL, you will need to interact with the [SystemProgram][1]. 
+ + + + + +```typescript title="send-sol.ts" {70-74} +import { + address, + airdropFactory, + appendTransactionMessageInstructions, + createSolanaRpc, + createSolanaRpcSubscriptions, + createTransactionMessage, + devnet, + generateKeyPairSigner, + getComputeUnitEstimateForTransactionMessageFactory, + getSignatureFromTransaction, + lamports, + pipe, + prependTransactionMessageInstructions, + sendAndConfirmTransactionFactory, + setTransactionMessageFeePayerSigner, + setTransactionMessageLifetimeUsingBlockhash, + signTransactionMessageWithSigners, +} from "@solana/web3.js"; +import { + getSetComputeUnitLimitInstruction, + getSetComputeUnitPriceInstruction, +} from "@solana-program/compute-budget"; +import { getAddMemoInstruction } from "@solana-program/memo"; +import { getTransferSolInstruction } from "@solana-program/system"; + +async function transferSol() { + // Create an RPC. Use localnet for solana-test-validator. This will get you easier airdrops. + const CLUSTER = "devnet"; + const rpc = createSolanaRpc(devnet(`https://api.${CLUSTER}.solana.com`)); + const rpcSubscriptions = createSolanaRpcSubscriptions( + devnet(`wss://api.${CLUSTER}.solana.com`), + ); + + // Create an airdrop function. + const airdrop = airdropFactory({ rpc, rpcSubscriptions }); + + // Create a utility that estimates a transaction message's compute consumption. + const getComputeUnitEstimate = + getComputeUnitEstimateForTransactionMessageFactory({ rpc }); + + // Create a transaction sending function. + const sendAndConfirmTransaction = sendAndConfirmTransactionFactory({ + rpc, + rpcSubscriptions, + }); + + // Create and fund an account. + const keypairSigner = await generateKeyPairSigner(); + console.log("Created an account with address", keypairSigner.address); + console.log("Requesting airdrop"); + await airdrop({ + commitment: "confirmed", + lamports: lamports(1_000_000_000n), + recipientAddress: keypairSigner.address, + }); + console.log("Airdrop confirmed"); + + // Create a memo transaction. + console.log("Creating a memo transaction"); + const { value: latestBlockhash } = await rpc.getLatestBlockhash().send(); + const transactionMessage = pipe( + createTransactionMessage({ version: "legacy" }), + m => setTransactionMessageFeePayerSigner(keypairSigner, m), + m => setTransactionMessageLifetimeUsingBlockhash(latestBlockhash, m), + m => + appendTransactionMessageInstructions( + [ + getSetComputeUnitPriceInstruction({ microLamports: 5000n }), + getTransferSolInstruction({ + source: keypairSigner, + destination: address("web3Qm5PuFapMJqe6PWRWfRBarkeqE2ZC8Eew3zwHH2"), + amount: lamports(1_000_000n), + }), + ], + m, + ), + ); + + // Figure out how many compute units to budget for this transaction + // so that you can right-size the compute budget to maximize the + // chance that it will be selected for inclusion into a block. + console.log("Estimating the compute consumption of the transaction"); + const estimatedComputeUnits = + await getComputeUnitEstimate(transactionMessage); + console.log( + `Transaction is estimated to consume ${estimatedComputeUnits} compute units`, + ); + const budgetedTransactionMessage = prependTransactionMessageInstructions( + [getSetComputeUnitLimitInstruction({ units: estimatedComputeUnits })], + transactionMessage, + ); + + // Sign and send the transaction. 
+ console.log("Signing and sending the transaction"); + const signedTx = await signTransactionMessageWithSigners( + budgetedTransactionMessage, + ); + const signature = getSignatureFromTransaction(signedTx); + console.log( + "Sending transaction https://explorer.solana.com/tx/" + + signature + + "/?cluster=" + + CLUSTER, + ); + await sendAndConfirmTransaction(signedTx, { commitment: "confirmed" }); + console.log("Transaction confirmed"); +} + +transferSol(); +``` + + + + + +```typescript title="send-sol.ts" {28-38} +import { + Connection, + Keypair, + SystemProgram, + LAMPORTS_PER_SOL, + Transaction, + sendAndConfirmTransaction, +} from "@solana/web3.js"; + +(async () => { + const fromKeypair = Keypair.generate(); + const toKeypair = Keypair.generate(); + + const connection = new Connection( + "https://api.devnet.solana.com", + "confirmed", + ); + + const airdropSignature = await connection.requestAirdrop( + fromKeypair.publicKey, + LAMPORTS_PER_SOL, + ); + + await connection.confirmTransaction(airdropSignature); + + const lamportsToSend = 1_000_000; + + const transferTransaction = new Transaction().add( + SystemProgram.transfer({ + fromPubkey: fromKeypair.publicKey, + toPubkey: toKeypair.publicKey, + lamports: lamportsToSend, + }), + ); + + await sendAndConfirmTransaction(connection, transferTransaction, [ + fromKeypair, + ]); +})(); +``` + + + + + +[1]: https://docs.anza.xyz/runtime/programs#system-program diff --git a/content/cookbook/transactions/send-tokens.mdx b/content/cookbook/transactions/send-tokens.mdx new file mode 100644 index 000000000..c7274e559 --- /dev/null +++ b/content/cookbook/transactions/send-tokens.mdx @@ -0,0 +1,95 @@ +--- +title: How to Send Tokens +description: + "Using tokens is common within any application on Solana. Learn how to send + tokens on Solana." +--- + +Use the [Token Program][1] to transfer SPL Tokens. In order to send a SPL token, +you need to know its SPL token account address. You can both get the address and +send tokens with the following example. 
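
Before the full example, note that if you only need the associated token account address for a known mint and owner (without creating or funding the account), you can derive it with `getAssociatedTokenAddress` from `@solana/spl-token`. This is a minimal sketch; the mint and owner below are placeholder addresses reused from elsewhere in this cookbook:

```typescript
import { PublicKey } from "@solana/web3.js";
import { getAssociatedTokenAddress } from "@solana/spl-token";

(async () => {
  // Placeholder mint and owner -- replace with your own addresses
  const mint = new PublicKey("8mAKLjGGmjKTnmcXeyr3pr7iX13xXVjJJiL6RujDbSPV");
  const owner = new PublicKey("5YNmS1R9nNSCDzb5a7mMJ1dwK9uHeAAF4CmPEwKgVWr8");

  // Derives the address only; it does not create the token account on chain
  const tokenAccountAddress = await getAssociatedTokenAddress(mint, owner);
  console.log(`Token account: ${tokenAccountAddress.toBase58()}`);
})();
```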
+ +```typescript title="send-tokens.ts" +import { + Connection, + clusterApiUrl, + Keypair, + LAMPORTS_PER_SOL, + Transaction, + sendAndConfirmTransaction, +} from "@solana/web3.js"; +import { + createMint, + getOrCreateAssociatedTokenAccount, + mintTo, + createTransferInstruction, +} from "@solana/spl-token"; + +(async () => { + // Connect to cluster + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + + // Generate a new wallet keypair and airdrop SOL + const fromWallet = Keypair.generate(); + const fromAirdropSignature = await connection.requestAirdrop( + fromWallet.publicKey, + LAMPORTS_PER_SOL, + ); + // Wait for airdrop confirmation + await connection.confirmTransaction(fromAirdropSignature); + + // Generate a new wallet to receive newly minted token + const toWallet = Keypair.generate(); + + // Create new token mint + const mint = await createMint( + connection, + fromWallet, + fromWallet.publicKey, + null, + 9, + ); + + // Get the token account of the fromWallet Solana address, if it does not exist, create it + const fromTokenAccount = await getOrCreateAssociatedTokenAccount( + connection, + fromWallet, + mint, + fromWallet.publicKey, + ); + + //get the token account of the toWallet Solana address, if it does not exist, create it + const toTokenAccount = await getOrCreateAssociatedTokenAccount( + connection, + fromWallet, + mint, + toWallet.publicKey, + ); + + // Minting 1 new token to the "fromTokenAccount" account we just returned/created + await mintTo( + connection, + fromWallet, + mint, + fromTokenAccount.address, + fromWallet.publicKey, + 1000000000, // it's 1 token, but in lamports + [], + ); + + // Add token transfer instructions to transaction + const transaction = new Transaction().add( + createTransferInstruction( + fromTokenAccount.address, + toTokenAccount.address, + fromWallet.publicKey, + 1, + ), + ); + + // Sign transaction, broadcast, and confirm + await sendAndConfirmTransaction(connection, transaction, [fromWallet]); +})(); +``` + +[1]: https://spl.solana.com/token diff --git a/content/cookbook/wallets/check-publickey.mdx b/content/cookbook/wallets/check-publickey.mdx new file mode 100644 index 000000000..f18f54eb0 --- /dev/null +++ b/content/cookbook/wallets/check-publickey.mdx @@ -0,0 +1,66 @@ +--- +title: How to Validate a Public Key +description: + "Public keys on Solana can be validated with a small amount of code. Learn how + to validate public keys on Solana." +--- + +In certain special cases (e.g. a Program Derived Address), public keys may not +have a private key associated with them. You can check this by looking to see if +the public key lies on the ed25519 curve. Only public keys that lie on the curve +can be controlled by users with wallets. 
+ + + + + +```typescript +import { isAddress } from "@solana/web3.js"; + +// Note that generateKeyPair() will always give a public key that is valid for users + +// Valid public key +const key = "5oNDL3swdJJF1g9DzJiZ4ynHXgszjAEpUkxVYejchzrY"; + +// Lies on the ed25519 curve and is suitable for users +console.log("Valid Address: ", isAddress(key)); + +// // Valid public key +const offCurveAddress = "4BJXYkfvg37zEmBbsacZjeQDpTNx91KppxFJxRqrz48e"; + +// // Not on the ed25519 curve, therefore not suitable for users +console.log("Valid Off Curve Address: ", isAddress(offCurveAddress)); + +// // Not a valid public key +const errorPubkey = "testPubkey"; +console.log("Invalid Address: ", isAddress(errorPubkey)); +``` + + + + +```typescript +import { PublicKey } from "@solana/web3.js"; + +// Note that Keypair.generate() will always give a public key that is valid for users + +// Valid public key +const key = new PublicKey("5oNDL3swdJJF1g9DzJiZ4ynHXgszjAEpUkxVYejchzrY"); +// Lies on the ed25519 curve and is suitable for users +console.log(PublicKey.isOnCurve(key.toBytes())); + +// Valid public key +const offCurveAddress = new PublicKey( + "4BJXYkfvg37zEmBbsacZjeQDpTNx91KppxFJxRqrz48e", +); + +// Not on the ed25519 curve, therefore not suitable for users +console.log(PublicKey.isOnCurve(offCurveAddress.toBytes())); + +// Not a valid public key +const errorPubkey = new PublicKey("testPubkey"); +console.log(PublicKey.isOnCurve(errorPubkey.toBytes())); +``` + + + diff --git a/content/cookbook/wallets/connect-wallet-react.mdx b/content/cookbook/wallets/connect-wallet-react.mdx new file mode 100644 index 000000000..f3ceda145 --- /dev/null +++ b/content/cookbook/wallets/connect-wallet-react.mdx @@ -0,0 +1,86 @@ +--- +title: How to Connect a Wallet with React +description: + "Every application on Solana requires a connection with a user's wallet to + use. Learn how to connect to wallets on Solana." +--- + +Solana's [wallet-adapter](https://github.com/anza-xyz/wallet-adapter) library +makes it easy to manage wallet connections client-side. For a full length guide, +check out +[Add Solana Wallet Adapter to a NextJS application](/developers/guides/wallets/add-solana-wallet-adapter-to-nextjs). + +> For web3.js v2, please reference the +> [react example](https://github.com/solana-labs/solana-web3.js/tree/master/examples/react-app) +> from the +> [Anza Web3js v2 Blog](https://www.anza.xyz/blog/solana-web3-js-2-release). + +## How to Connect to a Wallet with React + +> Currently, `create-solana-dapp` only works with Solana Web3.js v1. + +For quick setup with React use: + +```bash +npx create-solana-dapp +``` + +For manual setup, run the following command to install the required +dependencies: + +```bash +npm install --save \ + @solana/wallet-adapter-base \ + @solana/wallet-adapter-react \ + @solana/wallet-adapter-react-ui \ + @solana/wallet-adapter-wallets \ + @solana/web3.js@1 \ + react +``` + +The `WalletProvider` can than be setup to connect to a user's wallet and later +send transactions. 
+ +```typescript +import React, { FC, useMemo } from 'react'; +import { ConnectionProvider, WalletProvider } from '@solana/wallet-adapter-react'; +import { WalletAdapterNetwork } from '@solana/wallet-adapter-base'; +import { UnsafeBurnerWalletAdapter } from '@solana/wallet-adapter-wallets'; +import { + WalletModalProvider, + WalletDisconnectButton, + WalletMultiButton +} from '@solana/wallet-adapter-react-ui'; +import { clusterApiUrl } from '@solana/web3.js'; + +// Default styles that can be overridden by your app +require('@solana/wallet-adapter-react-ui/styles.css'); + +export const Wallet: FC = () => { + // The network can be set to 'devnet', 'testnet', or 'mainnet-beta'. + const network = WalletAdapterNetwork.Devnet; + + // You can also provide a custom RPC endpoint. + const endpoint = useMemo(() => clusterApiUrl(network), [network]); + + const wallets = useMemo( + () => [ + new UnsafeBurnerWalletAdapter(), + ], + // eslint-disable-next-line react-hooks/exhaustive-deps + [network] + ); + + return ( + + + + + + { /* Your app's components go here, nested within the context providers. */ } + + + + ); +}; +``` diff --git a/content/cookbook/wallets/create-keypair.mdx b/content/cookbook/wallets/create-keypair.mdx new file mode 100644 index 000000000..a918cb444 --- /dev/null +++ b/content/cookbook/wallets/create-keypair.mdx @@ -0,0 +1,37 @@ +--- +title: How to Create a Keypair +description: + "Every transaction requires a signature from a keypair on Solana. Learn how to + create Keypairs on Solana." +--- + +Any transaction on the Solana blockchain requires a keypair or wallet. If you +are [connecting to a wallet](/developers/cookbook/wallets/connect-wallet-react), +you do not need to worry about the keypair. Otherwise a keypair must be +generated for signing transactions. + + + + + +```javascript +import { generateKeyPairSigner } from "@solana/web3.js"; + +const signer = await generateKeyPairSigner(); +console.log("address: ", signer.address); +``` + + + + + +```javascript +import { Keypair } from "@solana/web3.js"; + +const keypair = Keypair.generate(); +console.log("address:", keypair.publicKey.toBase58()); +``` + + + + diff --git a/content/cookbook/wallets/generate-mnemonic.mdx b/content/cookbook/wallets/generate-mnemonic.mdx new file mode 100644 index 000000000..c152bb70c --- /dev/null +++ b/content/cookbook/wallets/generate-mnemonic.mdx @@ -0,0 +1,17 @@ +--- +title: How to Generate Mnemonics for Keypairs +description: + "Mnemonics make is easy for users to store their keypair's secret. Learn how + to use mnemonics on Solana." +--- + +One way to generate a Keypair is through the use of a Mnemonic. Mnemonics are +generally used to make the user experience within wallets better than a Keypair +file by using a list of readable words (instead of a shorter string of random +numbers and letters). + +```typescript title="generate-mnemonic.ts" +import * as bip39 from "bip39"; + +const mnemonic = bip39.generateMnemonic(); +``` diff --git a/content/cookbook/wallets/generate-vanity-address.mdx b/content/cookbook/wallets/generate-vanity-address.mdx new file mode 100644 index 000000000..571a267c6 --- /dev/null +++ b/content/cookbook/wallets/generate-vanity-address.mdx @@ -0,0 +1,22 @@ +--- +title: How to Generate a Vanity Address +description: + "Creating custom addresses on Solana is a fun way to make your public key + unique. Learn how to create vanity addresses on Solana." +--- + +Vanity publickeys, or custom addresses, are keys that have start with specific +characters. 
+ +For example, a person may want a publickey to start with `elv1s`, or maybe even +`cook`. These can help other people remember who the key belongs to, making the +key more easily identifiable. + +**Note**: The more characters in your vanity address, the longer it will take. + +You can generate a vanity address using the +[Solana CLI](/docs/intro/installation): + +```bash +solana-keygen grind --starts-with e1v1s:1 +``` diff --git a/content/cookbook/wallets/meta.json b/content/cookbook/wallets/meta.json new file mode 100644 index 000000000..ca9a8e7e4 --- /dev/null +++ b/content/cookbook/wallets/meta.json @@ -0,0 +1,15 @@ +{ + "title": "Wallets", + "pages": [ + "create-keypair", + "restore-keypair", + "verify-keypair", + "check-publickey", + "generate-mnemonic", + "restore-from-mnemonic", + "generate-vanity-address", + "sign-message", + "connect-wallet-react" + ], + "defaultOpen": true +} diff --git a/content/cookbook/wallets/restore-from-mnemonic.mdx b/content/cookbook/wallets/restore-from-mnemonic.mdx new file mode 100644 index 000000000..49d7019e2 --- /dev/null +++ b/content/cookbook/wallets/restore-from-mnemonic.mdx @@ -0,0 +1,46 @@ +--- +title: How to Restore a Keypair from a Mnemonic +description: "Learn how to restore keypairs from a mnemonic on Solana" +--- + +Many wallet extensions use mnemonics to represent their secret keys. You can +convert the mnemonic to Keypairs for local testing. + +## Restoring BIP39 format mnemonics + +```typescript title="restore-bip39-mnemonic.ts" +import { Keypair } from "@solana/web3.js"; +import * as bip39 from "bip39"; + +const mnemonic = + "pill tomorrow foster begin walnut borrow virtual kick shift mutual shoe scatter"; + +// arguments: (mnemonic, password) +const seed = bip39.mnemonicToSeedSync(mnemonic, ""); +const keypair = Keypair.fromSeed(seed.slice(0, 32)); + +console.log(`${keypair.publicKey.toBase58()}`); + +// output: 5ZWj7a1f8tWkjBESHKgrLmXshuXxqeY9SYcfbshpAqPG +``` + +## Restoring BIP44 formant mnemonics + +```typescript title="restore-bip44-mnemonic.ts" +import { Keypair } from "@solana/web3.js"; +import { HDKey } from "micro-ed25519-hdkey"; +import * as bip39 from "bip39"; + +const mnemonic = + "neither lonely flavor argue grass remind eye tag avocado spot unusual intact"; + +// arguments: (mnemonic, password) +const seed = bip39.mnemonicToSeedSync(mnemonic, ""); +const hd = HDKey.fromMasterSeed(seed.toString("hex")); + +for (let i = 0; i < 10; i++) { + const path = `m/44'/501'/${i}'/0'`; + const keypair = Keypair.fromSeed(hd.derive(path).privateKey); + console.log(`${path} => ${keypair.publicKey.toBase58()}`); +} +``` diff --git a/content/cookbook/wallets/restore-keypair.mdx b/content/cookbook/wallets/restore-keypair.mdx new file mode 100644 index 000000000..007c3d112 --- /dev/null +++ b/content/cookbook/wallets/restore-keypair.mdx @@ -0,0 +1,76 @@ +--- +title: How to Restore a Keypair +description: "Learn how to restore keypairs from a secret on Solana." +--- + +If you already have your secret key or bytes, you can get your Keypair from the +secret to test out your dApp. 
+ +## From Bytes + + + + + +```typescript +import { createKeyPairFromBytes } from "@solana/web3.js"; + +const keypairBytes = new Uint8Array([ + 174, 47, 154, 16, 202, 193, 206, 113, 199, 190, 53, 133, 169, 175, 31, 56, + 222, 53, 138, 189, 224, 216, 117, 173, 10, 149, 53, 45, 73, 251, 237, 246, 15, + 185, 186, 82, 177, 240, 148, 69, 241, 227, 167, 80, 141, 89, 240, 121, 121, + 35, 172, 247, 68, 251, 226, 218, 48, 63, 176, 109, 168, 89, 238, 135, +]); + +const keypair = await createKeyPairFromBytes(keypairBytes); +``` + + + + +```typescript +import { Keypair } from "@solana/web3.js"; + +const keypairBytes = Uint8Array.from([ + 174, 47, 154, 16, 202, 193, 206, 113, 199, 190, 53, 133, 169, 175, 31, 56, + 222, 53, 138, 189, 224, 216, 117, 173, 10, 149, 53, 45, 73, 251, 237, 246, 15, + 185, 186, 82, 177, 240, 148, 69, 241, 227, 167, 80, 141, 89, 240, 121, 121, + 35, 172, 247, 68, 251, 226, 218, 48, 63, 176, 109, 168, 89, 238, 135, +]); + +const keypair = Keypair.fromSecretKey(keypairBytes); +``` + + + + +## From Base58 String + + + + + +```typescript +import { createKeyPairFromBytes, getBase58Codec } from "@solana/web3.js"; + +const keypairBase58 = + "5MaiiCavjCmn9Hs1o3eznqDEhRwxo7pXiAYez7keQUviUkauRiTMD8DrESdrNjN8zd9mTmVhRvBJeg5vhyvgrAhG"; +const keypairBytes = getBase58Codec().decode(keypairBase58); +const keypair = await createKeyPairFromBytes(keypairBytes); +``` + + + + +```typescript +import { Keypair } from "@solana/web3.js"; +import * as bs58 from "bs58"; + +const keypairBase58 = + "5MaiiCavjCmn9Hs1o3eznqDEhRwxo7pXiAYez7keQUviUkauRiTMD8DrESdrNjN8zd9mTmVhRvBJeg5vhyvgrAhG"; +const keypairBytes = bs58.decode(keypairBase58); +const keypair = Keypair.fromSecretKey(keypairBytes); +``` + + + diff --git a/content/cookbook/wallets/sign-message.mdx b/content/cookbook/wallets/sign-message.mdx new file mode 100644 index 000000000..32fc1b25d --- /dev/null +++ b/content/cookbook/wallets/sign-message.mdx @@ -0,0 +1,69 @@ +--- +title: How to Sign and Verify a Message +description: "Learn how to sign messages on Solana." +--- + +The primary function of a keypair is to sign messages, transactions and enable +verification of the signature. Verification of a signature allows the recipient +to be sure that the data was signed by the owner of a specific private key. 
+ + + + + +```typescript +import { + generateKeyPair, + signBytes, + verifySignature, + getUtf8Encoder, + getBase58Decoder, +} from "@solana/web3.js"; + +const keys = await generateKeyPair(); +const message = getUtf8Encoder().encode("Hello, World!"); +const signedBytes = await signBytes(keys.privateKey, message); + +const decoded = getBase58Decoder().decode(signedBytes); +console.log("Signature:", decoded); + +const verified = await verifySignature(keys.publicKey, signedBytes, message); +console.log("Verified:", verified); +``` + + + + + +In Solana Web3.js v1, we can use the +[TweetNaCl](https://www.npmjs.com/package/tweetnacl) crypto library: + +```typescript +import { Keypair } from "@solana/web3.js"; +import nacl from "tweetnacl"; +import { decodeUTF8 } from "tweetnacl-util"; + +const keypair = Keypair.fromSecretKey( + Uint8Array.from([ + 174, 47, 154, 16, 202, 193, 206, 113, 199, 190, 53, 133, 169, 175, 31, 56, + 222, 53, 138, 189, 224, 216, 117, 173, 10, 149, 53, 45, 73, 251, 237, 246, + 15, 185, 186, 82, 177, 240, 148, 69, 241, 227, 167, 80, 141, 89, 240, 121, + 121, 35, 172, 247, 68, 251, 226, 218, 48, 63, 176, 109, 168, 89, 238, 135, + ]), +); + +const message = "The quick brown fox jumps over the lazy dog"; +const messageBytes = decodeUTF8(message); + +const signature = nacl.sign.detached(messageBytes, keypair.secretKey); +const result = nacl.sign.detached.verify( + messageBytes, + signature, + keypair.publicKey.toBytes(), +); + +console.log(result); +``` + + + diff --git a/content/cookbook/wallets/verify-keypair.mdx b/content/cookbook/wallets/verify-keypair.mdx new file mode 100644 index 000000000..b6b7c06be --- /dev/null +++ b/content/cookbook/wallets/verify-keypair.mdx @@ -0,0 +1,25 @@ +--- +title: How to Verify a Keypair +description: "Learn how to verify keypairs on Solana." +--- + +If you are given a keypair, you can verify whether or not the secret matches the +given public key + +```typescript title="verify-keypair.ts" +import { Keypair, PublicKey } from "@solana/web3.js"; + +const publicKey = new PublicKey("24PNhTaNtomHhoy3fTRaMhAFCRj4uHqhZEEoWrKDbR5p"); + +const keypair = Keypair.fromSecretKey( + Uint8Array.from([ + 174, 47, 154, 16, 202, 193, 206, 113, 199, 190, 53, 133, 169, 175, 31, 56, + 222, 53, 138, 189, 224, 216, 117, 173, 10, 149, 53, 45, 73, 251, 237, 246, + 15, 185, 186, 82, 177, 240, 148, 69, 241, 227, 167, 80, 141, 89, 240, 121, + 121, 35, 172, 247, 68, 251, 226, 218, 48, 63, 176, 109, 168, 89, 238, 135, + ]), +); + +console.log(keypair.publicKey.toBase58() === publicKey.toBase58()); +// output: true +``` diff --git a/content/courses/connecting-to-offchain-data/index.mdx b/content/courses/connecting-to-offchain-data/index.mdx new file mode 100644 index 000000000..39a2a961f --- /dev/null +++ b/content/courses/connecting-to-offchain-data/index.mdx @@ -0,0 +1,5 @@ +--- +author: unboxed +title: Connecting to offchain data +description: Connect to offchain data from inside your Anchor programs. 
+--- diff --git a/content/courses/connecting-to-offchain-data/meta.json b/content/courses/connecting-to-offchain-data/meta.json new file mode 100644 index 000000000..9106e416f --- /dev/null +++ b/content/courses/connecting-to-offchain-data/meta.json @@ -0,0 +1,3 @@ +{ + "pages": ["oracles", "verifiable-randomness-functions"] +} diff --git a/content/courses/connecting-to-offchain-data/oracles.mdx b/content/courses/connecting-to-offchain-data/oracles.mdx new file mode 100644 index 000000000..7296b66d9 --- /dev/null +++ b/content/courses/connecting-to-offchain-data/oracles.mdx @@ -0,0 +1,1413 @@ +--- +title: Oracles and Oracle Networks +objectives: + - Explain why onchain programs cannot readily access real-world data on their + own + - Explain how oracles solve the problem of accessing real-world data onchain + - Explain how incentivized oracle networks make data more trustworthy + - Effectively weigh the tradeoffs between using various types of oracles + - Use oracles from an onchain program to access real-world data +description: Access real-world data inside a Solana program. +--- + +## Summary + +- Oracles are services that provide external data to a blockchain network. +- Solana has a rich ecosystem of oracle providers. Some notable oracle providers + include [Pyth Network](https://pyth.network), + [Switchboard](https://switchboard.xyz), [Chainlink](https://chain.link), and + [DIA](https://www.diadata.org/solana-price-oracles/). +- You can build your own oracle to create a custom data feed. +- When choosing oracle providers, consider reliability, accuracy, + decentralization, update frequency, and cost. Be aware of security risks: + oracles can be potential points of failure or attack. For critical data, use + reputable providers and consider multiple independent oracles to mitigate + risks. + +## Lesson + +Oracles are services that provide external data to a blockchain network. +Blockchains are siloed environments that do not inherently know the outside +world. Oracles solve this limitation by offering a decentralized way to get +various types of data onchain, such as: + +- Results of sporting events +- Weather data +- Political election results +- Market data +- Randomness + +While the implementation may differ across blockchains, oracles generally work +as follows: + +1. Data is sourced offchain. +2. The data is published onchain via a transaction and stored in an account. +3. Programs can read the data stored in the account and use it in the program's + logic. + +This lesson will cover the basics of how oracles work, the state of oracles on +Solana, and how to effectively use oracles in your Solana development. + +### Trust and Oracle Networks + +The primary challenge for oracles is trust. Since blockchains execute +irreversible financial transactions, developers and users need to trust the +validity and accuracy of oracle data. The first step in trusting an oracle is +understanding its implementation. + +Broadly speaking, there are three types of implementations: + +1. **Single, centralized oracle publishes data onchain.** + - **Pro:** It's simple; there's one source of truth. + - **Con:** Nothing prevents the oracle provider from supplying inaccurate + data. +2. **Network of oracles publishes data, with consensus determining the final + result.** + + - **Pro:** Consensus reduces the likelihood of bad data being pushed onchain. + - **Con:** There's no direct disincentive for bad actors to publish incorrect + data to sway consensus. + +3. 
**Oracle network with proof-of-stake mechanism:** Oracles are required to + stake tokens to participate. If an oracle's response deviates too far from + the consensus, its stake is taken by the protocol and it can no longer + report. + - **Pro:** This approach prevents any single oracle from overly influencing + the final result while incentivizing honest and accurate reporting. + - **Con:** Building decentralized networks is challenging; proper incentives + and sufficient participation are necessary for success. + +Each implementation has its place depending on the oracle's use case. For +example, using centralized oracles for a blockchain-based game may be +acceptable. However, you may be less comfortable with a centralized oracle +providing price data for trading applications. + +You may create standalone oracles for your own applications to access offchain +data. However, these are unlikely to be used by the broader community, where +decentralization is a core principle. Be cautious about using centralized +third-party oracles as well. + +In an ideal scenario, all important or valuable data would be provided onchain +via a highly efficient oracle network with a trustworthy proof-of-stake +consensus mechanism. A staking system incentivizes oracle providers to ensure +the accuracy of their data to protect their staked funds. + +Even when an oracle network claims to have a consensus mechanism, be aware of +the risks. If the total value at stake in downstream applications exceeds the +staked amount of the oracle network, there may still be sufficient incentive for +collusion among oracles. + +As a developer, it is your responsibility to understand how an oracle network is +configured and assess whether it can be trusted. Generally, oracles should only +be used for non-mission-critical functions, and worst-case scenarios should +always be accounted for. + +### Oracles on Solana + +Solana has a diverse ecosystem of oracle providers, each with unique offerings. +Some notable ones include: + +- [**Pyth**](https://www.pyth.network/price-feeds) + Focuses primarily on financial data published by top-tier financial + institutions. Pyth's data providers are approved entities that publish market + data updates, which are then aggregated and made available onchain via the + Pyth program. This data is not fully decentralized since only approved + providers can publish it. However, the key advantage is that Pyth offers + high-quality, vetted data directly sourced from these institutions. +- [**Switchboard**](https://switchboard.xyz) + Completely decentralized oracle network with a variety of data feeds. You can + explore these feeds on + [Switchboard website](https://app.switchboard.xyz/solana/mainnet). Anyone can + run a Switchboard oracle or consume its data, but that means users need to be + diligent in researching the quality of the feeds they use. +- [**Chainlink**](https://chain.link) + Decentralized oracle network providing secure offchain computations and + real-world data across multiple blockchains. +- [**DIA**](https://www.diadata.org/solana-price-oracles/) + Open-source oracle platform delivering transparent and verified data for + digital assets and traditional financial instruments. + +In this lesson, we'll be using **Switchboard**. However, the concepts are +applicable to most oracles, so you should select the oracle provider that best +fits your needs. 
+ +Switchboard follows a stake-weighted oracle network model, as discussed in the +previous section, but with an additional layer of security via +[**Trusted Execution Environments (TEEs)**](https://en.wikipedia.org/wiki/Trusted_execution_environment). +TEEs are secure environments isolated from the rest of the system where +sensitive code can be executed. In simple terms, TEEs can take a program and an +input, execute the program, and produce an output along with a proof. To learn +more about TEEs, check out +[Switchboard's Architecture Design documentation](https://docs.switchboard.xyz/docs/switchboard/readme/architecture-design#trusted-execution-environments-for-layered-security). + +By incorporating TEEs, Switchboard is able to verify each oracle's software, +ensuring its integrity within the network. If an oracle operator acts +maliciously or alters the approved code, the data quote verification process +will fail. This allows Switchboard to support more than just data reporting; it +can also run offchain custom and confidential computations. + +### Switchboard Oracles + +Switchboard oracles store data on Solana using data feeds, also called +**aggregators**. These data feeds consist of multiple jobs that are aggregated +to produce a single result. Aggregators are represented onchain as regular +Solana accounts managed by the Switchboard program, with updates written +directly to these accounts. Let's review some key terms to understand how +Switchboard operates: + +- **[Aggregator (Data Feed)](https://github.com/switchboard-xyz/solana-sdk/blob/main/rust/switchboard-solana/src/oracle_program/accounts/aggregator.rs)** - + Contains the data feed configuration, including how updates are requested, + processed, and resolved onchain. The aggregator account, owned by the + Switchboard program stores the final data onchain. +- **[Job](https://github.com/switchboard-xyz/solana-sdk/blob/main/rust/switchboard-solana/src/oracle_program/accounts/job.rs)** - + Each data source corresponds to a job account, which defines the tasks for + fetching and transforming offchain data. It acts as the blueprint for how data + is retrieved for a particular source. +- **[Oracle](https://github.com/switchboard-xyz/solana-sdk/blob/main/rust/switchboard-solana/src/oracle_program/accounts/oracle.rs)** - + An oracle acts as the intermediary between the internet and the blockchain. It + reads job definitions from the feed, calculates results, and submits them + onchain. +- **Oracle Queue** - A pool of oracles that are assigned update requests in a + round-robin fashion. Oracles in the queue must continuously heartbeat onchain + to provide updates. The queue's data and configuration are stored in an + [onchain account](https://github.com/switchboard-xyz/solana-sdk/blob/main/javascript/solana.js/src/generated/oracle-program/accounts/OracleQueueAccountData.ts) + managed by the Switchboard program. +- **Oracle Consensus** - Oracles come to a consensus by using the median of the + responses as the accepted onchain result. The feed authority controls how many + oracles are required to respond for added security. + +Switchboard incentivizes oracles to update data feeds through a reward system. +Each data feed has a `LeaseContract` account, which is a pre-funded escrow that +rewards oracles for fulfilling update requests. The `leaseAuthority` can +withdraw funds, but anyone can contribute to the contract. 
When a user requests +a feed update, the escrow rewards both the user and the crank turners (those who +run software to systematically send update requests). Once oracles submit +results onchain, they are paid from this escrow. + +Oracles must also stake tokens to participate in updates. If an oracle submits a +result outside the queue's configured parameters, they can have their stake +slashed, provided the queue has `slashingEnabled`. This mechanism ensures that +oracles act in good faith by providing accurate data. + +#### How Data is Published Onchain + +1. **Oracle Queue Setup** - When an update request is made, the next `N` oracles + are assigned from the queue and moved to the back after completion. Each + queue has its own configuration that dictates security and behavior, tailored + to specific use cases. Queues are stored onchain as accounts and can be + created via the + [`oracleQueueInit` instruction](https://github.com/switchboard-xyz/solana-sdk/blob/main/javascript/solana.js/src/generated/oracle-program/instructions/oracleQueueInit.ts). + - Key + [Oracle Queue configurations](https://docs.rs/switchboard-solana/latest/switchboard_solana/oracle_program/accounts/queue/struct.OracleQueueAccountData.html): + - `oracle_timeout`: Removes stale oracles after a heartbeat timeout. + - `reward`: Defines rewards for oracles and round openers. + - `min_stake`: The minimum stake required for an oracle to participate. + - `size`: The current number of oracles in the queue. + - `max_size`: The maximum number of oracles a queue can support. +2. **[Aggregator/data feed setup](https://docs.rs/switchboard-solana/latest/switchboard_solana/oracle_program/accounts/aggregator/struct.AggregatorAccountData.html)** - + Each feed is linked to a single oracle queue and contains configuration + details on how updates are requested and processed. +3. **[Job Account Setup](https://docs.rs/switchboard-solana/latest/switchboard_solana/oracle_program/accounts/job/struct.JobAccountData.html)** - + Each data source requires a job account that defines how oracles retrieve and + fulfill the feed's update requests. These job accounts also specify where + data is sourced. +4. **Request Assignment** - When an update is requested, the oracle queue + assigns the task to different oracles in the queue. Each oracle processes + data from the sources defined in the feed's job accounts, calculating a + weighted median result based on the data. + +5. **Consensus and Result Calculation** - After the required number of oracle + responses + ([`minOracleResults`](https://docs.rs/switchboard-solana/latest/switchboard_solana/oracle_program/accounts/aggregator/struct.AggregatorAccountData.html#structfield.min_oracle_results)) + is received, the result is calculated as the median of the responses. Oracles + that submit responses within the set parameters are rewarded, while those + outside the threshold are penalized (if `slashingEnabled` is active). +6. **Data Storage** - The final result is stored in the aggregator account, + where it can be accessed onchain for consumption by other programs. + +#### How to Use Switchboard Oracles + +To incorporate offchain data into a Solana program using Switchboard oracles, +the first step is to find a data feed that suits your needs. Switchboard offers +many [publicly available feeds](https://app.switchboard.xyz/solana/mainnet) for +various data types. 
When selecting a feed, you should consider the following +factors: + +- **Accuracy/Reliability**: Evaluate how precise the data needs to be for your + application. +- **Data Source**: Choose a feed based on where the data is sourced from. +- **Update Cadence**: Understand how frequently the feed is updated to ensure it + meets your use case. + +When consuming public feeds, you won't have control over these aspects, so it's +important to choose carefully based on your requirements. + +For example, Switchboard offers a +[BTC/USD feed](https://app.switchboard.xyz/solana/mainnet/feed/8SXvChNYFhRq4EZuZvnhjrB3jJRQCv4k3P4W6hesH3Ee), +which provides the current Bitcoin price in USD. This feed is available on both +Solana devnet and mainnet with the following public key: +`8SXvChNYFhRq4EZuZvnhjrB3jJRQCv4k3P4W6hesH3Ee`. + +Here's a snapshot of what the onchain data for a Switchboard feed account looks +like: + +```rust +// From the switchboard solana program +// https://github.com/switchboard-xyz/solana-sdk/blob/main/rust/switchboard-solana/src/oracle_program/accounts/aggregator.rs#L60 + +pub struct AggregatorAccountData { + /// Name of the aggregator to store onchain. + pub name: [u8; 32], + ... + ... + /// Pubkey of the queue the aggregator belongs to. + pub queue_pubkey: Pubkey, + ... + /// Minimum number of oracle responses required before a round is validated. + pub min_oracle_results: u32, + /// Minimum number of job results before an oracle accepts a result. + pub min_job_results: u32, + /// Minimum number of seconds required between aggregator rounds. + pub min_update_delay_seconds: u32, + ... + /// Change percentage required between a previous round and the current round. If variance percentage is not met, reject new oracle responses. + pub variance_threshold: SwitchboardDecimal, + ... + /// Latest confirmed update request result that has been accepted as valid. This is where you will find the data you are requesting in latest_confirmed_round.result + pub latest_confirmed_round: AggregatorRound, + ... + /// The previous confirmed round result. + pub previous_confirmed_round_result: SwitchboardDecimal, + /// The slot when the previous confirmed round was opened. + pub previous_confirmed_round_slot: u64, + ... +} +``` + +You can view the full code for this data structure in the +[Switchboard program here](https://github.com/switchboard-xyz/solana-sdk/blob/main/rust/switchboard-solana/src/oracle_program/accounts/aggregator.rs#L60). + +Some relevant fields and configurations on the `AggregatorAccountData` type are: + +- `min_oracle_results` - Minimum number of oracle responses required before a + round is validated. +- `min_job_results` - Minimum number of job results before an oracle accepts a + result. +- `variance_threshold` - Change percentage required between a previous round and + the current round. If variance percentage is not met, reject new oracle + responses. +- `latest_confirmed_round` - Latest confirmed update request result that has + been accepted as valid. This is where you will find the data of the feed in + `latest_confirmed_round.result` +- `min_update_delay_seconds` - Minimum number of seconds required between + aggregator rounds. + +The first three configurations listed above directly impact the accuracy and +reliability of a data feed: + +- The `min_job_results` field represents the minimum number of successful + responses an oracle must receive from data sources before it can submit its + response onchain. 
For example, if `min_job_results` is set to three, each + oracle must pull data from at least three job sources. The higher this number, + the more reliable and accurate the data will be, reducing the influence of any + single data source. + +- The `min_oracle_results` field is the minimum number of oracle responses + required for a round to be successful. Each oracle in a queue pulls data from + each source defined as a job, takes the weighted median of those responses, + and submits that median onchain. The program then waits for + `min_oracle_results` of these weighted medians and calculates the median of + those, which is the final result stored in the data feed account. + +- The `min_update_delay_seconds` field is related to the feed's update cadence. + This value must have passed between rounds of updates before the Switchboard + program will accept results. + +It can help to view the jobs tab for a feed in Switchboard's explorer. For +example, check out the +[BTC_USD feed in the explorer](https://app.switchboard.xyz/solana/devnet/feed/8SXvChNYFhRq4EZuZvnhjrB3jJRQCv4k3P4W6hesH3Ee). +Each job defines the data sources the oracles fetch from and the weight assigned +to each source. You can view the actual API endpoints that provide the data for +this feed. When selecting a feed for your program, these considerations are key. + +Below are two of the jobs related to the BTC_USD feed, showing data from +[MEXC](https://www.mexc.com/) and [Coinbase](https://www.coinbase.com/). + +![Oracle Jobs](/assets/courses/unboxed/oracle-jobs.png) + +Once you've chosen a feed, you can start reading the data from that feed by +deserializing and reading the state stored in the account. The easiest way to do +this is by using the `AggregatorAccountData` struct from the +`switchboard_solana` crate in your program. + +```rust +// Import anchor and switchboard crates +use {anchor_lang::prelude::*, switchboard_solana::AggregatorAccountData}; + +... + +#[derive(Accounts)] +pub struct ConsumeDataAccounts<'info> { + // Pass in data feed account and deserialize to AggregatorAccountData + pub feed_aggregator: AccountLoader<'info, AggregatorAccountData>, + ... +} +``` + +Using zero-copy deserialization with `AccountLoader` allows the program to +access specific data within large accounts like `AggregatorAccountData` without +loading the entire account into memory. This improves memory efficiency and +performance by only accessing the necessary parts of the account. It avoids +deserializing the whole account, saving both time and resources. This is +especially useful for large account structures. + +When using `AccountLoader`, you can access the data in three ways: + +- `load_init`: Used after initializing an account (this ignores the missing + account discriminator that gets added only after the user's instruction code) +- `load`: Used when the account is immutable +- `load_mut`: Used when the account is mutable + +To dive deeper, check out the +[Advanced Program Architecture lesson](/developers/courses/program-optimization/program-architecture), +where we discuss `Zero-Copy` and `AccountLoader` in more detail. + +With the aggregator account passed into your program, you can use it to retrieve +the latest oracle result. Specifically, you can use the `get_result()` method on +the aggregator type: + +```rust +// Inside an Anchor program +... 
+
+let feed = &ctx.accounts.feed_aggregator.load()?;
+// get result
+let val: f64 = feed.get_result()?.try_into()?;
+```
+
+The `get_result()` method defined on the `AggregatorAccountData` struct is safer
+than fetching the data with `latest_confirmed_round.result` because Switchboard
+has implemented some nifty safety checks.
+
+```rust
+// From switchboard program
+// https://github.com/switchboard-xyz/solana-sdk/blob/main/rust/switchboard-solana/src/oracle_program/accounts/aggregator.rs#L206
+
+pub fn get_result(&self) -> anchor_lang::Result<SwitchboardDecimal> {
+    if self.resolution_mode == AggregatorResolutionMode::ModeSlidingResolution {
+        return Ok(self.latest_confirmed_round.result);
+    }
+    let min_oracle_results = self.min_oracle_results;
+    let latest_confirmed_round_num_success = self.latest_confirmed_round.num_success;
+    if min_oracle_results > latest_confirmed_round_num_success {
+        return Err(SwitchboardError::InvalidAggregatorRound.into());
+    }
+    Ok(self.latest_confirmed_round.result)
+}
+```
+
+You can also view the current value stored in an `AggregatorAccountData` account
+client-side in TypeScript.
+
+```typescript
+import { AggregatorAccount, SwitchboardProgram } from "@switchboard-xyz/solana.js";
+import { Keypair, PublicKey, SystemProgram, Connection } from "@solana/web3.js";
+import { Big } from "@switchboard-xyz/common";
+...
+...
+
+const DEVNET_RPC_URL = "https://api.devnet.solana.com";
+const SOL_USD_SWITCHBOARD_FEED = new PublicKey(
+  "GvDMxPzN1sCj7L26YDK2HnMRXEQmQ2aemov8YBtPS7vR",
+);
+// Create a keypair to pay for the requests
+const payer = Keypair.generate();
+
+// Fetch switchboard devnet program object
+const switchboardProgram = await SwitchboardProgram.load(
+  new Connection(DEVNET_RPC_URL),
+  payer,
+);
+
+// Pass switchboard program object and feed pubkey into AggregatorAccount constructor
+const aggregatorAccount = new AggregatorAccount(
+  switchboardProgram,
+  SOL_USD_SWITCHBOARD_FEED,
+);
+
+// Fetch latest SOL price
+const solPrice: Big | null = await aggregatorAccount.fetchLatestValue();
+if (solPrice === null) {
+  throw new Error("Aggregator holds no value");
+}
+```
+
+Remember, Switchboard data feeds are just accounts that are updated by third
+parties (oracles). Given that, you can do anything with the account that you can
+typically do with accounts external to your program.
+
+#### Best Practices and Common Pitfalls
+
+When incorporating Switchboard feeds into your programs, there are two groups of
+concerns to consider: choosing a feed and actually consuming the data from that
+feed.
+
+Always audit the configurations of a feed before deciding to incorporate it into
+a program. Configurations like **Min Update Delay**, **Min Job Results**, and
+**Min Oracle Results** can directly affect the data that is eventually persisted
+onchain to the aggregator account. For example, looking at the config section of
+the
+[BTC_USD feed](https://app.switchboard.xyz/solana/devnet/feed/8SXvChNYFhRq4EZuZvnhjrB3jJRQCv4k3P4W6hesH3Ee)
+you can see its relevant configurations.
+
+![Oracle Configs](/assets/courses/unboxed/oracle-configs.png)
+
+The BTC_USD feed has a Min Update Delay = 6 seconds. This means that the price
+of BTC is only updated at a minimum of every 6 seconds on this feed. This is
+probably fine for most use cases, but if you wanted to use this feed for
+something latency-sensitive, it's probably not a good choice.
+
+It's also worthwhile to audit a feed's sources in the Jobs section of the oracle
+explorer. 
Since the value that is persisted onchain is the weighted median +result the oracles pull from each source, the sources directly influence what is +stored in the feed. Check for shady links and potentially run the APIs yourself +for a time to gain confidence in them. + +Once you have found a feed that fits your needs, you still need to make sure +you're using the feed appropriately. For example, you should still implement +necessary security checks on the account passed into your instruction. Any +account can be passed into your program's instructions, so you should verify +it's the account you expect it to be. + +In Anchor, if you deserialize the account to the `AggregatorAccountData` type +from the `switchboard_solana` crate, Anchor checks that the account is owned by +the Switchboard program. If your program expects that only a specific data feed +will be passed in the instruction, then you can also verify that the public key +of the account passed in matches what it should be. One way to do this is to +hard code the address in the program somewhere and use account constraints to +verify the address passed in matches what is expected. + +```rust +use { + anchor_lang::prelude::*, + solana_program::{pubkey, pubkey::Pubkey}, + switchboard_solana::AggregatorAccountData, +}; + +pub static BTC_USDC_FEED: Pubkey = pubkey!("8SXvChNYFhRq4EZuZvnhjrB3jJRQCv4k3P4W6hesH3Ee"); + +... +... + +#[derive(Accounts)] +pub struct TestInstruction<'info> { + // Switchboard SOL feed aggregator + #[account( + address = BTC_USDC_FEED + )] + pub feed_aggregator: AccountLoader<'info, AggregatorAccountData>, +} +``` + +On top of ensuring the feed account is the one you expect, you can also do some +checks on the data stored in the feed in your program's instruction logic. Two +common things to check for are data staleness and the confidence interval. + +Each data feed updates the current value stored in it when triggered by the +oracles. This means the updates are dependent on the oracles in the queue that +it's assigned to. Depending on what you intend to use the data feed for, it may +be beneficial to verify that the value stored in the account was updated +recently. For example, a lending protocol that needs to determine if a loan's +collateral has fallen below a certain level may need the data to be no older +than a few seconds. You can have your code check the timestamp of the most +recent update stored in the aggregator account. The following code snippet +checks that the timestamp of the most recent update on the data feed was no more +than 30 seconds ago. + +```rust +use { + anchor_lang::prelude::*, + anchor_lang::solana_program::clock, + switchboard_solana::{AggregatorAccountData, SwitchboardDecimal}, +}; + +... +... + +let feed = &ctx.accounts.feed_aggregator.load()?; +if (clock::Clock::get().unwrap().unix_timestamp - feed.latest_confirmed_round.round_open_timestamp) <= 30 { + valid_transfer = true; +} +``` + +The `latest_confirmed_round` field on the `AggregatorAccountData` struct is of +type `AggregatorRound` defined as: + +```rust +// https://github.com/switchboard-xyz/solana-sdk/blob/main/rust/switchboard-solana/src/oracle_program/accounts/aggregator.rs#L17 + +pub struct AggregatorRound { + /// Maintains the number of successful responses received from nodes. + /// Nodes can submit one successful response per round. + pub num_success: u32, + /// Number of error responses. + pub num_error: u32, + /// Whether an update request round has ended. 
+    pub is_closed: bool,
+    /// Maintains the `solana_program::clock::Slot` that the round was opened at.
+    pub round_open_slot: u64,
+    /// Maintains the `solana_program::clock::UnixTimestamp;` the round was opened at.
+    pub round_open_timestamp: i64,
+    /// Maintains the current median of all successful round responses.
+    pub result: SwitchboardDecimal,
+    /// Standard deviation of the accepted results in the round.
+    pub std_deviation: SwitchboardDecimal,
+    /// Maintains the minimum node response this round.
+    pub min_response: SwitchboardDecimal,
+    /// Maintains the maximum node response this round.
+    pub max_response: SwitchboardDecimal,
+    /// Pubkeys of the oracles fulfilling this round.
+    pub oracle_pubkeys_data: [Pubkey; 16],
+    /// Represents all successful node responses this round. `NaN` if empty.
+    pub medians_data: [SwitchboardDecimal; 16],
+    /// Current rewards/slashes oracles have received this round.
+    pub current_payout: [i64; 16],
+    /// Keep track of which responses are fulfilled here.
+    pub medians_fulfilled: [bool; 16],
+    /// Keeps track of which errors are fulfilled here.
+    pub errors_fulfilled: [bool; 16],
+}
+```
+
+There are some other relevant fields that may be of interest to you in the
+aggregator account, such as `num_success`, `medians_data`, and `std_deviation`.
+`num_success` is the number of successful responses received from oracles in
+this round of updates. `medians_data` is an array of all of the successful
+responses received from oracles this round. This is the dataset that is used to
+derive the median and the final result. `std_deviation` is the standard
+deviation of the accepted results in this round. You might want to check for a
+low standard deviation, meaning that all of the oracle responses were similar.
+The Switchboard program is in charge of updating the relevant fields on this
+struct every time it receives an update from an oracle.
+
+The `AggregatorAccountData` also has a `check_confidence_interval()` method that
+you can use as another verification on the data stored in the feed. The method
+allows you to pass in a `max_confidence_interval`. If the standard deviation of
+the results received from the oracle is greater than the given
+`max_confidence_interval`, it returns an error.
+
+```rust
+// https://github.com/switchboard-xyz/solana-sdk/blob/main/rust/switchboard-solana/src/oracle_program/accounts/aggregator.rs#L228
+
+pub fn check_confidence_interval(
+    &self,
+    max_confidence_interval: SwitchboardDecimal,
+) -> anchor_lang::Result<()> {
+    if self.latest_confirmed_round.std_deviation > max_confidence_interval {
+        return Err(SwitchboardError::ConfidenceIntervalExceeded.into());
+    }
+    Ok(())
+}
+```
+
+You can incorporate this into your program like so:
+
+```rust
+use {
+    crate::errors::*,
+    anchor_lang::prelude::*,
+    std::convert::TryInto,
+    switchboard_solana::{AggregatorAccountData, SwitchboardDecimal},
+};
+
+...
+...
+
+let feed = &ctx.accounts.feed_aggregator.load()?;
+
+// Check feed does not exceed max_confidence_interval
+feed.check_confidence_interval(SwitchboardDecimal::from_f64(max_confidence_interval))
+    .map_err(|_| error!(ErrorCode::ConfidenceIntervalExceeded))?;
+```
+
+Lastly, it's important to plan for worst-case scenarios in your programs. Plan
+for feeds going stale and plan for feed accounts closing.
+
+### Conclusion
+
+If you want functional programs that can perform actions based on real-world
+data, you'll need to use oracles. 
Fortunately, there are reliable oracle +networks, such as Switchboard, that simplify the process. However, it's crucial +to perform thorough due diligence on any oracle network you choose, as you are +ultimately responsible for your program's behavior. + +## Lab + +Let's practice working with oracles! We'll be building a "Michael Burry Escrow" +program, which locks SOL in an escrow account until its value surpasses a +specified USD threshold. The program is named after +[Michael Burry](https://en.wikipedia.org/wiki/Michael_Burry), the investor known +for predicting the 2008 housing market crash. + +For this, we'll use the +[SOL_USD oracle on devnet](https://app.switchboard.xyz/solana/devnet/feed/GvDMxPzN1sCj7L26YDK2HnMRXEQmQ2aemov8YBtPS7vR) +from Switchboard. The program will have two key instructions: + +- **Deposit**: Lock up the SOL and set a USD price target for unlocking. +- **Withdraw**: Check the USD price, and if the target is met, withdraw the SOL. + +### 1. Program Setup + +To get started, let's create the program with + +```zsh +anchor init burry-escrow --template=multiple +``` + +Next, replace the program ID in `lib.rs` and `Anchor.toml` by running command +`anchor keys sync`. + +Next, add the following to the bottom of your `Anchor.toml` file. This will tell +Anchor how to configure our local testing environment. This will allow us to +test our program locally without having to deploy and send transactions to +devnet. + +At the bottom of `Anchor.toml`: + +```toml title="Anchor.toml" +[test] +startup_wait = 5000 +shutdown_wait = 2000 +upgradeable = false + +[test.validator] +bind_address = "0.0.0.0" +url = "https://api.devnet.solana.com" +ledger = ".anchor/test-ledger" +rpc_port = 8899 + +[[test.validator.clone]] # switchboard-solana devnet programID +address = "SW1TCH7qEPTdLsDHRgPuMQjbQxKdH2aBStViMFnt64f" + +[[test.validator.clone]] # switchboard-solana devnet IDL +address = "Fi8vncGpNKbq62gPo56G4toCehWNy77GgqGkTaAF5Lkk" + +[[test.validator.clone]] # switchboard-solana SOL/USD Feed +address = "GvDMxPzN1sCj7L26YDK2HnMRXEQmQ2aemov8YBtPS7vR" +``` + +Additionally, we want to import the `switchboard-solana` crate in our +`Cargo.toml` file. Make sure your dependencies look as follows: + +```toml title="Cargo.toml" +[dependencies] +anchor-lang = "0.30.1" +switchboard-solana = "0.30.4" +``` + +Before diving into the program logic, let's review the structure of our smart +contract. For smaller programs, it's tempting to put all the code in a single +`lib.rs` file. However, organizing the code across different files helps +maintain clarity and scalability. Our program will be structured as follows +within the `programs/burry-escrow` directory: + +```sh +└── burry-escrow + ├── Cargo.toml + ├── Xargo.toml + └── src + ├── constants.rs + ├── error.rs + ├── instructions + │ ├── deposit.rs + │ ├── mod.rs + │ └── withdraw.rs + ├── lib.rs + └── state.rs +``` + +In this structure, `lib.rs` serves as the entry point to the program, while the +logic for each instruction handler is stored in separate files under the +`instructions` directory. Go ahead and set up the architecture as shown above, +and we'll proceed from there. + +### 2. Setup lib.rs + +Before writing the logic, we'll set up the necessary boilerplate in `lib.rs`. +This file acts as the entry point for the program, defining the API endpoints +that all transactions will pass through. The actual logic will be housed in the +`/instructions` directory. 
+
+```rust title="lib.rs"
+use anchor_lang::prelude::*;
+use instructions::{deposit::*, withdraw::*};
+
+pub mod error;
+pub mod instructions;
+pub mod state;
+pub mod constants;
+
+declare_id!("YOUR_PROGRAM_KEY_HERE");
+
+#[program]
+pub mod burry_escrow {
+    use super::*;
+
+    pub fn deposit(ctx: Context<Deposit>, escrow_amount: u64, unlock_price: f64) -> Result<()> {
+        deposit_handler(ctx, escrow_amount, unlock_price)
+    }
+
+    pub fn withdraw(ctx: Context<Withdraw>) -> Result<()> {
+        withdraw_handler(ctx)
+    }
+}
+```
+
+### 3. Define state.rs
+
+Next, let's define our program's data account: `Escrow`. This account will store
+two key pieces of information:
+
+- `unlock_price`: The price of SOL in USD at which withdrawals are allowed
+  (e.g., $21.53).
+- `escrow_amount`: Tracks the amount of lamports held in the escrow account.
+
+```rust title="state.rs"
+use anchor_lang::prelude::*;
+
+#[account]
+#[derive(InitSpace)]
+pub struct Escrow {
+    pub unlock_price: f64,
+    pub escrow_amount: u64,
+}
+```
+
+### 4. Constants
+
+Next, we'll define `DISCRIMINATOR_SIZE` as 8, the PDA seed as `"MICHAEL BURRY"`,
+and hard-code the SOL/USD oracle pubkey as `SOL_USDC_FEED` in the `constants.rs`
+file.
+
+```rust title="constants.rs"
+pub const DISCRIMINATOR_SIZE: usize = 8;
+pub const ESCROW_SEED: &[u8] = b"MICHAEL BURRY";
+pub const SOL_USDC_FEED: &str = "GvDMxPzN1sCj7L26YDK2HnMRXEQmQ2aemov8YBtPS7vR";
+```
+
+### 5. Errors
+
+Next, let's define the custom errors we'll use throughout the program. Inside
+the `error.rs` file, paste the following:
+
+```rust title="error.rs"
+use anchor_lang::prelude::*;
+
+#[error_code]
+#[derive(Eq, PartialEq)]
+pub enum EscrowErrorCode {
+    #[msg("Not a valid Switchboard account")]
+    InvalidSwitchboardAccount,
+    #[msg("Switchboard feed has not been updated in 5 minutes")]
+    StaleFeed,
+    #[msg("Switchboard feed exceeded provided confidence interval")]
+    ConfidenceIntervalExceeded,
+    #[msg("Current SOL price is not above Escrow unlock price.")]
+    SolPriceBelowUnlockPrice,
+}
+```
+
+### 6. Setup mod.rs
+
+Let's set up our `instructions/mod.rs` file.
+
+```rust title="mod.rs"
+pub mod deposit;
+pub mod withdraw;
+```
+
+### 7. Deposit
+
+Now that we have all of the boilerplate out of the way, let's move on to our
+`Deposit` instruction. This will live in the `/src/instructions/deposit.rs`
+file.
+
+When a user deposits, a PDA should be created with the "MICHAEL BURRY" string
+and the user's pubkey as seeds. This ensures that a user can only open one
+escrow account at a time. The instruction should initialize an account at this
+PDA and transfer the SOL that the user wants to lock up to it. The user will
+need to be a signer.
+
+Let's first build the `Deposit` context struct. To do this, we need to think
+about what accounts will be necessary for this instruction. 
We start with the
+following:
+
+```rust title="deposit.rs"
+use crate::constants::*;
+use crate::state::*;
+use anchor_lang::prelude::*;
+use anchor_lang::solana_program::{program::invoke, system_instruction::transfer};
+
+#[derive(Accounts)]
+pub struct Deposit<'info> {
+    #[account(mut)]
+    pub user: Signer<'info>,
+
+    #[account(
+        init,
+        seeds = [ESCROW_SEED, user.key().as_ref()],
+        bump,
+        payer = user,
+        space = DISCRIMINATOR_SIZE + Escrow::INIT_SPACE
+    )]
+    pub escrow_account: Account<'info, Escrow>,
+
+    pub system_program: Program<'info, System>,
+}
+```
+
+Notice the constraints we added to the accounts:
+
+- Because we'll be transferring SOL from the user account to the `escrow`
+  account, they both need to be mutable.
+- We know the `escrow_account` is supposed to be a PDA derived with the “MICHAEL
+  BURRY” string and the user's pubkey. We can use Anchor account constraints to
+  guarantee that the address passed in actually meets that requirement.
+- We also know that we have to initialize an account at this PDA to store some
+  state for the program. We use the `init` constraint here.
+
+Let's move on to the actual logic. All we need to do is initialize the state
+of the `escrow` account and transfer the SOL. We expect the user to pass in the
+amount of SOL they want to lock up in escrow and the price to unlock it at. We
+will store these values in the `escrow` account.
+
+After that, the method should execute the transfer. This program will be locking
+up native SOL. Because of this, we don't need to use token accounts or the
+Solana token program. We'll have to use the `system_program` to transfer the
+lamports the user wants to lock up in escrow and invoke the transfer
+instruction.
+
+```rust title="deposit.rs"
+pub fn deposit_handler(ctx: Context<Deposit>, escrow_amount: u64, unlock_price: f64) -> Result<()> {
+    msg!("Depositing funds in escrow...");
+
+    let escrow = &mut ctx.accounts.escrow_account;
+    escrow.unlock_price = unlock_price;
+    escrow.escrow_amount = escrow_amount;
+
+    let transfer_instruction =
+        transfer(&ctx.accounts.user.key(), &escrow.key(), escrow_amount);
+
+    invoke(
+        &transfer_instruction,
+        &[
+            ctx.accounts.user.to_account_info(),
+            ctx.accounts.escrow_account.to_account_info(),
+            ctx.accounts.system_program.to_account_info(),
+        ],
+    )?;
+
+    msg!(
+        "Transfer complete. Escrow will unlock SOL at {}",
+        &ctx.accounts.escrow_account.unlock_price
+    );
+
+    Ok(())
+}
+```
+
+That's the gist of the deposit instruction handler! The final result of the
+`deposit.rs` file should look as follows:
+
+```rust title="deposit.rs"
+use crate::constants::*;
+use crate::state::*;
+use anchor_lang::prelude::*;
+use anchor_lang::solana_program::{program::invoke, system_instruction::transfer};
+
+pub fn deposit_handler(ctx: Context<Deposit>, escrow_amount: u64, unlock_price: f64) -> Result<()> {
+    msg!("Depositing funds in escrow...");
+
+    let escrow = &mut ctx.accounts.escrow_account;
+    escrow.unlock_price = unlock_price;
+    escrow.escrow_amount = escrow_amount;
+
+    let transfer_instruction =
+        transfer(&ctx.accounts.user.key(), &escrow.key(), escrow_amount);
+
+    invoke(
+        &transfer_instruction,
+        &[
+            ctx.accounts.user.to_account_info(),
+            ctx.accounts.escrow_account.to_account_info(),
+            ctx.accounts.system_program.to_account_info(),
+        ],
+    )?;
+
+    msg!(
+        "Transfer complete. 
Escrow will unlock SOL at {}",
+        &ctx.accounts.escrow_account.unlock_price
+    );
+
+    Ok(())
+}
+
+#[derive(Accounts)]
+pub struct Deposit<'info> {
+    #[account(mut)]
+    pub user: Signer<'info>,
+
+    #[account(
+        init,
+        seeds = [ESCROW_SEED, user.key().as_ref()],
+        bump,
+        payer = user,
+        space = DISCRIMINATOR_SIZE + Escrow::INIT_SPACE
+    )]
+    pub escrow_account: Account<'info, Escrow>,
+
+    pub system_program: Program<'info, System>,
+}
+```
+
+### 8. Withdraw
+
+The `Withdraw` instruction will require the same three accounts as the `Deposit`
+instruction, plus the `SOL_USDC_FEED` Switchboard feed account. This code will be
+placed in the `withdraw.rs` file.
+
+```rust title="withdraw.rs"
+use crate::constants::*;
+use crate::error::*;
+use crate::state::*;
+use anchor_lang::prelude::*;
+use anchor_lang::solana_program::clock::Clock;
+use std::str::FromStr;
+use switchboard_solana::AggregatorAccountData;
+
+#[derive(Accounts)]
+pub struct Withdraw<'info> {
+    #[account(mut)]
+    pub user: Signer<'info>,
+
+    #[account(
+        mut,
+        seeds = [ESCROW_SEED, user.key().as_ref()],
+        bump,
+        close = user
+    )]
+    pub escrow_account: Account<'info, Escrow>,
+
+    #[account(
+        address = Pubkey::from_str(SOL_USDC_FEED).unwrap()
+    )]
+    pub feed_aggregator: AccountLoader<'info, AggregatorAccountData>,
+
+    pub system_program: Program<'info, System>,
+}
+```
+
+Notice we're using the close constraint because once the transaction completes,
+we want to close the `escrow_account`. The SOL used as rent in the account will
+be transferred to the user account.
+
+We also use the `address` constraint to verify that the feed account passed in is
+actually the SOL/USD feed and not some other feed (we have the `SOL_USDC_FEED`
+address hard-coded). In addition, the `AggregatorAccountData` struct that we
+deserialize comes from the Switchboard Rust crate. It verifies that the given
+account is owned by the Switchboard program and allows us to easily look at its
+values. You'll notice it's wrapped in an `AccountLoader`. This is because the
+feed is actually a fairly large account and it needs to be zero-copied.
+
+Now let's implement the withdraw instruction handler's logic. First, we check if
+the feed is stale. Then we fetch the current price of SOL stored in the
+`feed_aggregator` account. Lastly, we want to check that the current price is
+above the escrow `unlock_price`. If it is, then we transfer the SOL from the
+escrow account back to the user and close the account. If it isn't, then the
+instruction handler should finish and return an error.
+
+```rust title="withdraw.rs"
+pub fn withdraw_handler(ctx: Context<Withdraw>) -> Result<()> {
+    let feed = &ctx.accounts.feed_aggregator.load()?;
+    let escrow = &ctx.accounts.escrow_account;
+
+    let current_sol_price: f64 = feed.get_result()?.try_into()?;
+
+    // Check if the feed has been updated in the last 5 minutes (300 seconds)
+    feed.check_staleness(Clock::get().unwrap().unix_timestamp, 300)
+        .map_err(|_| error!(EscrowErrorCode::StaleFeed))?;
+
+    msg!("Current SOL price is {}", current_sol_price);
+    msg!("Unlock price is {}", escrow.unlock_price);
+
+    if current_sol_price < escrow.unlock_price {
+        return Err(EscrowErrorCode::SolPriceBelowUnlockPrice.into());
+    }
+
+    ....
+}
+```
+
+To finish the logic off, we'll execute the transfer. This time, we'll have to
+transfer the funds in a different way. Because we are transferring from an
+account that also holds data, we cannot use the `system_program::transfer` method
+like before. 
If we try to, the instruction will fail to execute with the
+following error.
+
+```zsh
+'Transfer: `from` must not carry data'
+```
+
+To account for this, we'll use `try_borrow_mut_lamports()` on each account and
+add/subtract the amount of lamports stored in each account.
+
+```rust title="withdraw.rs"
+// Transfer lamports from escrow to user
+**escrow.to_account_info().try_borrow_mut_lamports()? = escrow
+    .to_account_info()
+    .lamports()
+    .checked_sub(escrow_lamports)
+    .ok_or(ProgramError::InsufficientFunds)?;
+
+**ctx
+    .accounts
+    .user
+    .to_account_info()
+    .try_borrow_mut_lamports()? = ctx
+    .accounts
+    .user
+    .to_account_info()
+    .lamports()
+    .checked_add(escrow_lamports)
+    .ok_or(ProgramError::InvalidArgument)?;
+```
+
+The final withdraw method in the `withdraw.rs` file should look like this:
+
+```rust title="withdraw.rs"
+use crate::constants::*;
+use crate::error::*;
+use crate::state::*;
+use anchor_lang::prelude::*;
+use anchor_lang::solana_program::clock::Clock;
+use std::str::FromStr;
+use switchboard_solana::AggregatorAccountData;
+
+pub fn withdraw_handler(ctx: Context<Withdraw>) -> Result<()> {
+    let feed = &ctx.accounts.feed_aggregator.load()?;
+    let escrow = &ctx.accounts.escrow_account;
+
+    let current_sol_price: f64 = feed.get_result()?.try_into()?;
+
+    // Check if the feed has been updated in the last 5 minutes (300 seconds)
+    feed.check_staleness(Clock::get().unwrap().unix_timestamp, 300)
+        .map_err(|_| error!(EscrowErrorCode::StaleFeed))?;
+
+    msg!("Current SOL price is {}", current_sol_price);
+    msg!("Unlock price is {}", escrow.unlock_price);
+
+    if current_sol_price < escrow.unlock_price {
+        return Err(EscrowErrorCode::SolPriceBelowUnlockPrice.into());
+    }
+
+    let escrow_lamports = escrow.escrow_amount;
+
+    // Transfer lamports from escrow to user
+    **escrow.to_account_info().try_borrow_mut_lamports()? = escrow
+        .to_account_info()
+        .lamports()
+        .checked_sub(escrow_lamports)
+        .ok_or(ProgramError::InsufficientFunds)?;
+
+    **ctx
+        .accounts
+        .user
+        .to_account_info()
+        .try_borrow_mut_lamports()? = ctx
+        .accounts
+        .user
+        .to_account_info()
+        .lamports()
+        .checked_add(escrow_lamports)
+        .ok_or(ProgramError::InvalidArgument)?;
+
+    Ok(())
+}
+
+#[derive(Accounts)]
+pub struct Withdraw<'info> {
+    #[account(mut)]
+    pub user: Signer<'info>,
+
+    #[account(
+        mut,
+        seeds = [ESCROW_SEED, user.key().as_ref()],
+        bump,
+        close = user
+    )]
+    pub escrow_account: Account<'info, Escrow>,
+
+    #[account(
+        address = Pubkey::from_str(SOL_USDC_FEED).unwrap()
+    )]
+    pub feed_aggregator: AccountLoader<'info, AggregatorAccountData>,
+
+    pub system_program: Program<'info, System>,
+}
+```
+
+And that's it for the program! At this point, you should be able to run
+`anchor build` without any errors.
+
+### 9. Testing
+
+Let's write some tests. We should have four of them:
+
+- Creating an Escrow with the unlock price **_below_** the current SOL price so
+  we can test withdrawing it
+- Withdrawing and closing from the above escrow
+- Creating an Escrow with the unlock price **_above_** the current SOL price so
+  we can test that withdrawing it fails
+- Withdrawing and failing from the above escrow
+
+Note that there can only be one escrow per user, so the above order matters.
+
+We'll provide all the testing code in one snippet. Take a look through to make
+sure you understand it before running `anchor test`. 
+ +```typescript title="burry-escrow.ts" +// Inside tests/burry-escrow.ts +import * as anchor from "@coral-xyz/anchor"; +import { Program, AnchorError } from "@coral-xyz/anchor"; +import { BurryEscrow } from "../target/types/burry_escrow"; +import { Big } from "@switchboard-xyz/common"; +import { + AggregatorAccount, + AnchorWallet, + SwitchboardProgram, +} from "@switchboard-xyz/solana.js"; +import { PublicKey, SystemProgram, Connection } from "@solana/web3.js"; +import { assert } from "chai"; +import { confirmTransaction } from "@solana-developers/helpers"; + +const SOL_USD_SWITCHBOARD_FEED = new PublicKey( + "GvDMxPzN1sCj7L26YDK2HnMRXEQmQ2aemov8YBtPS7vR", +); + +const ESCROW_SEED = "MICHAEL BURRY"; +const DEVNET_RPC_URL = "https://api.devnet.solana.com"; +const CONFIRMATION_COMMITMENT = "confirmed"; +const PRICE_OFFSET = 10; +const ESCROW_AMOUNT = new anchor.BN(100); +const EXPECTED_ERROR_MESSAGE = + "Current SOL price is not above Escrow unlock price."; + +const provider = anchor.AnchorProvider.env(); +anchor.setProvider(provider); + +const program = anchor.workspace.BurryEscrow as Program; +const payer = (provider.wallet as AnchorWallet).payer; + +describe("burry-escrow", () => { + let switchboardProgram: SwitchboardProgram; + let aggregatorAccount: AggregatorAccount; + + before(async () => { + switchboardProgram = await SwitchboardProgram.load( + new Connection(DEVNET_RPC_URL), + payer, + ); + aggregatorAccount = new AggregatorAccount( + switchboardProgram, + SOL_USD_SWITCHBOARD_FEED, + ); + }); + + const createAndVerifyEscrow = async (unlockPrice: number) => { + const [escrow] = PublicKey.findProgramAddressSync( + [Buffer.from(ESCROW_SEED), payer.publicKey.toBuffer()], + program.programId, + ); + + try { + const transaction = await program.methods + .deposit(ESCROW_AMOUNT, unlockPrice) + .accountsPartial({ + user: payer.publicKey, + escrowAccount: escrow, + systemProgram: SystemProgram.programId, + }) + .signers([payer]) + .rpc(); + + await confirmTransaction( + provider.connection, + transaction, + CONFIRMATION_COMMITMENT, + ); + + const escrowAccount = await program.account.escrow.fetch(escrow); + const escrowBalance = await provider.connection.getBalance( + escrow, + CONFIRMATION_COMMITMENT, + ); + + console.log("Onchain unlock price:", escrowAccount.unlockPrice); + console.log("Amount in escrow:", escrowBalance); + + assert(unlockPrice === escrowAccount.unlockPrice); + assert(escrowBalance > 0); + } catch (error) { + console.error("Error details:", error); + throw new Error(`Failed to create escrow: ${error.message}`); + } + }; + + it("creates Burry Escrow Below Current Price", async () => { + const solPrice: Big | null = await aggregatorAccount.fetchLatestValue(); + if (solPrice === null) { + throw new Error("Aggregator holds no value"); + } + // Although `SOL_USD_SWITCHBOARD_FEED` is not changing we are changing the unlockPrice in test as given below to simulate the escrow behavior + const unlockPrice = solPrice.minus(PRICE_OFFSET).toNumber(); + + await createAndVerifyEscrow(unlockPrice); + }); + + it("withdraws from escrow", async () => { + const [escrow] = PublicKey.findProgramAddressSync( + [Buffer.from(ESCROW_SEED), payer.publicKey.toBuffer()], + program.programId, + ); + + const userBalanceBefore = await provider.connection.getBalance( + payer.publicKey, + ); + + try { + const transaction = await program.methods + .withdraw() + .accountsPartial({ + user: payer.publicKey, + escrowAccount: escrow, + feedAggregator: SOL_USD_SWITCHBOARD_FEED, + systemProgram: 
SystemProgram.programId, + }) + .signers([payer]) + .rpc(); + + await confirmTransaction( + provider.connection, + transaction, + CONFIRMATION_COMMITMENT, + ); + + // Verify escrow account is closed + try { + await program.account.escrow.fetch(escrow); + assert.fail("Escrow account should have been closed"); + } catch (error) { + console.log(error.message); + assert( + error.message.includes("Account does not exist"), + "Unexpected error: " + error.message, + ); + } + + // Verify user balance increased + const userBalanceAfter = await provider.connection.getBalance( + payer.publicKey, + ); + assert( + userBalanceAfter > userBalanceBefore, + "User balance should have increased", + ); + } catch (error) { + throw new Error(`Failed to withdraw from escrow: ${error.message}`); + } + }); + + it("creates Burry Escrow Above Current Price", async () => { + const solPrice: Big | null = await aggregatorAccount.fetchLatestValue(); + if (solPrice === null) { + throw new Error("Aggregator holds no value"); + } + // Although `SOL_USD_SWITCHBOARD_FEED` is not changing we are changing the unlockPrice in test as given below to simulate the escrow behavior + const unlockPrice = solPrice.plus(PRICE_OFFSET).toNumber(); + await createAndVerifyEscrow(unlockPrice); + }); + + it("fails to withdraw while price is below UnlockPrice", async () => { + const [escrow] = PublicKey.findProgramAddressSync( + [Buffer.from(ESCROW_SEED), payer.publicKey.toBuffer()], + program.programId, + ); + + try { + await program.methods + .withdraw() + .accountsPartial({ + user: payer.publicKey, + escrowAccount: escrow, + feedAggregator: SOL_USD_SWITCHBOARD_FEED, + systemProgram: SystemProgram.programId, + }) + .signers([payer]) + .rpc(); + + assert.fail("Withdrawal should have failed"); + } catch (error) { + console.log(error.message); + if (error instanceof AnchorError) { + assert.include(error.message, EXPECTED_ERROR_MESSAGE); + } else if (error instanceof Error) { + assert.include(error.message, EXPECTED_ERROR_MESSAGE); + } else { + throw new Error(`Unexpected error type: ${error}`); + } + } + }); +}); +``` + +Once you're confident with the testing logic, run `anchor test` in your +terminal. You should see four tests pass. + +```bash + burry-escrow +Onchain unlock price: 137.42243 +Amount in escrow: 1058020 + ✔ creates Burry Escrow Below Current Price (765ms) +Account does not exist or has no data LxDZ9DXNwSFsu2e6u37o6C2T3k59B6ySEHHVaNDrgBq + ✔ withdraws from escrow (353ms) +Onchain unlock price: 157.42243 +Amount in escrow: 1058020 + ✔ creates Burry Escrow Above Current Price (406ms) +AnchorError occurred. Error Code: SolPriceBelowUnlockPrice. Error Number: 6003. Error Message: Current SOL price is not above Escrow unlock price.. + ✔ fails to withdraw while price is below UnlockPrice + + + 4 passing (2s) +``` + +If something goes wrong, review the lab and ensure everything is correct. Focus +on understanding the intent behind the code instead of just copying/pasting. You +can also review the working code on the +[`main` branch of burry-escrow GitHub repository](https://github.com/solana-developers/burry-escrow/tree/main). + +### Challenge + +As an independent challenge, create a fallback plan if the data feed ever goes +down. If the Oracle queue has not updated the aggregator account in X time or if +the data feed account does not exist anymore, withdraw the user's escrowed +funds. 
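+
+If you'd like a nudge before looking at the solution branch, below is a minimal,
+hypothetical sketch of the staleness check such a fallback could hinge on. It
+only reuses the `check_staleness` helper shown earlier; the `feed_is_down`
+helper name and the 24-hour `EMERGENCY_STALENESS_SECONDS` window are assumptions
+for illustration, not the official solution.
+
+```rust
+use anchor_lang::solana_program::clock::Clock;
+use switchboard_solana::AggregatorAccountData;
+
+// Assumed fallback window: treat the feed as "down" after 24 hours of silence.
+const EMERGENCY_STALENESS_SECONDS: i64 = 86_400;
+
+/// Hypothetical helper: returns true when the feed is stale enough that the
+/// escrowed SOL should be released regardless of the current price.
+pub fn feed_is_down(feed: &AggregatorAccountData) -> bool {
+    feed.check_staleness(
+        Clock::get().unwrap().unix_timestamp,
+        EMERGENCY_STALENESS_SECONDS,
+    )
+    .is_err()
+}
+```
+
+In the withdraw handler, you could branch on a check like this and skip the
+price comparison when it returns true, so users can always recover their funds.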
+ +A potential solution to this challenge can be found +[in the Github repository on the `challenge-solution` branch](https://github.com/solana-developers/burry-escrow/tree/challenge-solution). + + + +Push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=1a5d266c-f4c1-4c45-b986-2afd4be59991)! + + diff --git a/content/courses/connecting-to-offchain-data/verifiable-randomness-functions.mdx b/content/courses/connecting-to-offchain-data/verifiable-randomness-functions.mdx new file mode 100644 index 000000000..5642f7901 --- /dev/null +++ b/content/courses/connecting-to-offchain-data/verifiable-randomness-functions.mdx @@ -0,0 +1,1645 @@ +--- +title: Verifiable Randomness Functions +objectives: + - Explain the limitations of generating random numbers onchain + - Explain how Verifiable Randomness works + - Use Switchboard's VRF oracle queue to generate and consume randomness from + an onchain program +description: "Use proper cryptographic randomness in your onchain programs." +--- + +## Summary + +- Attempts at generating randomness within your program are likely to be + guessable by users given there's no true randomness onchain. +- Verifiable Random Functions (VRFs) give developers the opportunity to + incorporate securely generated random numbers in their onchain programs. +- A VRF is a public-key pseudorandom function that provides proofs that its + outputs were calculated correctly. +- Switchboard offers a developer-friendly VRF for the Solana ecosystem. + +## Lesson + +### Randomness onchain + +Random numbers are **_not_** natively allowed onchain. This is because Solana is +deterministic, every validator runs your code and needs to have the same result. +So if you wanted to create a raffle program, you'd have to look outside of the +blockchain for your randomness. This is where Verifiable Random Functions (VRFs) +come in. VRFs offer developers a secure means of integrating randomness onchain +in a decentralized fashion. + +### Types of Randomness + +Before we dive into how random numbers can be generated for a blockchain, we +must first understand how they are generated on traditional computer systems. +There are really two types of random numbers: _true random_ and _pseudorandom_. +The difference between the two lies in how the numbers are generated. + +Computers can acquire _true random_ numbers by taking some type of physical +measurement of the outside world as entropy. These measurements take advantage +of natural phenomena, such as electronic noise, radioactive decay, or +atmospheric noise, to generate random data. Because these processes are +intrinsically unpredictable, the numbers they produce are genuinely random and +not reproducible. + +_Pseudorandom_ numbers, on the other hand, are generated by algorithms that use +a deterministic process to produce sequences of numbers that appear to be +random. Pseudorandom number generators (PRNGs) start with an initial value +called a seed and then use mathematical formulas to generate subsequent numbers +in the sequence. Given the same seed, a PRNG will always produce the same +sequence of numbers. It's important to seed with something close to true +entropy: an admin-provided "random" input, the last system log, some combination +of your system's clock time and other factors, etc.. Fun fact: older video games +have been broken because speedrunners found out how their randomness was +calculated. 
One game in particular used the number of steps you've taken in the
+game as a seed.
+
+Unfortunately, neither type of randomness is natively available in Solana
+programs, because these programs have to be deterministic. Every validator
+needs to reach the same result, there is no way they could all draw the same
+random number, and any onchain seed would be predictable and open to attack.
+See the [Solana FAQs](/docs/programs/lang-rust#depending-on-rand) for more.
+Instead, we have to look outside of the blockchain for randomness, which is
+where VRFs come in.
+
+### What is Verifiable Randomness?
+
+A Verifiable Random Function (VRF) is a public-key pseudorandom function that
+provides proofs that its outputs were calculated correctly. This means we can
+use a cryptographic keypair to generate a random number along with a proof,
+which anyone can then validate to ensure the value was calculated correctly
+without the producer's secret key ever being leaked. Once validated, the
+random value is stored onchain in an account.
+
+VRFs are a crucial component for achieving verifiable and unpredictable
+randomness on a blockchain, addressing some of the shortcomings of traditional
+PRNGs and the challenges of achieving true randomness in a decentralized
+system.
+
+There are three key properties of a VRF:
+
+1. **Deterministic** - A VRF takes a secret key and a nonce as inputs and
+   deterministically produces an output. Given the same secret key and nonce,
+   the VRF will always produce the same seemingly random value. This property
+   ensures that the random value can be reproduced and verified by anyone.
+2. **Unpredictability** - The output of a VRF appears indistinguishable from
+   true randomness to anyone without access to the secret key. This property
+   ensures that even though the VRF is deterministic, you cannot predict the
+   result ahead of time without knowledge of the inputs.
+3. **Verifiability** - Anybody can verify that the random value was generated
+   correctly using the producer's public key, the input, and the accompanying
+   proof; the secret key is never needed for verification.
+
+VRFs are not specific to Solana and have been utilized on other blockchains to
+generate pseudorandom numbers. Fortunately, Switchboard offers its VRF
+implementation on Solana.
+
+### Switchboard VRF Implementation
+
+Switchboard is a decentralized oracle network that offers VRFs on Solana.
+Oracles are services that provide external data to a blockchain, allowing it
+to interact with and respond to real-world events. The Switchboard network is
+made up of many individual oracles run by third parties that provide external
+data and service requests onchain. To learn more about Switchboard's oracle
+network, please refer to our
+[Oracle lesson](/developers/courses/connecting-to-offchain-data/oracles).
+
+Switchboard's VRF allows users to request that an oracle produce a randomness
+output onchain. Once an oracle has been assigned the request, the proof of the
+VRF result must be verified onchain before it can be used. The VRF proof takes
+276 instructions (~48 transactions) to fully verify onchain. Once the proof is
+verified, the Switchboard program invokes an onchain callback defined by the
+VRF account during account creation. From there the program can consume the
+random data.
+
+You might be wondering how the oracles get paid. In Switchboard's VRF
+implementation, you actually pay per request.
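+
+To make these three properties concrete before we look at Switchboard's actual
+accounts and instructions, here is a purely conceptual sketch of the interface
+a VRF exposes. Nothing below is a real crate or Switchboard's API; the trait
+and names are ours and exist only to show which key does what.
+
+```rust
+/// Conceptual sketch only - not a real library and not Switchboard's API.
+pub trait Vrf {
+    /// Deterministic and unpredictable: only the secret-key holder can
+    /// produce the output, but the same (secret key, input) pair always
+    /// yields the same (output, proof).
+    fn evaluate(secret_key: &[u8], input: &[u8]) -> ([u8; 32], Vec<u8>);
+
+    /// Verifiable: anyone with just the *public* key can confirm that the
+    /// output matches the input and keypair. Conceptually, this is the check
+    /// Switchboard performs onchain before invoking your callback.
+    fn verify(
+        public_key: &[u8],
+        input: &[u8],
+        output: &[u8; 32],
+        proof: &[u8],
+    ) -> bool;
+}
+```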
+ +### Requesting and Consuming VRF + +Now that we know what a VRF is and how it fits into the Switchboard Oracle +network, let's take a closer look at how to actually request and consume +randomness from a Solana program. At a high level, the process for requesting +and consuming randomness from Switchboard looks like this: + +1. Create a `programAuthority` PDA that will be used as the program authority + and sign on behalf of the program. +2. Create a Switchboard VRF Account with the `programAuthority` as the + `authority` and specify the `callback` function the VRF will return the data + to. +3. Invoke the `request_randomness` instruction on the Switchboard program. The + program will assign an oracle to our VRF request. +4. Oracle serves the request and responds to the Switchboard program with the + proof calculated using its secret key. +5. Oracle executes the 276 instructions to verify the VRF proof. +6. Once VRF proof is verified, the Switchboard program will invoke the + `callback` that was passed in as the callback in the initial request with the + pseudorandom number returned from the Oracle. +7. Program consumes the random number and can execute business logic with it! + +There are a lot of steps here, but don't worry, we'll be going through each step +of the process in detail. + +First there are a couple of accounts that we will have to create ourselves to +request randomness, specifically the `authority` and `vrf` accounts. The +`authority` account is a PDA derived from our program that is requesting the +randomness. So the PDA we create will have our own seeds for our own needs. For +now, we'll simply set them at `VRFAUTH`. + +```typescript +// derive PDA +[vrfAuthorityKey, vrfAuthoritySecret] = + anchor.web3.PublicKey.findProgramAddressSync( + [Buffer.from("VRFAUTH")], + program.programId, + ); +``` + +Then, we need to initialize a `vrf` account that is owned by the Switchboard +program and mark the PDA we just derived as its authority. The `vrf` account has +the following data structure. + +```rust +pub struct VrfAccountData { + /// The current status of the VRF account. + pub status: VrfStatus, + /// Incremental counter for tracking VRF rounds. + pub counter: u128, + /// Onchain account delegated for making account changes. <-- This is our PDA + pub authority: Pubkey, + /// The OracleQueueAccountData that is assigned to fulfill VRF update request. + pub oracle_queue: Pubkey, + /// The token account used to hold funds for VRF update request. + pub escrow: Pubkey, + /// The callback that is invoked when an update request is successfully verified. + pub callback: CallbackZC, + /// The number of oracles assigned to a VRF update request. + pub batch_size: u32, + /// Struct containing the intermediate state between VRF crank actions. + pub builders: [VrfBuilder; 8], + /// The number of builders. + pub builders_len: u32, + pub test_mode: bool, + /// Oracle results from the current round of update request that has not been accepted as valid yet + pub current_round: VrfRound, + /// Reserved for future info. + pub _ebuf: [u8; 1024], +} +``` + +Some important fields on this account are `authority`, `oracle_queue`, and +`callback`. The `authority` should be a PDA of the program that has the ability +to request randomness on this `vrf` account. That way, only that program can +provide the signature needed for the vrf request. The `oracle_queue` field +allows you to specify which specific oracle queue you'd like to service the vrf +requests made with this account. 
If you aren't familiar with oracle queues on +Switchboard, checkout the +[Oracles lesson in the Connecting to Offchain Data course](/developers/courses/connecting-to-offchain-data/oracles)! +Lastly, the `callback` field is where you define the callback instruction the +Switchboard program should invoke once the randomness result has be verified. + +The `callback` field is of type +[`CallbackZC`](https://github.com/switchboard-xyz/solana-sdk/blob/9dc3df8a5abe261e23d46d14f9e80a7032bb346c/rust/switchboard-solana/src/oracle_program/accounts/ecvrf.rs#L25). + +```rust +#[zero_copy(unsafe)] +#[repr(packed)] +pub struct CallbackZC { + /// The program ID of the callback program being invoked. + pub program_id: Pubkey, + /// The accounts being used in the callback instruction. + pub accounts: [AccountMetaZC; 32], + /// The number of accounts used in the callback + pub accounts_len: u32, + /// The serialized instruction data. + pub ix_data: [u8; 1024], + /// The number of serialized bytes in the instruction data. + pub ix_data_len: u32, +} +``` + +This is how you define the Callback struct client side. + +```typescript +// example +import Callback from '@switchboard-xyz/solana.js' +... +... + +const vrfCallback: Callback = { + programId: program.programId, + accounts: [ + // ensure all accounts in consumeRandomness are populated + { pubkey: clientState, isSigner: false, isWritable: true }, + { pubkey: vrfClientKey, isSigner: false, isWritable: true }, + { pubkey: vrfSecret.publicKey, isSigner: false, isWritable: true }, + ], + // use name of instruction + ixData: vrfIxCoder.encode("consumeRandomness", ""), // pass any params for instruction here + } +``` + +Now, you can create the `vrf` account. + +```typescript +// Create Switchboard VRF +[vrfAccount] = await switchboard.queue.createVrf({ + callback: vrfCallback, + authority: vrfAuthorityKey, // vrf authority + vrfKeypair: vrfSecret, + enable: !queue.unpermissionedVrfEnabled, // only set permissions if required +}); +``` + +Now that we have all of our needed accounts we can finally call the +`request_randomness` instruction on the Switchboard program. It's important to +note you can invoke the `request_randomness` in a client or within a program +with a cross program invocation (CPI). Let's take a look at what accounts are +required for this request by checking out the Account struct definition in the +actual +[Switchboard program](https://github.com/switchboard-xyz/solana-sdk/blob/fbef37e4a78cbd8b8b6346fcb96af1e20204b861/rust/switchboard-solana/src/oracle_program/instructions/vrf_request_randomness.rs#L8). 
+ +```rust +// from the Switchboard program +// https://github.com/switchboard-xyz/solana-sdk/blob/fbef37e4a78cbd8b8b6346fcb96af1e20204b861/rust/switchboard-solana/src/oracle_program/instructions/vrf_request_randomness.rs#L8 + +pub struct VrfRequestRandomness<'info> { + #[account(signer)] + pub authority: AccountInfo<'info>, + #[account(mut)] + pub vrf: AccountInfo<'info>, + #[account(mut)] + pub oracle_queue: AccountInfo<'info>, + pub queue_authority: AccountInfo<'info>, + pub data_buffer: AccountInfo<'info>, + #[account( + mut, + seeds = [ + b"PermissionAccountData", + queue_authority.key().as_ref(), + oracle_queue.key().as_ref(), + vrf.key().as_ref() + ], + bump = params.permission_bump + )] + pub permission: AccountInfo<'info>, + #[account(mut, constraint = escrow.owner == program_state.key())] + pub escrow: Account<'info, TokenAccount>, + #[account(mut, constraint = payer_wallet.owner == payer_authority.key())] + pub payer_wallet: Account<'info, TokenAccount>, + #[account(signer)] + pub payer_authority: AccountInfo<'info>, + pub recent_blockhashes: AccountInfo<'info>, + #[account(seeds = [b"STATE"], bump = params.state_bump)] + pub program_state: AccountInfo<'info>, + pub token_program: AccountInfo<'info>, +} +``` + +That's a lot of accounts, let's walk through each one and give them some +context. + +- `authority` - PDA derived from our program +- `vrf` - + [Account owned by the Switchboard program](https://docs.rs/switchboard-solana/latest/switchboard_solana/oracle_program/accounts/vrf/struct.VrfAccountData.html) +- Oracle Queue - + [Account owned by Switchboard program that contains metadata about the oracle queue to use for this request](https://docs.rs/switchboard-solana/latest/switchboard_solana/oracle_program/accounts/queue/struct.OracleQueueAccountData.html) +- Queue Authority - Authority of the Oracle Queue chosen +- [Data Buffer](https://github.com/switchboard-xyz/solana-sdk/blob/9dc3df8a5abe261e23d46d14f9e80a7032bb346c/rust/switchboard-solana/src/oracle_program/accounts/queue.rs#L57C165-L57C165) - + Account of the `OracleQueueBuffer` account holding a collection of Oracle + pubkeys that have successfully hearbeated before the queues `oracleTimeout` + configuration has elapsed. Stored in the Oracle Queue account. +- [Permission Account Data](https://docs.rs/switchboard-solana/latest/switchboard_solana/oracle_program/accounts/permission/struct.PermissionAccountData.html) +- Escrow (Switchboard escrow account) - Token Account +- Switchboard program state account - + [Of type `SbState`](https://docs.rs/switchboard-solana/latest/switchboard_solana/oracle_program/accounts/sb_state/struct.SbState.html) +- Switchboard Program - Switchboard Program +- Payer Token Account - Will be used to pay for fees +- Payer Authority - Authority of the Payer Token Account +- Recent Blockhashes Program - + [Recent Blockhashes Solana program](https://docs.rs/solana-program/latest/solana_program/sysvar/recent_blockhashes/index.html) +- Token Program - Solana Token Program + +That's all the accounts needed for just the randomness request, now let's see +what it looks like in a Solana program via CPI. 
To do this, we make use of the +`VrfRequestRandomness` data struct from the +[SwitchboardV2 rust crate.](https://github.com/switchboard-xyz/solana-sdk/blob/main/rust/switchboard-solana/src/oracle_program/instructions/vrf_request_randomness.rs) +This struct has some built-in capabilities to make our lives easier here, most +notably the account structure is defined for us and we can easily call `invoke` +or `invoke_signed` on the object. + +```rust +// our client program +use switchboard_v2::VrfRequestRandomness; +use state::*; + +pub fn request_randomness(ctx: Context, request_params: RequestRandomnessParams) -> Result <()> { + let switchboard_program = ctx.accounts.switchboard_program.to_account_info(); + + let vrf_request_randomness = VrfRequestRandomness { + authority: ctx.accounts.vrf_state.to_account_info(), + vrf: ctx.accounts.vrf.to_account_info(), + oracle_queue: ctx.accounts.oracle_queue.to_account_info(), + queue_authority: ctx.accounts.queue_authority.to_account_info(), + data_buffer: ctx.accounts.data_buffer.to_account_info(), + permission: ctx.accounts.permission.to_account_info(), + escrow: ctx.accounts.switchboard_escrow.clone(), + payer_wallet: ctx.accounts.payer_wallet.clone(), + payer_authority: ctx.accounts.user.to_account_info(), + recent_blockhashes: ctx.accounts.recent_blockhashes.to_account_info(), + program_state: ctx.accounts.program_state.to_account_info(), + token_program: ctx.accounts.token_program.to_account_info(), + }; + + msg!("requesting randomness"); + vrf_request_randomness.invoke_signed( + switchboard_program, + request_params.switchboard_state_bump, + request_params.permission_bump, + state_seeds, + )?; + +... + +Ok(()) + +} +``` + +Once the Switchboard program is invoked, it does some logic on its end and +assigns an oracle in the `vrf` account's defined oracle queue to serve the +randomness request. The assigned oracle then calculates a random value and sends +it back to the Switchboard program. + +Once the result is verified, the Switchboard program then invokes the `callback` +instruction defined in the `vrf` account. The callback instruction is where you +would have written your business logic using the random numbers. In the +following code we store the resulting randomness in our `vrf_auth` PDA from our +first step. + +```rust +// our client program + +#[derive(Accounts)] +pub struct ConsumeRandomness<'info> { + // vrf client state + #[account] + pub vrf_auth: AccountLoader<'info, VrfClientState>, + // switchboard vrf account + #[account( + mut, + constraint = vrf.load()?.authority == vrf_auth.key() @ EscrowErrorCode::InvalidVrfAuthorityError + )] + pub vrf: AccountLoader<'info, VrfAccountData> +} + +pub fn handler(ctx: Context) -> Result <()> { + msg!("Consuming randomness!"); + + // load the vrf account data + let vrf = ctx.accounts.vrf.load()?; + // use the get_result method to fetch the randomness results + let result_buffer = vrf.get_result()?; + + // check if result buff is all 0's + if result_buffer == [0u8; 32] { + msg!("vrf buffer empty"); + return Ok(()); + } + + msg!("Result buffer is {:?}", result_buffer); + // use the random value how you see fit + + Ok(()) +} +``` + +Now you have randomness! Hooray! But there is one last thing we have not +discussed yet and that's how the randomness is returned. Switchboard, gives you +your randomness calling +[`get_result()`](https://github.com/switchboard-xyz/solana-sdk/blob/9dc3df8a5abe261e23d46d14f9e80a7032bb346c/rust/switchboard-solana/src/oracle_program/accounts/vrf.rs#L122). 
+This method returns the `current_round.result` field of the `vrf` account,
+which is really just a buffer of 32 random
+[`u8`](https://github.com/switchboard-xyz/solana-sdk/blob/9dc3df8a5abe261e23d46d14f9e80a7032bb346c/rust/switchboard-solana/src/oracle_program/accounts/ecvrf.rs#L65C26-L65C26)
+unsigned integers. You can use these unsigned integers however you see fit in
+your program, but a very common method is to treat each integer in the buffer
+as its own random number. For example, if you need a dice roll (1-6), just take
+the first byte of the array, take it modulo 6, and add one.
+
+```rust
+// slice byte buffer to store the first value
+let dice_roll = (result_buffer[0] % 6) + 1;
+```
+
+What you do with the random values from there is completely up to you!
+
+That is the essence of requesting randomness with a Switchboard VRF. To recap
+the steps involved in a VRF request, review this diagram.
+
+![VRF Diagram](/assets/courses/unboxed/vrf-diagram.png)
+
+## Lab
+
+For this lesson's lab, we will be picking up where we left off in the
+[Oracle lesson](/developers/courses/connecting-to-offchain-data/oracles). If you
+haven't completed the Oracle lesson and demo, we strongly recommend you do, as
+there are a lot of overlapping concepts and we'll be starting from the Oracle
+lesson's codebase.
+
+If you don't want to complete the Oracle lesson, the starter code for this lab
+is provided for you in
+[the main branch of the lab Github repository](https://github.com/solana-developers/burry-escrow).
+
+The repo contains a "Michael Burry" escrow program. This program allows a user
+to lock up some SOL in escrow that cannot be withdrawn until SOL has reached a
+predefined price in USD chosen by the user. We will be adding VRF functionality
+to this program to allow the user to "Get out of jail" by rolling doubles. Our
+demo today will let the user roll two virtual dice; if they roll doubles (the
+two dice match), the user can withdraw their funds from escrow regardless of
+the SOL price.
+
+#### 1. Program Setup
+
+If you are cloning the repo from the previous lesson, make sure to do the
+following:
+
+1. `git clone https://github.com/solana-developers/burry-escrow`
+2. `cd burry-escrow`
+3. `anchor build`
+4. `anchor keys list`
+   1. Take the resulting key and put it into `Anchor.toml` and
+      `programs/burry-escrow/src/lib.rs`
+5. `solana config get`
+   1. Take your **Keypair Path** and change the `wallet` field in your
+      `Anchor.toml`
+6. `yarn install`
+7. `anchor test`
+
+When all tests pass, we're ready to begin. We will start by filling in some
+boilerplate, then we'll implement the functions.
+
+#### 2. Cargo.toml
+
+First, since VRF uses SPL tokens for its fees, we need to import `anchor-spl`
+in our `Cargo.toml` file.
+
+```toml
+[dependencies]
+anchor-lang = "0.30.1"
+anchor-spl = "0.30.1"
+switchboard-v2 = "0.4.0"
+```
+
+#### 3. Lib.rs
+
+Next, let's edit `lib.rs` and add the additional functions we'll be building
+today. The functions are as follows:
+
+- `init_vrf_client` - Creates the VRF authority PDA, which will sign for and
+  consume the randomness.
+- `get_out_of_jail` - Requests the randomness from the VRF, effectively rolling
+  the dice.
+- `consume_randomness` - The callback function for the VRF where we will check
+  for the dice rolls.
+ +```rust +use anchor_lang::prelude::*; +use instructions::deposit::*; +use instructions::withdraw::*; +use instructions::init_vrf_client::*; +use instructions::get_out_of_jail::*; +use instructions::consume_randomness::*; + +pub mod instructions; +pub mod state; +pub mod errors; + +declare_id!("YOUR_KEY_HERE"); + +#[program] +mod burry_escrow { + + use crate::instructions::init_vrf_client::init_vrf_client_handler; + + use super::*; + + pub fn deposit(ctx: Context, escrow_amt: u64, unlock_price: f64) -> Result<()> { + deposit_handler(ctx, escrow_amt, unlock_price) + } + + pub fn withdraw(ctx: Context) -> Result<()> { + withdraw_handler(ctx) + } + + pub fn init_vrf_client(ctx: Context) -> Result<()>{ + init_vrf_client_handler(ctx) + } + + pub fn get_out_of_jail(ctx: Context, params: RequestRandomnessParams) -> Result<()>{ + get_out_of_jail_handler(ctx, params) + } + + pub fn consume_randomness(ctx: Context) -> Result<()>{ + consume_randomness_handler(ctx) + } +} +``` + +Make sure you replace `YOUR_KEY_HERE` with your own program key. + +#### 4. State.rs + +Next, in `state.rs`, add an `out_of_jail` flag to `EscrowState`. When we finally +roll two matching die, we'll flip this flag. When the `withdraw` function is +called we can transfer the funds without checking the price. + +```rust +// state.rs +#[account] +pub struct EscrowState { + pub unlock_price: f64, + pub escrow_amount: u64, + pub out_of_jail: bool +} +``` + +Then, create our second data account for this program: `VrfClientState`. This +will hold the state of our dice rolls. It will have the following fields: + +- `bump` - Stores the bump of the account for easy signing later. +- `result_buffer` - This is where the VRF function will dump the raw randomness + data. +- `dice_type` - We will set this to 6 as in a 6-sided die. +- `die_result_1` and `die_result_2` - The results of our dice roll. +- `timestamp` - Keeps track of when our last roll was. +- `vrf` - Public key of the VRF account; owned by the Switchboard program. We + will create this before we call `VrfClientState`'s initialization function. +- `escrow` - Public key of our burry escrow account. + +We're also going to make the `VrfClientState` context a `zero_copy` struct. This +means that we will initialize it with `load_init()` and pass it into accounts +with `AccountLoader`. We do this because VRF functions are very account +intensive and we need to be mindful of the stack. If you'd like to learn more +about `zero_copy`, take a look at our +[Program Architecture lesson](/developers/courses/program-optimization/program-architecture). + +```rust +// state.rs + +#[repr(packed)] +#[account(zero_copy(unsafe))] +#[derive(Default)] +pub struct VrfClientState { + pub bump: u8, + pub result_buffer: [u8; 32], + pub dice_type: u8, // 6 sided + pub die_result_1: u8, + pub die_result_2: u8, + pub timestamp: i64, + pub vrf: Pubkey, + pub escrow: Pubkey +} +``` + +Lastly we are going to add the `VRF_STATE_SEED` to PDA our VRF Client account. 
+ +```rust +pub const VRF_STATE_SEED: &[u8] = b"VRFCLIENT"; +``` + +Your `state.rs` file should look like this: + +```rust +use anchor_lang::prelude::*; + +pub const ESCROW_SEED: &[u8] = b"MICHAEL BURRY"; +pub const VRF_STATE_SEED: &[u8] = b"VRFCLIENT"; +pub const SOL_USDC_FEED: &str = "GvDMxPzN1sCj7L26YDK2HnMRXEQmQ2aemov8YBtPS7vR"; + +#[account] +pub struct EscrowState { + pub unlock_price: f64, + pub escrow_amount: u64, + pub out_of_jail: bool +} + +#[repr(packed)] +#[account(zero_copy(unsafe))] +#[derive(Default)] +pub struct VrfClientState { + pub bump: u8, + pub result_buffer: [u8; 32], + pub dice_type: u8, // 6 sided + pub die_result_1: u8, + pub die_result_2: u8, + pub timestamp: i64, + pub vrf: Pubkey, + pub escrow: Pubkey +} +``` + +#### 5. Error.rs + +Next, let's take a quick pit stop and add one last error +`InvalidVrfAuthorityError` to `error.rs`. We'll use this when the VRF authority +is incorrect. + +```rust +use anchor_lang::prelude::*; + +#[error_code] +#[derive(Eq, PartialEq)] +pub enum EscrowErrorCode { + #[msg("Not a valid Switchboard account")] + InvalidSwitchboardAccount, + #[msg("Switchboard feed has not been updated in 5 minutes")] + StaleFeed, + #[msg("Switchboard feed exceeded provided confidence interval")] + ConfidenceIntervalExceeded, + #[msg("Current SOL price is not above Escrow unlock price.")] + SolPriceAboveUnlockPrice, + #[msg("Switchboard VRF Account's authority should be set to the client's state pubkey")] + InvalidVrfAuthorityError, +} +``` + +#### 6. Mod.rs + +Now, let's modify our `mod.rs` file to include our new functions we'll be +writing. + +```rust +pub mod deposit; +pub mod withdraw; +pub mod init_vrf_client; +pub mod get_out_of_jail; +pub mod consume_randomness; +``` + +#### 7. Deposit.rs and Withdraw.rs + +Lastly, let's update our `deposit.rs` and `withdraw.rs` files to reflect our +soon-to-be new powers. + +First, let's initialize our `out_of_jail` flag to `false` in `deposit.rs`. + +```rust +// in deposit.rs +... +let escrow_state = &mut ctx.accounts.escrow_account; + escrow_state.unlock_price = unlock_price; + escrow_state.escrow_amount = escrow_amount; + escrow_state.out_of_jail = false; +... +``` + +Next, let's write our simple get-out-of-jail logic. Wrap our oracle price-checks +with an `if` statement. If the `out_of_jail` flag on the `escrow_state` account +is false, then we check the price at which to unlock the SOL: + +```rust +if !escrow_state.out_of_jail { + // get result + let val: f64 = feed.get_result()?.try_into()?; + + // check whether the feed has been updated in the last 300 seconds + feed.check_staleness(Clock::get().unwrap().unix_timestamp, 300) + .map_err(|_| error!(EscrowErrorCode::StaleFeed))?; + + msg!("Current feed result is {}!", val); + msg!("Unlock price is {}", escrow_state.unlock_price); + + if val < escrow_state.unlock_price as f64 { + return Err(EscrowErrorCode::SolPriceAboveUnlockPrice.into()) + } + } +``` + +If `out_of_jail` is true, then we get out of jail free and can skip the price +check, going straight to our withdrawal. + +#### 8. Using VRF + +Now that we have the boilerplate out of the way, let's move on to our first +addition: initializing our VRF Client. Let's create a new file called +`init_vrf_client.rs` in the `/instructions` folder. + +We'll add the needed crates, then create the `InitVrfClient` context. We'll need +the following accounts: + +- `user` - the signer who has funds in escrow. +- `escrow_account` - the burry escrow account created when the user locked their + funds up. 
+- `vrf_client_state` - account we will be creating in this instruction to hold + state about the user's dice rolls. +- `vrf` - Our VRF owned by the Switchboard program, we will create this account + client-side before we call `init_vrf_client`. +- `system_program` - The system program since we use the init macro for + `vrf_state`, which calls `create_account` under the hood. + +```rust +use crate::state::*; +use crate::errors::*; +use anchor_lang::prelude::*; +use switchboard_v2::VrfAccountData; + +#[derive(Accounts)] +pub struct InitVrfClient<'info> { + #[account(mut)] + pub user: Signer<'info>, + // burry escrow account + #[account( + mut, + seeds = [ESCROW_SEED, user.key().as_ref()], + bump, + )] + pub escrow_account: Account<'info, EscrowState>, + // vrf client state + #[account( + init, + seeds = [ + VRF_STATE_SEED, + user.key.as_ref(), + escrow_account.key().as_ref(), + vrf.key().as_ref(), + ], + payer = user, + space = 8 + std::mem::size_of::(), + bump + )] + pub vrf_state: AccountLoader<'info, VrfClientState>, + + // switchboard vrf account + #[account( + mut, + constraint = vrf.load()?.authority == vrf_state.key() @ EscrowErrorCode::InvalidVrfAuthorityError + )] + pub vrf: AccountLoader<'info, VrfAccountData>, + pub system_program: Program<'info, System> +} +``` + +Notice the `vrf_state` account is a PDA derived with the `VRF_STATE_SEED` string +and the `user`, `escrow_account`, and `vrf` public keys as seeds. This means a +single user can only initialize a single `vrf_state` account, just like they can +only have one `escrow_account`. Since there is only one, If you wanted to be +thorough, you might want to implement a `close_vrf_state` function to get your +rent back. + +Now, let's write some basic initialization logic for this function. First we +load and initialize our `vrf_state` account by calling `load_init()`. Then we +fill in the values for each field. + +```rust +pub fn init_vrf_client_handler(ctx: Context) -> Result<()> { + msg!("init_client validate"); + + let mut vrf_state = ctx.accounts.vrf_state.load_init()?; + *vrf_state = VrfClientState::default(); + vrf_state.bump = ctx.bumps.get("vrf_state").unwrap().clone(); + vrf_state.escrow = ctx.accounts.escrow_account.key(); + vrf_state.die_result_1 = 0; + vrf_state.die_result_2 = 0; + vrf_state.timestamp = 0; + vrf_state.dice_type = 6; // sided + + Ok(()) +} +``` + +#### 9. Get Out of Jail + +Now that we have the `VrfClientState` account initialized, we can use it in the +`get_out_jail` instruction. Create a new file called `get_out_of_jail.rs` in the +`/instructions` folder. + +The `get_out_jail` instruction will make our VRF request to Switchboard. We'll +need to pass in all of the accounts needed for both the VRF request and our +business logic callback function. + +VRF Accounts: + +- `payer_wallet` - the token wallet that will pay for the VRF request; the + `user` must be the owner of this account. +- `vrf` - The VRF account that was created by the client. +- `oracle_queue` - The oracle queue that will field the randomness result. +- `queue_authority` - The authority over the queue. +- `data_buffer` - The queue's data buffer account - used by the queue to + compute/verify the randomness. +- `permission` - Created when creating the `vrf` account. It's derived from + several of the other accounts. +- `switchboard_escrow` - Where the payer sends the tokens for requests. +- `program_state` - State of the Switchboard program. 
+ +Programs: + +- `switchboard_program` +- `recent_blockhashes` +- `token_program` +- `system_program` + +Business Logic Accounts: + +- `user` - The user account who has escrowed the funds. +- `escrow_account` - The burry escrow state account for user. +- `vrf_state` - The VRF client state account initialized in the + `init_vrf_client` instruction. + +```rust +use crate::state::*; +use crate::errors::*; +use anchor_lang::prelude::*; +use anchor_lang::solana_program::sysvar::*; +use switchboard_v2::{VrfAccountData, OracleQueueAccountData, PermissionAccountData, SbState, VrfRequestRandomness}; +use anchor_spl::token::{TokenAccount, Token}; + +#[derive(Accounts)] +pub struct RequestRandomness<'info> { + // PAYER ACCOUNTS + #[account(mut)] + pub user: Signer<'info>, + #[account(mut, + constraint = + payer_wallet.owner == user.key() + && switchboard_escrow.mint == program_state.load()?.token_mint + )] + pub payer_wallet: Account<'info, TokenAccount>, + // burry escrow account + #[account( + mut, + seeds = [ESCROW_SEED, user.key().as_ref()], + bump, + )] + pub escrow_account: Account<'info, EscrowState>, + // vrf client state + #[account( + mut, + seeds = [ + VRF_STATE_SEED, + user.key.as_ref(), + escrow_account.key().as_ref(), + vrf.key().as_ref(), + ], + bump + )] + pub vrf_state: AccountLoader<'info, VrfClientState>, + // switchboard vrf account + #[account( + mut, + constraint = vrf.load()?.authority == vrf_state.key() @ EscrowErrorCode::InvalidVrfAuthorityError + )] + pub vrf: AccountLoader<'info, VrfAccountData>, + // switchboard accounts + #[account(mut, + has_one = data_buffer + )] + pub oracle_queue: AccountLoader<'info, OracleQueueAccountData>, + /// CHECK: + #[account( + mut, + constraint = oracle_queue.load()?.authority == queue_authority.key() + )] + pub queue_authority: UncheckedAccount<'info>, + /// CHECK + #[account(mut)] + pub data_buffer: AccountInfo<'info>, + #[account(mut)] + pub permission: AccountLoader<'info, PermissionAccountData>, + #[account(mut, + constraint = switchboard_escrow.owner == program_state.key() && switchboard_escrow.mint == program_state.load()?.token_mint + )] + pub switchboard_escrow: Account<'info, TokenAccount>, + #[account(mut)] + pub program_state: AccountLoader<'info, SbState>, + /// CHECK: + #[account( + address = *vrf.to_account_info().owner, + constraint = switchboard_program.executable == true + )] + pub switchboard_program: AccountInfo<'info>, + // SYSTEM ACCOUNTS + /// CHECK: + #[account(address = recent_blockhashes::ID)] + pub recent_blockhashes: AccountInfo<'info>, + pub token_program: Program<'info, Token>, + pub system_program: Program<'info, System> +} +``` + +Lastly, we'll create a new struct `RequestRandomnessParams`. We'll be passing in +some account's bumps client-side. + +```rust +#[derive(Clone, AnchorSerialize, AnchorDeserialize)] +pub struct RequestRandomnessParams { + pub permission_bump: u8, + pub switchboard_state_bump: u8, +} +``` + +Now, we can work on the logic of this instruction. The logic should gather all +of the accounts needed and pass them to +[VrfRequestRandomness](https://github.com/switchboard-xyz/solana-sdk/blob/fbef37e4a78cbd8b8b6346fcb96af1e20204b861/rust/switchboard-solana/src/oracle_program/instructions/vrf_request_randomness.rs#L8), +which is a really nice struct from Switchboard. Then we'll sign the request and +send it on it's way. 
+ +```rust +pub fn get_out_of_jail_handler(ctx: Context, params: RequestRandomnessParams) -> Result <()> { + let switchboard_program = ctx.accounts.switchboard_program.to_account_info(); + let vrf_state = ctx.accounts.vrf_state.load()?; + + let bump = vrf_state.bump.clone(); + drop(vrf_state); + + // build vrf request struct from the Switchboard Rust crate + let vrf_request_randomness = VrfRequestRandomness { + authority: ctx.accounts.vrf_state.to_account_info(), + vrf: ctx.accounts.vrf.to_account_info(), + oracle_queue: ctx.accounts.oracle_queue.to_account_info(), + queue_authority: ctx.accounts.queue_authority.to_account_info(), + data_buffer: ctx.accounts.data_buffer.to_account_info(), + permission: ctx.accounts.permission.to_account_info(), + escrow: ctx.accounts.switchboard_escrow.clone(), + payer_wallet: ctx.accounts.payer_wallet.clone(), + payer_authority: ctx.accounts.user.to_account_info(), + recent_blockhashes: ctx.accounts.recent_blockhashes.to_account_info(), + program_state: ctx.accounts.program_state.to_account_info(), + token_program: ctx.accounts.token_program.to_account_info(), + }; + + let vrf_key = ctx.accounts.vrf.key(); + let escrow_key = ctx.accounts.escrow_account.key(); + let user_key = ctx.accounts.user.key(); + let state_seeds: &[&[&[u8]]] = &[&[ + &VRF_STATE_SEED, + user_key.as_ref(), + escrow_key.as_ref(), + vrf_key.as_ref(), + &[bump], + ]]; + + // submit vrf request with PDA signature + msg!("requesting randomness"); + vrf_request_randomness.invoke_signed( + switchboard_program, + params.switchboard_state_bump, + params.permission_bump, + state_seeds, + )?; + + msg!("randomness requested successfully"); + + Ok(()) +} +``` + +#### 10. Consume Randomness + +Now that we've built the logic to request a VRF from Switchboard, we must build +the callback instruction the Switchboard program will call once the VRF has been +verified. Create a new file called `consume_randomness.rs` in the +`/instructions` directory. + +This function will use the randomness to determine which dice have been rolled. +If doubles are rolled, set the `out_of_jail` field on `vrf_state` to true. + +First, let's create the `ConsumeRandomness` context. Fortunately, it only takes +three accounts. + +- `escrow_account` - state account for user's escrowed funds. +- `vrf_state` - state account to hold information about dice roll. +- `vrf` - account with the random number that was just calculated by the + Switchboard network. + +```rust +// inside consume_randomness.rs +use crate::state::*; +use crate::errors::*; +use anchor_lang::prelude::*; +use switchboard_v2::VrfAccountData; + +#[derive(Accounts)] +pub struct ConsumeRandomness<'info> { + // burry escrow account + #[account(mut)] + pub escrow_account: Account<'info, EscrowState>, + // vrf client state + #[account(mut)] + pub vrf_state: AccountLoader<'info, VrfClientState>, + // switchboard vrf account + #[account( + mut, + constraint = vrf.load()?.authority == vrf_state.key() @ EscrowErrorCode::InvalidVrfAuthorityError + )] + pub vrf: AccountLoader<'info, VrfAccountData> +} +``` + +Now let's write the logic for our `consume_randomness_handler`. We'll first +fetch the results from the `vrf` account. + +We need to call `load()` because the `vrf` is passed in as an `AccountLoader`. +Remember, `AccountLoader` avoids both stack and heap overflows for large +accounts. Then, we call `get_result()` to grab the randomness from inside the +`VrfAccountData` struct. Finally, we'll check if the resulting buffer is zeroed +out. 
If it's all zeros, it means the Oracles have not yet verified and deposited +the randomness in the account. + +```rust +// inside consume_randomness.rs + +pub fn consume_randomness_handler(ctx: Context) -> Result<()> { + msg!("Consuming randomness..."); + + let vrf = ctx.accounts.vrf.load()?; + let result_buffer = vrf.get_result()?; + + if result_buffer == [0u8; 32] { + msg!("vrf buffer empty"); + return Ok(()); + } + + Ok(()) +} +``` + +Then we load our `vrf_state` using `load_mut` since we'll be storing the +randomness and dice rolls within it. We also want to check that the +`result_buffer` returned from the `vrf` does not match byte for byte the +`result_buffer` from the `vrf_state`. If they do match, we know the returned +randomness is stale. + +```rust +pub fn consume_randomness_handler(ctx: Context) -> Result<()> { + msg!("Successfully consumed randomness."); + + let vrf = ctx.accounts.vrf.load()?; + let result_buffer = vrf.get_result()?; + + if result_buffer == [0u8; 32] { + msg!("vrf buffer empty"); + return Ok(()); + } + // new code + let vrf_state = &mut ctx.accounts.vrf_state.load_mut()?; + if result_buffer == vrf_state.result_buffer { + msg!("result_buffer unchanged"); + return Ok(()); + } + + ... + ... +} +``` + +Now it's time to actually use the random result. Since we only use two dice we +only need the first two bytes of the buffer. To convert these random values into +“dice rolls”, we use modular arithmetic. For anyone not familiar with modular +arithmetic, +[Wikipedia can help](https://en.wikipedia.org/wiki/Modular_arithmetic). In +modular arithmetic, numbers "wrap around" upon reaching a given fixed quantity. +This given quantity is known as the modulus to leave as the remainder. Here, the +modulus is the `dice_type` stored on the `vrf_state` account. We hard-coded this +to 6 when the account was initialized to represent a 6-sided die. When we use +`dice_type`, or 6, as the modulus, our result will be a number 0-5. We then add +one, to make the resulting possibilities 1-6. + +```rust +pub fn consume_randomness_handler(ctx: Context) -> Result <()> { + msg!("Successfully consumed randomness."); + + let vrf = ctx.accounts.vrf.load()?; + let result_buffer = vrf.get_result()?; + + if result_buffer == [0u8; 32] { + msg!("vrf buffer empty"); + return Ok(()); + } + + let vrf_state = &mut ctx.accounts.vrf_state.load_mut()?; + let dice_type = vrf_state.dice_type; + if result_buffer == vrf_state.result_buffer { + msg!("result_buffer unchanged"); + return Ok(()); + } + + msg!("Result buffer is {:?}", result_buffer); + + let dice_1 = result_buffer[0] % dice_type + 1; + let dice_2 = result_buffer[1] % dice_type + 1; + + msg!("Current Die 1 Value [1 - {}) = {}!", dice_type, dice_1); + msg!("Current Die 2 Value [1 - {}) = {}!", dice_type, dice_2); + + ... + ... +} +``` + +> Fun fact from Christian (one of the editors): one byte per roll is actually a +> slightly bad option for a dice roll. (Good enough to demo) You have 256 +> options in a u8. When modulo'd by 6, the number zero has a slight advantage in +> the distribution (256 is not divisible by 6). Number of 0s: (255-0)/6 + 1 = 43 +> Number of 1s: (256-1)/6 = 42.6, so 42 occurrences of 1 Number of 2s: (257-2)/6 +> = 42.5, so 42 occurrences of 2 Number of 3s: (258-3)/6 = 42.5, so 42 +> occurrences of 3 Number of 4s: (259-4)/6 = 42.5, so 42 occurrences of 4 Number +> of 5s: (260-5)/6 = 42.5, so 42 occurrences of 5 + +The very last thing we have to do is update the fields in `vrf_state` and +determine is the user rolled doubles. 
If so, flip the `out_of_jail` flag to +true. + +If the `out_of_jail` becomes true, the user can then call the `withdraw` +instruction and it will skip over the price check. + +```rust +pub fn consume_randomness_handler(ctx: Context) -> Result <()> { + msg!("Successfully consumed randomness."); + + let vrf = ctx.accounts.vrf.load()?; + let result_buffer = vrf.get_result()?; + + if result_buffer == [0u8; 32] { + msg!("vrf buffer empty"); + return Ok(()); + } + + let vrf_state = &mut ctx.accounts.vrf_state.load_mut()?; + let dice_type = vrf_state.dice_type; + if result_buffer == vrf_state.result_buffer { + msg!("result_buffer unchanged"); + return Ok(()); + } + + msg!("Result buffer is {:?}", result_buffer); + + let dice_1 = result_buffer[0] % dice_type + 1; + let dice_2 = result_buffer[1] % dice_type + 1; + + msg!("Current Die 1 Value [1 - {}) = {}!", dice_type, dice_1); + msg!("Current Die 2 Value [1 - {}) = {}!", dice_type, dice_2); + + msg!("Updating VRF State with random value..."); + vrf_state.result_buffer = result_buffer; + vrf_state.die_result_1 = dice_1; + vrf_state.die_result_2 = dice_2; + vrf_state.timestamp = Clock::get().unwrap().unix_timestamp; + + if dice_1 == dice_2 { + msg!("Rolled doubles, get out of jail free!"); + let escrow_state = &mut ctx.accounts.escrow_account; + escrow_state.out_of_jail = true; + } + + Ok(()) +} +``` + +And that's it for the get-out-of-jail functionality! Congrats, you have just +built a program that can consume Switchboard data feeds and submit VRF requests. +Please make sure your program builds successfully by running `anchor build`. + +#### 11. Testing + +Alright, let's test our program. Historically, we'd need to test the VRF on +Devnet. Fortunately, the folks at Switchboard have created some really nice +functions to let us run our own VRF oracle locally. For this, we'll need to set +up our local server, grab all of the right accounts, and then call our program. + +The first thing we'll do is pull in some more accounts in our `Anchor.toml` +file: + +```toml +## VRF ACCOUNTS +[[test.validator.clone]] # sbv2 attestation programID +address = "sbattyXrzedoNATfc4L31wC9Mhxsi1BmFhTiN8gDshx" + +[[test.validator.clone]] # sbv2 attestation IDL +address = "5ExuoQR69trmKQfB95fDsUGsUrrChbGq9PFgt8qouncz" + +[[test.validator.clone]] # sbv2 SbState +address = "CyZuD7RPDcrqCGbNvLCyqk6Py9cEZTKmNKujfPi3ynDd" +``` + +Then we create a new test file called `vrf-test.ts` and copy and paste the code +below. It copies over the last two tests from the oracle lesson, adds some +imports, and adds a new function called `delay`. + +```typescript +import * as anchor from "@coral-xyz/anchor"; +import { Program } from "@coral-xyz/anchor"; +import { BurryEscrow } from "../target/types/burry_escrow"; +import { Big } from "@switchboard-xyz/common"; +import { + AggregatorAccount, + AnchorWallet, + SwitchboardProgram, + SwitchboardTestContext, + Callback, + PermissionAccount, +} from "@switchboard-xyz/solana.js"; +import { NodeOracle } from "@switchboard-xyz/oracle"; +import { assert } from "chai"; + +export const solUsedSwitchboardFeed = new anchor.web3.PublicKey( + "GvDMxPzN1sCj7L26YDK2HnMRXEQmQ2aemov8YBtPS7vR", +); + +function delay(ms: number) { + return new Promise(resolve => setTimeout(resolve, ms)); +} + +describe("burry-escrow-vrf", () => { + // Configure the client to use the local cluster. 
+ anchor.setProvider(anchor.AnchorProvider.env()); + const provider = anchor.AnchorProvider.env(); + const program = anchor.workspace.BurryEscrow as Program; + const payer = (provider.wallet as AnchorWallet).payer; + + it("Create Burry Escrow Above Price", async () => { + // fetch switchboard devnet program object + const switchboardProgram = await SwitchboardProgram.load( + "devnet", + new anchor.web3.Connection("https://api.devnet.solana.com"), + payer, + ); + const aggregatorAccount = new AggregatorAccount( + switchboardProgram, + solUsedSwitchboardFeed, + ); + + // derive escrow state account + const [escrowState] = await anchor.web3.PublicKey.findProgramAddressSync( + [Buffer.from("MICHAEL BURRY"), payer.publicKey.toBuffer()], + program.programId, + ); + console.log("Escrow Account: ", escrowState.toBase58()); + + // fetch latest SOL price + const solPrice: Big | null = await aggregatorAccount.fetchLatestValue(); + if (solPrice === null) { + throw new Error("Aggregator holds no value"); + } + const failUnlockPrice = solPrice.plus(10).toNumber(); + const amountToLockUp = new anchor.BN(100); + + // Send transaction + try { + const tx = await program.methods + .deposit(amountToLockUp, failUnlockPrice) + .accounts({ + user: payer.publicKey, + escrowAccount: escrowState, + systemProgram: anchor.web3.SystemProgram.programId, + }) + .signers([payer]) + .rpc(); + + await provider.connection.confirmTransaction(tx, "confirmed"); + console.log("Your transaction signature", tx); + + // Fetch the created account + const newAccount = await program.account.escrowState.fetch(escrowState); + + const escrowBalance = await provider.connection.getBalance( + escrowState, + "confirmed", + ); + console.log("Onchain unlock price:", newAccount.unlockPrice); + console.log("Amount in escrow:", escrowBalance); + + // Check whether the data onchain is equal to local 'data' + assert(failUnlockPrice == newAccount.unlockPrice); + assert(escrowBalance > 0); + } catch (error) { + console.log(error); + assert.fail(error); + } + }); + + it("Attempt to withdraw while price is below UnlockPrice", async () => { + let didFail = false; + + // derive escrow address + const [escrowState] = await anchor.web3.PublicKey.findProgramAddressSync( + [Buffer.from("MICHAEL BURRY"), payer.publicKey.toBuffer()], + program.programId, + ); + + // send tx + try { + const tx = await program.methods + .withdraw() + .accounts({ + user: payer.publicKey, + escrowAccount: escrowState, + feedAggregator: solUsedSwitchboardFeed, + systemProgram: anchor.web3.SystemProgram.programId, + }) + .signers([payer]) + .rpc(); + + await provider.connection.confirmTransaction(tx, "confirmed"); + console.log("Your transaction signature", tx); + } catch (error) { + didFail = true; + + assert( + error.message.includes( + "Current SOL price is not above Escrow unlock price.", + ), + "Unexpected error message: " + error.message, + ); + } + + assert(didFail); + }); +}); +``` + + + +If you only want to run the vrf tests, change +`describe("burry-escrow-vrf", () => {` to: +`describe.only("burry-escrow-vrf", () => {` + + + +Now, we are going to set up our local VRF Oracle server using +`SwitchboardTestContext`. This will give us a `switchboard` context and an +`oracle` node. We call the initialization functions in the `before()` function. +This will run and complete before any tests begin. Lastly, let's add +`oracle?.stop()` to the `after()` function to clean everything up. 
+ +```typescript +describe.only("burry-escrow-vrf", () => { + // Configure the client to use the local cluster. + anchor.setProvider(anchor.AnchorProvider.env()); + const provider = anchor.AnchorProvider.env() + const program = anchor.workspace.BurryEscrow as Program; + const payer = (provider.wallet as AnchorWallet).payer + + // ADDED CODE + let switchboard: SwitchboardTestContext + let oracle: NodeOracle + + before(async () => { + switchboard = await SwitchboardTestContext.loadFromProvider(provider, { + name: "Test Queue", + // You can provide a keypair to so the PDA schemes dont change between test runs + // keypair: SwitchboardTestContext.loadKeypair(SWITCHBOARD_KEYPAIR_PATH), + queueSize: 10, + reward: 0, + minStake: 0, + oracleTimeout: 900, + // aggregators will not require PERMIT_ORACLE_QUEUE_USAGE before joining a queue + unpermissionedFeeds: true, + unpermissionedVrf: true, + enableBufferRelayers: true, + oracle: { + name: "Test Oracle", + enable: true, + // stakingWalletKeypair: SwitchboardTestContext.loadKeypair(STAKING_KEYPAIR_PATH), + }, + }) + + oracle = await NodeOracle.fromReleaseChannel({ + chain: "solana", + // use the latest testnet (devnet) version of the oracle + releaseChannel: "testnet", + // disables production capabilities like monitoring and alerts + network: "localnet", + rpcUrl: provider.connection.rpcEndpoint, + oracleKey: switchboard.oracle.publicKey.toBase58(), + // path to the payer keypair so the oracle can pay for txns + secretPath: switchboard.walletPath, + // set to true to suppress oracle logs in the console + silent: false, + // optional env variables to speed up the workflow + envVariables: { + VERBOSE: "1", + DEBUG: "1", + DISABLE_NONCE_QUEUE: "1", + DISABLE_METRICS: "1", + }, + }) + + switchboard.oracle.publicKey + + // start the oracle and wait for it to start heartbeating onchain + await oracle.startAndAwait() + }) + + after(() => { + oracle?.stop() + }) + +// ... rest of code +} +``` + +Now let's run the actual test. We'll structure the test to keep rolling dice +until we get doubles, then we'll check that we can withdraw the funds. + +First, we'll gather all of the accounts we need. The `switchboard` test context +gives us most of these. Then we'll need to call our `initVrfClient` function. +Finally, we'll roll our dice in a loop and check for doubles. 
+ +```typescript +it("Roll till you can withdraw", async () => { + // derive escrow address + const [escrowState] = await anchor.web3.PublicKey.findProgramAddressSync( + [Buffer.from("MICHAEL BURRY"), payer.publicKey.toBuffer()], + program.programId, + ); + + const vrfSecret = anchor.web3.Keypair.generate(); + const [vrfClientKey] = anchor.web3.PublicKey.findProgramAddressSync( + [ + Buffer.from("VRFCLIENT"), + payer.publicKey.toBytes(), + escrowState.toBytes(), + vrfSecret.publicKey.toBytes(), + ], + program.programId, + ); + console.log(`VRF Client: ${vrfClientKey}`); + + const vrfIxCoder = new anchor.BorshInstructionCoder(program.idl); + const vrfClientCallback: Callback = { + programId: program.programId, + accounts: [ + // ensure all accounts in consumeRandomness are populated + // { pubkey: payer.publicKey, isSigner: false, isWritable: true }, + { pubkey: escrowState, isSigner: false, isWritable: true }, + { pubkey: vrfClientKey, isSigner: false, isWritable: true }, + { pubkey: vrfSecret.publicKey, isSigner: false, isWritable: true }, + ], + ixData: vrfIxCoder.encode("consumeRandomness", ""), // pass any params for instruction here + }; + + const queue = await switchboard.queue.loadData(); + + // Create Switchboard VRF and Permission account + const [vrfAccount] = await switchboard.queue.createVrf({ + callback: vrfClientCallback, + authority: vrfClientKey, // vrf authority + vrfKeypair: vrfSecret, + enable: !queue.unpermissionedVrfEnabled, // only set permissions if required + }); + + // vrf data + const vrf = await vrfAccount.loadData(); + + console.log(`Created VRF Account: ${vrfAccount.publicKey}`); + + // derive the existing VRF permission account using the seeds + const [permissionAccount, permissionBump] = PermissionAccount.fromSeed( + switchboard.program, + queue.authority, + switchboard.queue.publicKey, + vrfAccount.publicKey, + ); + + const [payerTokenWallet] = + await switchboard.program.mint.getOrCreateWrappedUser( + switchboard.program.walletPubkey, + { fundUpTo: 1.0 }, + ); + + // initialize vrf client + try { + const tx = await program.methods + .initVrfClient() + .accounts({ + user: payer.publicKey, + escrowAccount: escrowState, + vrfState: vrfClientKey, + vrf: vrfAccount.publicKey, + systemProgram: anchor.web3.SystemProgram.programId, + }) + .signers([payer]) + .rpc(); + } catch (error) { + console.log(error); + assert.fail(); + } + + let rolledDoubles = false; + while (!rolledDoubles) { + try { + // Request randomness and roll dice + const tx = await program.methods + .getOutOfJail({ + switchboardStateBump: switchboard.program.programState.bump, + permissionBump, + }) + .accounts({ + vrfState: vrfClientKey, + vrf: vrfAccount.publicKey, + user: payer.publicKey, + payerWallet: payerTokenWallet, + escrowAccount: escrowState, + oracleQueue: switchboard.queue.publicKey, + queueAuthority: queue.authority, + dataBuffer: queue.dataBuffer, + permission: permissionAccount.publicKey, + switchboardEscrow: vrf.escrow, + programState: switchboard.program.programState.publicKey, + + switchboardProgram: switchboard.program.programId, + recentBlockhashes: anchor.web3.SYSVAR_RECENT_BLOCKHASHES_PUBKEY, + tokenProgram: anchor.utils.token.TOKEN_PROGRAM_ID, + systemProgram: anchor.web3.SystemProgram.programId, + }) + .signers([payer]) + .rpc(); + + await provider.connection.confirmTransaction(tx, "confirmed"); + console.log(`Created VrfClient Account: ${vrfClientKey}`); + + // wait a few sec for switchboard to generate the random number and invoke callback ix + console.log("Rolling 
Die..."); + + let didUpdate = false; + let vrfState = await program.account.vrfClientState.fetch(vrfClientKey); + + while (!didUpdate) { + console.log("Checking die..."); + vrfState = await program.account.vrfClientState.fetch(vrfClientKey); + didUpdate = vrfState.timestamp.toNumber() > 0; + await delay(1000); + } + + console.log( + "Roll results - Die 1:", + vrfState.dieResult1, + "Die 2:", + vrfState.dieResult2, + ); + if (vrfState.dieResult1 == vrfState.dieResult2) { + rolledDoubles = true; + } else { + console.log("Resetting die..."); + await delay(5000); + } + } catch (error) { + console.log(error); + assert.fail(); + } + } + + const tx = await program.methods + .withdraw() + .accounts({ + user: payer.publicKey, + escrowAccount: escrowState, + feedAggregator: solUsedSwitchboardFeed, + systemProgram: anchor.web3.SystemProgram.programId, + }) + .signers([payer]) + .rpc(); + + await provider.connection.confirmTransaction(tx, "confirmed"); +}); +``` + +Note the function where we get our `payerTokenWallet`. VRF actually requires the +requester to pay some wrapped SOL. This is part of the incentive mechanism of +the oracle network. Fortunately, with testing, Switchboard gives us this really +nice function to create and fund a test wallet. + +```typescript +const [payerTokenWallet] = + await switchboard.program.mint.getOrCreateWrappedUser( + switchboard.program.walletPubkey, + { fundUpTo: 1.0 }, + ); +``` + +And there you have it! You should be able to run and pass all of the tests using +`anchor test`. + +If something is not working, go back and find where you went wrong. +Alternatively feel free to try out the +[solution code on the `vrf` branch](https://github.com/solana-developers/burry-escrow/tree/vrf). +Remember to update your program keys and wallet path like we did in the +[the Setup step](#1-program-setup). + +## Challenge + +Now it's time to work on something independently. Let's add some +[Monopoly rules]() to our +program. Add some logic to the program to track how many times a user rolls. If +they roll 3 times without rolling doubles, they should be able to withdraw their +funds, just like getting out of jail in Monopoly. + +If you get stuck, we have the solution in the +[`vrf-challenge-solution` branch](https://github.com/solana-developers/burry-escrow/tree/vrf-challenge-solution). + + + +Push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=5af49eda-f3e7-407d-8cd7-78d0653ee17c)! + + diff --git a/content/courses/intro-to-solana/getting-started.mdx b/content/courses/intro-to-solana/getting-started.mdx new file mode 100644 index 000000000..15565d26c --- /dev/null +++ b/content/courses/intro-to-solana/getting-started.mdx @@ -0,0 +1,127 @@ +--- +title: Course Guide +objectives: + - understand what web3 is + - understand what Solana is + - learn how this course is structured + - know how to get the most from this course +description: "Understand what web3, blockchains, and Solana are." +--- + +## Welcome + +Welcome to the best starting point for developers looking to learn web3 and +blockchain! + +## What is web 3? + +Typically, in older systems, people interact with each other through third-party +platforms: + +- User accounts are stored on large platforms like Google, X (formerly known as + Twitter), and Meta (Facebook, Instagram). These accounts can be removed at + will by the companies, and items 'owned' by these accounts may be lost + forever. 
+ +- Accounts that store and transfer value - like payment cards, bank accounts, + and stock trading accounts - are handled by large platforms like credit card + companies, money transfer organizations, and stock exchanges. In many cases, + these companies take a piece (around 1% - 3%) of every transaction that occurs + on their platforms. They may often slow transaction settlement down, to + benefit the organization. In some cases the item being transferred may not + belong to the recipient at all but is rather held on the recipient's behalf. + +Web3 is an evolution of the internet that allows people to **transact directly +with each other**: + +- Users own their accounts, represented by their wallet. + +- Transfers of value can occur directly between users. + +- Tokens - representing currencies, digital art, event tickets, real estate, or + whatever else - are fully under the custody of the user. + +Common uses of web3 include: + +- Selling goods and services online with near-zero fees and instant settlement. + +- Selling digital or physical items, ensuring that each item is genuine and that + copies are distinguishable from original items. + +- Instant global payments, without the time and expense of traditional money + transfer companies. + +## What is Solana? + +Solana allows people to **transact directly with each other instantly at almost +no cost**. + +Compared to older platforms like Bitcoin and Ethereum, Solana is: + +- Significantly faster - most transactions complete in a second or two. + +- Massively cheaper - transaction fees (referred to as 'gas fees' in older + networks) are typically $0.00025 (much less than one penny) regardless of the + value of what's being transferred. + +- Highly decentralized, having one of the highest Nakamoto coefficients + (decentralization score) of any proof-of-stake network. + +Many of the common use cases on Solana are only possible on Solana, due to the +high costs and slow translation times of older blockchains. + +## What do I need before I start? + +You **don't** need previous blockchain experience, or Rust knowledge, to follow +this course! You do need: + +- A Linux, Mac or Windows computer. + - Windows machines should have [Windows Terminal](https://aka.ms/terminal) and + [WSL](https://learn.microsoft.com/en-us/windows/wsl/) installed. + - Have [node.js](https://nodejs.org/en/download) 20 installed. Windows + machines should install node.js inside WSL2. +- Basic TypeScript programming experience. +- Basic use of the command line +- Basic use of git (either via the command line or your favorite GUI) + +## How do I use the course effectively? + +This course is broken down into individual lessons. Each lesson has three +sections: + +- **Overview** - the overview contains explanatory text, examples, and code + snippets. You are _not_ expected to code along with any of the examples shown + here. The goal is to simply read through and get initial exposure to the + lesson topics. + +- **Lab** - a practical project you _absolutely should_ code along with. This is + your second exposure to the content as well as your first opportunity to dive + in and _do the thing_. + +- **Challenge** - another project, with just a few simple prompts that you + should take and implement independently. + +The lessons here are very effective, but everyone comes from different +backgrounds and aptitudes that can't be taken into account by static content. +With that in mind, here are three recommendations for how to get the most out of +the course: + +1. 
**Be brutally honest with yourself** - this may sound a little vague, but + being honest with yourself about how well you understand a certain topic is + essential to mastering it. It's really easy to read a thing and think "yeah, + yeah I get it," only to realize later that you actually didn't. Be honest + with yourself while going through each lesson. Please don't hesitate to + repeat sections if you need to or do outside research when the lesson + phrasing doesn't quite work for you. + +2. **Do every lab and challenge** - this supports the first point. It's pretty + tough to lie to yourself about how well you know something when you make + yourself try to do it. Do every lab and every challenge to test where you're + at and repeat them as needed. We provide solution code for every lab, but be + sure to use it as a helpful resource rather than a crutch. + +3. **Go above and beyond** - this sounds cliche, but don't just stop at what the + lab and challenges ask you to do. Get creative! Take the projects and make + them your own. Build past them. The more you practice the better you get. + +Alright, that's it for the pep talk. Get after it! diff --git a/content/courses/intro-to-solana/index.mdx b/content/courses/intro-to-solana/index.mdx new file mode 100644 index 000000000..c27f02a2a --- /dev/null +++ b/content/courses/intro-to-solana/index.mdx @@ -0,0 +1,5 @@ +--- +author: unboxed +title: Introduction to cryptography and Solana clients +description: Learn the basics of how to interact with the Solana blockchain. +--- diff --git a/content/courses/intro-to-solana/interact-with-wallets.mdx b/content/courses/intro-to-solana/interact-with-wallets.mdx new file mode 100644 index 000000000..b6c8a3e84 --- /dev/null +++ b/content/courses/intro-to-solana/interact-with-wallets.mdx @@ -0,0 +1,681 @@ +--- +title: Interact With Wallets +objectives: + - Explain wallets + - Install a Solana wallet app and set your wallet app to + [Devnet](https://api.devnet.solana.com/) + - Create a React app that uses Wallet Adapter to have users sign transactions +description: "Connect with installed browser wallets from your React apps." +--- + +## Summary + +- **Wallets** store your secret key and allow users to sign transactions +- **Hardware wallets** store your secret key on a separate device +- **Software wallets** use your computer for secure storage. On desktops, + software wallets are often **browser extensions** that add the ability to + connect to a wallet from a website. On mobile, wallet apps have their own + browsers. +- Solana's **Wallet Adapter** allows you to build websites that can request a + user's wallet address and propose transactions for them to sign + +## Lesson + +### Wallets + +In the previous two lessons, we discussed keypairs. Keypairs are used to locate +accounts and sign transactions. While the public key of a keypair is perfectly +safe to share, the secret key should always be kept in a secure location. If a +user's secret key is exposed, then a malicious actor could execute transactions +with the authority of that user, allowing them to transfer all the assets +inside. + +A “wallet” refers to anything that stores a secret key to keep it secure. These +secure storage options can generally be described as either “hardware” or +“software” wallets. Hardware wallets are storage devices that are separate from +your computer. Software wallets are applications you can install on your +existing device(s). 
+ +- On mobile, software wallets are typically mobile apps, installed through the + iOS App Store or Google Play. These include their own web browsers. +- On desktop, software wallets often come in the form of a browser extension. + +Both techniques allow websites to interact easily with the wallet, for example: + +1. Seeing the wallet's wallet address (their public key) +2. Submitting transactions for a user's approval to sign +3. Sending signed transactions to the network + +Signing transactions requires using your secret key. By letting a site submit a +transaction to your wallet and having the wallet handle the signing, you ensure +that you never expose your secret key to the website. Instead, you only share +the secret key with the wallet application. + +Unless you're creating a wallet application yourself, your code should never +need to ask a user for their secret key. Instead, you can ask users to connect +to your site using a reputable wallet. + +## Solana's Wallet Adapter + +If you build web apps, and need users to be able to connect to their wallets and +sign transactions through your apps, you'll want Solana's Wallet Adapter. Wallet +Adapter is a suite of modular packages: + +- The core functionality is found in `@solana/wallet-adapter-base`. +- React support is added by `@solana/wallet-adapter-react`. +- Additional packages provide components for common UI frameworks. In this + lesson, and throughout this course, we'll be using components from + `@solana/wallet-adapter-react-ui`. + +Finally, some packages are adapters for specific wallet apps. These are now no +longer necessary in most cases - see below. + +### Install Wallet-Adapter Libraries for React + +When adding wallet support to an existing React app, you start by installing the +appropriate packages. You'll need `@solana/wallet-adapter-base`, +`@solana/wallet-adapter-react`. If you plan to use the provided React +components, you'll also need to add `@solana/wallet-adapter-react-ui`. + +All wallets that support the +[Wallet Standard](https://github.com/wallet-standard/wallet-standard) are +supported out of the box, and all the popular Solana wallets support the Wallet +Standard. However, if you wish to add support for any wallets that don't support +the standard, add a package for them. + +``` +npm install @solana/wallet-adapter-base \ + @solana/wallet-adapter-react \ + @solana/wallet-adapter-react-ui +``` + + + +We're learning doing this manually to learn about Wallet +Adapter, but you can also use +[create-solana-dapp](https://github.com/solana-developers/create-solana-dapp) to +create a brand new React or NextJS app that supports Solana wallets. + + + +### Connect To Wallets + +`@solana/wallet-adapter-react` allows us to persist and access wallet connection +states through hooks and context providers, namely: + +- `useWallet` +- `WalletProvider` +- `useConnection` +- `ConnectionProvider` + +For these to work properly, any use of `useWallet` and `useConnection` should be +wrapped in `WalletProvider` and `ConnectionProvider`. 
One of the best ways to +ensure this is to wrap your entire app in `ConnectionProvider` and +`WalletProvider`: + +```tsx +import { NextPage } from "next"; +import { FC, ReactNode, useMemo } from "react"; +import { + ConnectionProvider, + WalletProvider, +} from "@solana/wallet-adapter-react"; +import { clusterApiUrl } from "@solana/web3.js"; +import "@solana/wallet-adapter-react-ui/styles.css"; + +export const Home: NextPage = props => { + const endpoint = clusterApiUrl("devnet"); + const wallets = useMemo(() => [], []); + + return ( + + +

+    <ConnectionProvider endpoint={endpoint}>
+      <WalletProvider wallets={wallets}>
+        <p>Put the rest of your app here</p>
+      </WalletProvider>
+    </ConnectionProvider>
+ ); +}; +``` + +Note that `ConnectionProvider` requires an `endpoint` property and that +`WalletProvider` requires a `wallets` property. We're continuing to use the +endpoint for the Devnet cluster, and since all major Solana wallet applications +support the Wallet Standard, we don't need any wallet-specific adapters. At this +point, you can connect with `wallet.connect()`, which will instruct the wallet +to prompt the user for permission to view their public key and request approval +for transactions. + +![wallet connection prompt](/assets/courses/unboxed/wallet-connect-prompt.png) + +While you could do this in a `useEffect` hook, you'll usually want to provide +more sophisticated functionality. For example, you may want users to be able to +choose from a list of supported wallet applications or disconnect after they've +already connected. + +### @solana/wallet-adapter-react-ui + +You can create custom components for this, or you can leverage components +provided by `@solana/wallet-adapter-react-ui`. The simplest way to provide a +full-featured wallet experience is to use `WalletModalProvider` and +`WalletMultiButton`: + +```tsx +import { NextPage } from "next"; +import { FC, ReactNode, useMemo } from "react"; +import { + ConnectionProvider, + WalletProvider, +} from "@solana/wallet-adapter-react"; +import { + WalletModalProvider, + WalletMultiButton, +} from "@solana/wallet-adapter-react-ui"; +import { + clusterApiUrl, + Transaction, + PublicKey, + SystemProgram, +} from "@solana/web3.js"; +import "@solana/wallet-adapter-react-ui/styles.css"; + +const Home: NextPage = props => { + const endpoint = clusterApiUrl("devnet"); + const wallets = useMemo(() => [], []); + + return ( + + + + +

+    <ConnectionProvider endpoint={endpoint}>
+      <WalletProvider wallets={wallets}>
+        <WalletModalProvider>
+          <WalletMultiButton />
+          <p>Put the rest of your app here</p>
+        </WalletModalProvider>
+      </WalletProvider>
+    </ConnectionProvider>
+ ); +}; + +export default Home; +``` + +The `WalletModalProvider` adds functionality for presenting a modal screen for +users to select which wallet they'd like to use. The `WalletMultiButton` changes +behavior to match the connection status: + +![multi button select wallet option](/assets/courses/unboxed/multi-button-select-wallet.png) + +![connect wallet modal](/assets/courses/unboxed/connect-wallet-modal.png) + +![multi button connect options](/assets/courses/unboxed/multi-button-connect.png) + +![multi button connected state](/assets/courses/unboxed/multi-button-connected.png) + +You can also use more granular components if you need more specific +functionality: + +- `WalletConnectButton` +- `WalletModal` +- `WalletModalButton` +- `WalletDisconnectButton` +- `WalletIcon` + +### Access Account Info + +Once your site is connected to a wallet, `useConnection` will retrieve a +`Connection` object and `useWallet` will get the `WalletContextState`. +`WalletContextState` has a property `publicKey` that is `null` when not +connected to a wallet and has the public key of the user's account when a wallet +is connected. With a public key and a connection, you can fetch account info and +more. + +```tsx +import { useConnection, useWallet } from "@solana/wallet-adapter-react"; +import { LAMPORTS_PER_SOL } from "@solana/web3.js"; +import { FC, useEffect, useState } from "react"; + +export const BalanceDisplay: FC = () => { + const [balance, setBalance] = useState(0); + const { connection } = useConnection(); + const { publicKey } = useWallet(); + + useEffect(() => { + const updateBalance = async () => { + if (!connection || !publicKey) { + console.error("Wallet not connected or connection unavailable"); + } + + try { + connection.onAccountChange( + publicKey, + updatedAccountInfo => { + setBalance(updatedAccountInfo.lamports / LAMPORTS_PER_SOL); + }, + "confirmed", + ); + + const accountInfo = await connection.getAccountInfo(publicKey); + + if (accountInfo) { + setBalance(accountInfo.lamports / LAMPORTS_PER_SOL); + } else { + throw new Error("Account info not found"); + } + } catch (error) { + console.error("Failed to retrieve account info:", error); + } + }; + + updateBalance(); + }, [connection, publicKey]); + + return ( +
+    <div>
+      <p>{publicKey ? `Balance: ${balance} SOL` : ""}</p>
+    </div>
+ ); +}; +``` + +Note the call to connection.onAccountChange(), which updates the account balance +shown once the network confirms the transaction. + +### Send Transactions + +`WalletContextState` also provides a `sendTransaction` function that you can use +to submit transactions for approval. + +```tsx +const { publicKey, sendTransaction } = useWallet(); +const { connection } = useConnection(); + +const sendSol = async event => { + event.preventDefault(); + + if (!publicKey) { + console.error("Wallet not connected"); + return; + } + + try { + const recipientPubKey = new PublicKey(event.currentTarget.recipient.value); + + const transaction = new Transaction(); + const sendSolInstruction = SystemProgram.transfer({ + fromPubkey: publicKey, + toPubkey: recipientPubKey, + lamports: 0.1 * LAMPORTS_PER_SOL, + }); + + transaction.add(sendSolInstruction); + + const signature = await sendTransaction(transaction, connection); + console.log(`Transaction signature: ${signature}`); + } catch (error) { + console.error("Transaction failed", error); + } +}; +``` + +When this function is called, the connected wallet will display the transaction +for the user's approval. If approved, then the transaction will be sent. + +![wallet transaction approval prompt](/assets/courses/unboxed/wallet-transaction-approval-prompt.png) + +## Lab + +Let's take the Ping program from the last lesson and build a frontend that lets +users approve a transaction that pings the program. As a reminder, the program's +public key is `ChT1B39WKLS8qUrkLvFDXMhEJ4F1XZzwUNHUt4AU9aVa` and the public key +for the data account is `Ah9K7dQ8EHaZqcAsgBW8w37yN2eAy3koFmUn4x3CJtod`. + +![Solana Ping App](/assets/courses/unboxed/solana-ping-app.png) + +### Download a Solana wallet + +You'll need a Solana wallet app. There's a wide variety of +[Solana wallets](/docs/intro/wallets) available. We're going +to use a browser-extension wallet in this case, since you probably code on a +laptop or desktop! + +Follow the wallets instructions for creating a new account and a new wallet. + +Then set your wallet to use Devnet, for example: + +- In Phantom, click **Settings** -> **Developer Settings** -> **Testnet mode**. + 'Testnet mode' sets Solana to Devnet by default. +- In Solflare, click **Settings** -> **General** -> **Network** -> **DevNet** +- In Backpack, click **Preferences** -> **Developer Mode** + +This ensures that your wallet app will be connected to the same network we'll be +using in this lab. + +### Download the starter code + +{/* TODO: this uses old Unboxed starter repos, we could move it to use `create-solana-dapp` eg https://github.com/solana-developers/anchor-ping-frontend/tree/main/web */} + +Download the +[starter code for this project](https://github.com/Unboxed-Software/solana-ping-frontend/tree/starter). +This project is a simple Next.js application. It's mostly empty except for the +`AppBar` component. We'll build the rest throughout this lab. + +You can see its current state with the command `npm run dev` in the console. + +### Wrap the app in context providers + +To start, we're going to create a new component to contain the various +Wallet-Adapter providers that we'll be using. Create a new file inside the +`components` folder called `WalletContextProvider.tsx`. 
+ +Let's start with some of the boilerplate for a functional component: + +```tsx +import { FC, ReactNode } from "react"; + +const WalletContextProvider: FC<{ children: ReactNode }> = ({ children }) => { + return ( + + )); +}; + +export default WalletContextProvider; +``` + +To properly connect to the user's wallet, we'll need a `ConnectionProvider`, +`WalletProvider`, and `WalletModalProvider`. Start by importing these components +from `@solana/wallet-adapter-react` and `@solana/wallet-adapter-react-ui`. Then +add them to the `WalletContextProvider` component. Note that +`ConnectionProvider` requires an `endpoint` parameter and `WalletProvider` +requires an array of `wallets`. For now, just use an empty string and an empty +array, respectively. + +```tsx +import { FC, ReactNode } from "react"; +import { + ConnectionProvider, + WalletProvider, +} from "@solana/wallet-adapter-react"; +import { WalletModalProvider } from "@solana/wallet-adapter-react-ui"; + +const WalletContextProvider: FC<{ children: ReactNode }> = ({ children }) => { + return ( + + + {children} + + + ); +}; + +export default WalletContextProvider; +``` + +The last things we need are an actual endpoint for `ConnectionProvider` and the +supported wallets for `WalletProvider`. + +For the endpoint, we'll use the same `clusterApiUrl` function from the +`@solana/web3.js` library that we've used before so you'll need to import it. +For the array of wallets you'll also need to import the +`@solana/wallet-adapter-wallets` library. + +After importing these libraries, create a constant `endpoint` that uses the +`clusterApiUrl` function to get the URL for Devnet. Then create a constant named +`wallets` and set it to an empty array - since all wallets support Wallet +Standard, we no longer need any custom wallet adapter. Finally, replace the +empty string and empty array in `ConnectionProvider` and `WalletProvider`, +respectively. + +To complete this component, add +`require('@solana/wallet-adapter-react-ui/styles.css');` below your imports to +ensure proper styling and behavior of the Wallet Adapter library components. + +```tsx +import { FC, ReactNode, useMemo } from "react"; +import { + ConnectionProvider, + WalletProvider, +} from "@solana/wallet-adapter-react"; +import { WalletModalProvider } from "@solana/wallet-adapter-react-ui"; +import { clusterApiUrl } from "@solana/web3.js"; +import * as walletAdapterWallets from "@solana/wallet-adapter-wallets"; +require("@solana/wallet-adapter-react-ui/styles.css"); + +const WalletContextProvider: FC<{ children: ReactNode }> = ({ children }) => { + const endpoint = clusterApiUrl("devnet"); + const wallets = useMemo(() => [], []); + + return ( + + + {children} + + + ); +}; + +export default WalletContextProvider; +``` + +### Add wallet multi-button + +Next, let's set up the Connect button. The current button is just a placeholder +because rather than using a standard button or creating a custom component, +we'll be using Wallet-Adapter's “multi-button.” This button interfaces with the +providers we set up in `WalletContextProvider` and let's users choose a wallet, +connect to a wallet, and disconnect from a wallet. If you ever need more custom +functionality, you can create a custom component to handle this. + +Before we add the “multi-button,” we need to wrap the app in the +`WalletContextProvider`. 
Do this by importing it in `index.tsx` and adding it +after the closing `` tag: + +```tsx +import { NextPage } from "next"; +import styles from "../styles/Home.module.css"; +import WalletContextProvider from "../components/WalletContextProvider"; +import { AppBar } from "../components/AppBar"; +import Head from "next/head"; +import { PingButton } from "../components/PingButton"; + +const Home: NextPage = props => { + return ( +
+    <div className={styles.App}>
+      <Head>
+        <title>Wallet-Adapter Example</title>
+      </Head>
+      <WalletContextProvider>
+        <AppBar />
+        <div className={styles.AppBody}>
+          <PingButton />
+        </div>
+      </WalletContextProvider>
+    </div>
+ ); +}; + +export default Home; +``` + +If you run the app, everything should still look the same since the current +button on the top right is still just a placeholder. To remedy this, open +`AppBar.tsx` and replace `` with ``. +You'll need to import `WalletMultiButton` from +`@solana/wallet-adapter-react-ui`. + +```tsx +import { FC } from "react"; +import styles from "../styles/Home.module.css"; +import Image from "next/image"; +import { WalletMultiButton } from "@solana/wallet-adapter-react-ui"; + +export const AppBar: FC = () => { + return ( +
+ + Wallet-Adapter Example + +
+ ); +}; +``` + +At this point, you should be able to run the app and interact with the +multi-button at the top-right of the screen. It should now read, "Select +Wallet." If you have the a wallet installed, you should be able to use this +button to connect your wallet to the site. + +### Create button to ping program + +Now that our app can connect to our wallet, let's make the “Ping!” button +actually do something. + +Start by opening the `PingButton.tsx` file. We're going to replace the +`console.log` inside of `onClick` with code that will create a transaction and +submit it to the wallet app for the end user's approval. + +First, we need a connection, the wallet's public key, and Wallet-Adapter's +`sendTransaction` function. To get this, we need to import `useConnection` and +`useWallet` from `@solana/wallet-adapter-react`. While we're here, let's also +import `@solana/web3.js` since we'll need it to create our transaction. + +```tsx +import { useConnection, useWallet } from "@solana/wallet-adapter-react"; +import { + PublicKey, + Transaction, + TransactionInstruction, + sendTransaction, +} from "@solana/web3.js"; +import { FC, useState } from "react"; +import styles from "../styles/PingButton.module.css"; + +export const PingButton: FC = () => { + const onClick = () => { + console.log("Ping!"); + }; + + return ( +
+ +
+ ); +}; +``` + +Now use the `useConnection` hook to create a `connection` constant and the +`useWallet` hook to create `publicKey` and `sendTransaction` constants. + +```tsx +import { useConnection, useWallet } from "@solana/wallet-adapter-react"; +import { + PublicKey, + Transaction, + TransactionInstruction, + sendTransaction, +} from "@solana/web3.js"; +import { FC, useState } from "react"; +import styles from "../styles/PingButton.module.css"; + +export const PingButton: FC = () => { + const { connection } = useConnection(); + const { publicKey, sendTransaction } = useWallet(); + + const onClick = () => { + console.log("Ping!"); + }; + + return ( +
+ +
+ ); +}; +``` + +With that, we can fill in the body of `onClick`. + +First, check that both `connection` and `publicKey` exist (if either does not +then the user's wallet isn't connected yet). + +Next, construct two instances of `PublicKey`, one for the program ID +`ChT1B39WKLS8qUrkLvFDXMhEJ4F1XZzwUNHUt4AU9aVa` and one for the data account +`Ah9K7dQ8EHaZqcAsgBW8w37yN2eAy3koFmUn4x3CJtod`. + +Next, construct a `Transaction`, then a new `TransactionInstruction` that +includes the data account as a writable key. + +Next, add this instruction to the transaction. + +Finally, call `sendTransaction`. + +```tsx +const onClick = async () => { + if (!connection || !publicKey) { + console.error("Wallet not connected or connection unavailable"); + } + + try { + const programId = new PublicKey(PROGRAM_ID); + const programDataAccount = new PublicKey(DATA_ACCOUNT_PUBKEY); + const transaction = new Transaction(); + + const instruction = new TransactionInstruction({ + keys: [ + { + pubkey: programDataAccount, + isSigner: false, + isWritable: true, + }, + ], + programId, + }); + + transaction.add(instruction); + + const signature = await sendTransaction(transaction, connection); + console.log("Transaction Signature:", signature); + } catch (error) { + console.error("Transaction failed:", error); + } +}; +``` + +And that's it! If you refresh the page, connect your wallet, and click the ping +button, your wallet should present you with a popup to confirm the transaction. + +### Add some polish + +There's a lot you could do to make the user experience here even better. For +example, you could change the UI to only show you the Ping button when a wallet +is connected and display some other prompt otherwise. You could link to the +transaction on Solana Explorer after a user confirms a transaction so they can +easily go look at the transaction details. The more you experiment with it, the +more comfortable you'll get, so get creative! + +You can also download the +[full source code from this lab](https://github.com/Unboxed-Software/solana-ping-frontend) +to understand all of this in context. + +## Challenge + +Now it's your turn to build something independently. Create an application that +lets a user connect their wallet and send SOL to another account. + +![Send SOL App](/assets/courses/unboxed/solana-send-sol-app.png) + +1. You can build this from scratch or you can + [download the starter code](https://github.com/Unboxed-Software/solana-send-sol-frontend/tree/starter). +2. Wrap the starter application in the appropriate context providers. +3. In the form component, set up the transaction and send it to the user's + wallet for approval. +4. Get creative with the user experience. Add a link to let the user view the + transaction on Solana Explorer or something else that seems cool to you! + +If you get really stumped, feel free to +[check out the solution code](https://github.com/Unboxed-Software/solana-send-sol-frontend/tree/main). + + + +Push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=69c5aac6-8a9f-4e23-a7f5-28ae2845dfe1)! 
+ + diff --git a/content/courses/intro-to-solana/intro-to-cryptography.mdx b/content/courses/intro-to-solana/intro-to-cryptography.mdx new file mode 100644 index 000000000..8418bb544 --- /dev/null +++ b/content/courses/intro-to-solana/intro-to-cryptography.mdx @@ -0,0 +1,253 @@ +--- +title: Cryptography and the Solana Network +objectives: + - Understand symmetric and asymmetric cryptography + - Explain keypairs + - Generate a new keypair + - Load a keypair from an env file +description: "Understand asymmetric cryptography and how Solana uses it." +--- + +## Summary + +- A **keypair** is a matching pair of **public key** and **secret key**. +- The **public key** is used as an “address” that points to an account on the + Solana network. A public key can be shared with anyone. +- The **secret key** is used to verify authority over the account. As the name + suggests, you should always keep secret keys _secret_. +- `@solana/web3.js` provides helper functions for creating a brand new keypair, + or for constructing a keypair using an existing secret key. + +## Lesson + +In this lesson, we will explore the basics of cryptography and how it's applied +within the Solana ecosystem. + +### Symmetric and Asymmetric Cryptography + +'Cryptography' the study of hiding information. There are two main types of +cryptography you'll encounter day to day: + +**Symmetric Cryptography** is where the same key is used to encrypt and decrypt. +It's hundreds of years old and has been used by everyone from the ancient +Egyptians to Queen Elizabeth I. + +There's a variety of symmetric cryptography algorithms, but the most common +you'll see today are AES and Chacha20. + +**Asymmetric Cryptography** + +- Asymmetric cryptography - also called + '[public key cryptography](https://en.wikipedia.org/wiki/Public-key_cryptography)' + was developed in the 1970s. In asymmetric cryptography, participants have + pairs of keys (or **keypairs**). Each keypair consists of a **secret key** and + a **public key**. Asymmetric encryption works differently from symmetric + encryption, and can do different things: + +- **Encryption**: if it's encrypted with a public key, only the secret key from + the same keypair can be used to read it +- **Signatures**: if it's encrypted with a secret key, the public key from the + same keypair can be used to prove the secret key holder signed it. +- You can even use asymmetric cryptography to work out a good key for symmetric + cryptography! This is called **key exchange**, where you use your public keys + and the recipient's public key to come up with a 'session' key. +- There's a variety of asymmetric cryptography algorithms, but the most common + you'll see today are variants of ECC or RSA. + +Asymmetric encryption is very popular: + +- Your bank card has a secret key inside it that's used to sign transactions. + + Your bank can confirm you made the transaction by checking them with the + matching public key. + +- Websites include a public key in their certificate. Your browser will use this + public key to encrypt the data (like personal information, login details, and + credit card numbers) it sends to the web page. + + The website has the matching private key so that the website can read the + data. + +- Your electronic passport was signed by the country that issued it to ensure + the passport isn't forged. + + The electronic passport gates can confirm this using the public key of your + issuing country. + +- The messaging apps on your phone use key exchange to make a session key. 
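+
+Several of the examples above rely on digital signatures. To make that
+concrete, here is a minimal sketch of signing and verifying a message. It
+assumes the `tweetnacl` package (not part of this lesson's dependencies)
+alongside `@solana/web3.js`, which we install later in this lesson: the secret
+key produces the signature, and anyone holding only the public key can verify
+it.
+
+```typescript
+import { Keypair } from "@solana/web3.js";
+import nacl from "tweetnacl";
+
+const keypair = Keypair.generate();
+const message = new TextEncoder().encode("I approve this message");
+
+// Sign with the 64-byte secret key...
+const signature = nacl.sign.detached(message, keypair.secretKey);
+
+// ...and verify using only the 32-byte public key
+const isValid = nacl.sign.detached.verify(
+  message,
+  signature,
+  keypair.publicKey.toBytes(),
+);
+
+console.log(`Signature valid? ${isValid}`); // true
+```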
+ +In short, cryptography is all around us. Solana, as well as other blockchains, +are but one use of cryptography. + +### Solana uses public keys as addresses + +![Solana wallet addresses](/assets/courses/unboxed/wallet-addresses.svg) + +People participating in the Solana network have at least one keypair. In Solana: + +- The **public key** is used as an “address” that points to an account on the + Solana network. Even friendly names - like `example.sol` - point to addresses + like `dDCQNnDmNbFVi8cQhKAgXhyhXeJ625tvwsunRyRc7c8` + +- The **secret key** is used to verify authority over that keypair. If you have + the secret key for an address, you control the tokens inside that address. For + this reason, as the name suggests, you should always keep secret keys + _secret_. + +### Using @solana/web3.js to make a keypair + +You can use the Solana blockchain from either the browser or node.js with the +`@solana/web3.js` npm module. Set up a project how you normally would, then +[use `npm`](https://nodesource.com/blog/an-absolute-beginners-guide-to-using-npm/) +to install `@solana/web3.js` + +```shell +npm i @solana/web3.js@1 +``` + +We'll cover a lot of +[web3.js](/docs/clients/javascript-reference) gradually +throughout this course, but you can also check out the +[official web3.js documentation](/docs/clients/javascript-reference). + +To send tokens, send NFTS, or read and write data Solana, you'll need your own +keypair. To make a new keypair, use the `Keypair.generate()` function from +`@solana/web3.js`: + +```typescript +import { Keypair } from "@solana/web3.js"; + +const keypair = Keypair.generate(); + +console.log(`The public key is: `, keypair.publicKey.toBase58()); +console.log(`The secret key is: `, keypair.secretKey); +``` + + + +Since the keypair can be regenerated from the secret key, we usually only store +the secret key, and restore the keypair from the secret key. + +Additionally, since the secret key gives authority over the address, we don't +store secret keys in source code. Instead, we: + +- Put secret keys in a `.env` file +- Add `.env` to `.gitignore` so the `.env` file is not committed. + + + +### Loading an existing keypair + +If you already have a keypair you'd like to use, you can load a `Keypair` from +an existing secret key stored in the filesystem or an `.env` file. In node.js, +the `@solana-developers/helpers` npm package includes some extra functions: + +```bash +npm i @solana-developers/helpers +``` + +- To use an `.env` file use `getKeypairFromEnvironment()` +- To use a Solana CLI file use `getKeypairFromFile()` + +```typescript +import "dotenv/config"; +import { getKeypairFromEnvironment } from "@solana-developers/helpers"; + +const keypair = getKeypairFromEnvironment("SECRET_KEY"); +``` + +You know how to make and load keypairs! Let's practice what we've learned. + +## Lab + +In this lab we will learn about keypairs, and how to store secret keys securely +on solana + +### Installation + +Make a new directory, install TypeScript, Solana web3.js and esrun: + +```bash +mkdir generate-keypair +cd generate-keypair +npm init -y +npm install typescript @solana/web3.js@1 esrun @solana-developers/helpers@2 +``` + +Make a new file called `generate-keypair.ts` + +```typescript +import { Keypair } from "@solana/web3.js"; +const keypair = Keypair.generate(); +console.log(`✅ Generated keypair!`); +``` + +Run `npx esrun generate-keypair.ts`. You should see the text: + +``` +✅ Generated keypair! +``` + +Each `Keypair` has a `publicKey` and `secretKey` property. 
Update the file: + +```typescript +import { Keypair } from "@solana/web3.js"; + +const keypair = Keypair.generate(); + +console.log(`The public key is: `, keypair.publicKey.toBase58()); +console.log(`The secret key is: `, keypair.secretKey); +console.log(`✅ Finished!`); +``` + +Run `npx esrun generate-keypair.ts`. You should see the text: + +``` +The public key is: 764CksEAZvm7C1mg2uFmpeFvifxwgjqxj2bH6Ps7La4F +The secret key is: Uint8Array(64) [ + (a long series of numbers) +] +✅ Finished! +``` + +## Loading an existing keypair from an .env file + +To ensure that your secret key stays secure, we recommend injecting the secret +key using a `.env` file: + +Make a new file called `.env` with the contents of the key you made earlier: + +```env +SECRET_KEY="[(a series of numbers)]" +``` + +We can then load the keypair from the environment. Update `generate-keypair.ts`: + +```typescript +import "dotenv/config"; +import { getKeypairFromEnvironment } from "@solana-developers/helpers"; + +const keypair = getKeypairFromEnvironment("SECRET_KEY"); + +console.log( + `✅ Finished! We've loaded our secret key securely, using an env file!`, +); +``` + +Run `npx esrun generate-keypair.ts`. You should see the following result: + +```text +✅ Finished! We've loaded our secret key securely, using an env file! +``` + +We've now learned about keypairs, and how to store secret keys securely on +Solana. In the next chapter, we'll use them! + + + +## Completed the lab? + +Push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=ee06a213-5d74-4954-846e-cba883bc6db1)! + + diff --git a/content/courses/intro-to-solana/intro-to-custom-onchain-programs.mdx b/content/courses/intro-to-solana/intro-to-custom-onchain-programs.mdx new file mode 100644 index 000000000..d11b228be --- /dev/null +++ b/content/courses/intro-to-solana/intro-to-custom-onchain-programs.mdx @@ -0,0 +1,268 @@ +--- +title: Using custom onchain programs +objectives: + - Create transactions for custom onchain programs +description: + "Make instructions for arbitrary programs using the 'TransactionInstruction' + constructor." +--- + +## Summary + +Solana has multiple onchain programs you can use. Instructions that use these +programs have data in a custom format determined by the specific function being +invoked in the onchain program. + +## Lesson + +### Instructions + +In previous lessons, we used the `SystemProgram.transfer()` function from +`@solana/web3.js`, which creates an instruction for the System program to +transfer SOL. + +When working with other programs, however, you'll need to create instructions +manually. With `@solana/web3.js`, you can create instructions with the +`TransactionInstruction` constructor: + +```typescript +const instruction = new TransactionInstruction({ + programId: PublicKey; + keys: [ + { + pubkey: Pubkey, + isSigner: boolean, + isWritable: boolean, + }, + ], + data?: Buffer; +}); +``` + +`TransactionInstruction()` takes 3 fields: + +- The `programId` field is fairly self-explanatory: it's the public key (also + called the 'address' or 'program ID') of the program. + +- `keys` is an array of accounts and how they will be used during the + transaction. You need to know the behavior of the program you are calling and + ensure that you provide all of the necessary accounts in the array. 
+ + - `pubkey` - the public key of the account + - `isSigner` - a boolean representing whether or not the account is a signer + on the transaction + - `isWritable` - a boolean representing whether or not the account is written + to during the transaction's execution + +- an optional `Buffer` containing data to pass to the program. We'll be ignoring + the `data` field for now, but we will revisit it in a future lesson. + +After making our instruction, we add it to a transaction, send the transaction +to our RPC to be processed and confirmed, then look at the transaction +signature. + +```typescript +const transaction = new web3.Transaction().add(instruction); + +const signature = await web3.sendAndConfirmTransaction( + connection, + transaction, + [payer], +); + +console.log(`✅ Success! Transaction signature is: ${signature}`); +``` + +### Solana Explorer + +![Solana Explorer set to Devnet](/assets/courses/unboxed/solana-explorer-devnet.png) + +All transactions on the blockchain are publicly viewable on +[Solana Explorer](http://explorer.solana.com). For example, you could take the +signature returned by `sendAndConfirmTransaction()` in the example above, search +for that signature in Solana Explorer, then see: + +- when it occurred +- which block it was included in +- the transaction fee +- and more! + +![Solana Explorer with details about a transaction](/assets/courses/unboxed/solana-explorer-transaction-overview.png) + +## Lab + +### Writing transactions for the ping counter program + +We're going to create a script to ping an onchain program that increments a +counter each time it has been pinged. This program exists on the Solana Devnet +at address `ChT1B39WKLS8qUrkLvFDXMhEJ4F1XZzwUNHUt4AU9aVa`. The program stores +its data in a specific account at the address +`Ah9K7dQ8EHaZqcAsgBW8w37yN2eAy3koFmUn4x3CJtod`. + +![Solana stores programs and data in separate accounts](/assets/courses/unboxed/pdas-global-state.svg) + +### Basic scaffolding + +We'll start by using the same packages and `.env` file we made earlier in +[Intro to Writing Data](/developers/courses/intro-to-solana/intro-to-writing-data). + +Name the file `send-ping-transaction.ts`: + +```typescript +import * as web3 from "@solana/web3.js"; +import "dotenv/config"; +import { + getKeypairFromEnvironment, + airdropIfRequired, +} from "@solana-developers/helpers"; + +const payer = getKeypairFromEnvironment("SECRET_KEY"); +const connection = new web3.Connection(web3.clusterApiUrl("devnet")); + +const newBalance = await airdropIfRequired( + connection, + payer.publicKey, + 1 * web3.LAMPORTS_PER_SOL, + 0.5 * web3.LAMPORTS_PER_SOL, +); +``` + +This will connect to Solana Devnet and request some test Lamports if needed. + +### Ping program + +Now let's talk to the Ping program! To do this, we need to: + +1. create a transaction +2. create an instruction +3. add the instruction to the transaction +4. send the transaction + +Remember, the most challenging piece here is including the right information in +the instructions. We know the address of the program that we are calling. We +also know that the program writes data to a separate account whose address we +also have. 
Let's add the string versions of both of those as constants at the +top of the file: + +```typescript +const PING_PROGRAM_ADDRESS = "ChT1B39WKLS8qUrkLvFDXMhEJ4F1XZzwUNHUt4AU9aVa"; +const PING_PROGRAM_DATA_ADDRESS = + "Ah9K7dQ8EHaZqcAsgBW8w37yN2eAy3koFmUn4x3CJtod"; +``` + +Now let's create a new transaction, then initialize a `PublicKey` for the +program account, and another for the data account. + +```typescript +const transaction = new web3.Transaction(); +const programId = new web3.PublicKey(PING_PROGRAM_ADDRESS); +const pingProgramDataId = new web3.PublicKey(PING_PROGRAM_DATA_ADDRESS); +``` + +Next, let's create the instruction. Remember, the instruction needs to include +the public key for the Ping program and it also needs to include an array with +all the accounts that will be read from or written to. In this example program, +only the data account referenced above is needed. + +```typescript +const transaction = new web3.Transaction(); +const programId = new web3.PublicKey(PING_PROGRAM_ADDRESS); +const pingProgramDataId = new web3.PublicKey(PING_PROGRAM_DATA_ADDRESS); + +const instruction = new web3.TransactionInstruction({ + keys: [ + { + pubkey: pingProgramDataId, + isSigner: false, + isWritable: true, + }, + ], + programId, +}); +``` + +Next, let's add this instruction to the transaction we created. Then, call +`sendAndConfirmTransaction()` by passing in the connection, transaction, and +payer. Finally, let's log the result of that function call so we can look it up +on Solana Explorer. + +```typescript +transaction.add(instruction); + +const signature = await web3.sendAndConfirmTransaction( + connection, + transaction, + [payer], +); + +console.log(`✅ Transaction completed! Signature is ${signature}`); +``` + +### Run the ping client and check Solana Explorer + +Now run the code with the following command: + +```bash +npx esrun send-ping-transaction.ts +``` + +It may take a moment or two but you should see a long string printed to the +console, like the following: + +``` +✅ Transaction completed! Signature is 55S47uwMJprFMLhRSewkoUuzUs5V6BpNfRx21MpngRUQG3AswCzCSxvQmS3WEPWDJM7bhHm3bYBrqRshj672cUSG +``` + +Copy the transaction signature. Then visit +[Solana explorer on devnet](https://explorer.solana.com/?cluster=devnet). Paste +the signature into the search bar at the top of Solana Explorer (make sure +you're connected to Devnet) and hit enter. You should see all the details about +the transaction. If you scroll all the way to the bottom, then you will see +`Program Logs`, which show how many times the program has been pinged including +your ping. + +![Solana Explorer with logs from calling the Ping program](/assets/courses/unboxed/solana-explorer-ping-result.png) + +Scroll around the explorer and look at what you're seeing: + +- The **Account Input(s)** will include: + - The address of your payer - being debited 5000 lamports for the transaction + - The program address for the ping program + - The data address for the ping program +- The **Instruction** section will contain a single instruction with no data - + the ping program is a pretty simple program, so it doesn't need any data. +- The **Program Instruction Logs** show the logs from the ping program. 
+ +If you want to make it easier to look at Solana Explorer for transactions in the +future, simply change your `console.log` to the following: + +```typescript +console.log( + `You can view your transaction on Solana Explorer at:\nhttps://explorer.solana.com/tx/${signature}?cluster=devnet`, +); +``` + +And just like that you're calling programs on the Solana network and writing +data onchain! + +In the next few lessons, you'll learn how to + +1. Send transactions safely from the browser instead of running a script +2. Add custom data to your instructions +3. Deserialize data from the chain + +## Challenge + +Go ahead and create a script from scratch that will allow you to transfer SOL +from one account to another on Devnet. Be sure to print out the transaction +signature so you can look at it on Solana Explorer. + +If you get stuck feel free to glance at the +[solution code](https://github.com/Unboxed-Software/solana-ping-client). + + + +Push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=e969d07e-ae85-48c3-976f-261a22f02e52)! + + diff --git a/content/courses/intro-to-solana/intro-to-reading-data.mdx b/content/courses/intro-to-solana/intro-to-reading-data.mdx new file mode 100644 index 000000000..732d12f50 --- /dev/null +++ b/content/courses/intro-to-solana/intro-to-reading-data.mdx @@ -0,0 +1,232 @@ +--- +title: Read Data From The Solana Network +objectives: + - Understand accounts and their addresses + - Understand SOL and lamports + - Use web3.js to connect to Solana and read an account balance +description: + "Connect to Solana DevNet from TypeScript and read data from the blockchain!" +--- + +## Summary + +- **SOL** is the name of Solana's native token. Each SOL is made from 1 billion + **Lamports**. +- **Accounts** store tokens, NFTs, programs, and data. For now, we'll focus on + accounts that store SOL. +- **Addresses** point to accounts on the Solana network. Anyone can read the + data at a given address. Most addresses are also **public keys**. + +# Lesson + +### Accounts + +All data on Solana is stored in accounts. Accounts can store: + +- SOL +- Other tokens, like USDC +- NFTs +- Programs, like the film review program we make in this course! +- Program data, like a film review for the program above! + +### SOL + +SOL is Solana's 'native token' - this means SOL is used to pay transaction fees, +rent for accounts, and other common. SOL is sometimes shown with the `◎` symbol. +Each SOL is made from 1 billion **Lamports**. + +In the same way that finance apps typically do math in cents (for USD) and pence +(for GBP), Solana apps typically transfer, spend, store, and handle SOL as +Lamports, only converting to full SOL to display to users. + +### Addresses + +Addresses uniquely identify accounts. Addresses are often shown as base-58 +encoded strings like `dDCQNnDmNbFVi8cQhKAgXhyhXeJ625tvwsunRyRc7c8`. Most +addresses on Solana are also **public keys**. As mentioned in the previous +chapter, whoever controls the matching secret key for an address controls the +account - for example, the person with the secret key can send tokens from the +account. + +## Reading from the Solana Blockchain + +### Installation + +We use an npm package called `@solana/web3.js` to do most of the work with +Solana. 
We'll also install TypeScript and `esrun`, so we can run `.ts` files on +the command line: + +```bash +npm install typescript @solana/web3.js@1 esrun +``` + +### Connect to the Network + +Every interaction with the Solana network using `@solana/web3.js` is going to +happen through a `Connection` object. The `Connection` object establishes a +connection with a specific Solana network, called a 'cluster'. For now, we'll +use the `Devnet` cluster rather than `Mainnet`. `Devnet` is designed for +developer use and testing, and `DevNet` tokens don't have real value. + +```typescript +import { Connection, clusterApiUrl } from "@solana/web3.js"; + +const connection = new Connection(clusterApiUrl("devnet")); +console.log(`✅ Connected!`); +``` + +Running this TypeScript (`npx esrun example.ts`) shows: + +``` +✅ Connected! +``` + +### Read from the Network + +To read the balance of an account: + +```typescript +import { Connection, PublicKey, clusterApiUrl } from "@solana/web3.js"; + +const connection = new Connection(clusterApiUrl("devnet")); +const address = new PublicKey("CenYq6bDRB7p73EjsPEpiYN7uveyPUTdXkDkgUduboaN"); +const balance = await connection.getBalance(address); + +console.log(`The balance of the account at ${address} is ${balance} lamports`); +console.log(`✅ Finished!`); +``` + +The balance returned is in \*lamports, as discussed earlier. Web3.js provides +the constant `LAMPORTS_PER_SOL` for showing Lamports as SOL: + +```typescript +import { + Connection, + PublicKey, + clusterApiUrl, + LAMPORTS_PER_SOL, +} from "@solana/web3.js"; + +const connection = new Connection(clusterApiUrl("devnet")); +const address = new PublicKey("CenYq6bDRB7p73EjsPEpiYN7uveyPUTdXkDkgUduboaN"); +const balance = await connection.getBalance(address); +const balanceInSol = balance / LAMPORTS_PER_SOL; + +console.log(`The balance of the account at ${address} is ${balanceInSol} SOL`); +console.log(`✅ Finished!`); +``` + +Running `npx esrun example.ts` will show something like: + +``` +The balance of the account at CenYq6bDRB7p73EjsPEpiYN7uveyPUTdXkDkgUduboaN is 0.00114144 SOL +✅ Finished! +``` + +...and just like that, we are reading data from the Solana blockchain! + +## Lab + +Let's practice what we've learned, and check the balance at a particular +address. + +### Load a keypair + +Remember the public key from the previous chapter. + +Make a new file called `check-balance.ts`, substituting your public key for +``. + +The script loads the public key, connects to DevNet, and checks the balance: + +```typescript +import { Connection, LAMPORTS_PER_SOL, PublicKey } from "@solana/web3.js"; + +const publicKey = new PublicKey(""); + +const connection = new Connection("https://api.devnet.solana.com", "confirmed"); + +const balanceInLamports = await connection.getBalance(publicKey); + +const balanceInSOL = balanceInLamports / LAMPORTS_PER_SOL; + +console.log( + `💰 Finished! The balance for the wallet at address ${publicKey} is ${balanceInSOL}!`, +); +``` + +Save this to a file, and `npx esrun check-balance.ts`. You should see something +like: + +``` +💰 Finished! The balance for the wallet at address 31ZdXAvhRQyzLC2L97PC6Lnf2yWgHhQUKKYoUo9MLQF5 is 0! +``` + +### Get Devnet SOL + +In Devnet you can get free SOL to develop with. Think of Devnet SOL like board +game money - it looks like it has value, but it doesn't have value. + +[Get some Devnet SOL](https://faucet.solana.com/) and use the public key of your +keypair as the address. + +Pick any amount of SOL you like. + +### Check your balance + +Re-run the script. 
You should see your balance updated: + +``` +💰 Finished! The balance for the wallet at address 31ZdXAvhRQyzLC2L97PC6Lnf2yWgHhQUKKYoUo9MLQF5 is 0.5! +``` + +### Check other student's balances + +You can modify the script to check balances on any wallet. + +```typescript +import { Connection, LAMPORTS_PER_SOL, PublicKey } from "@solana/web3.js"; + +const suppliedPublicKey = process.argv[2]; +if (!suppliedPublicKey) { + throw new Error("Provide a public key to check the balance of!"); +} + +const connection = new Connection("https://api.devnet.solana.com", "confirmed"); + +const publicKey = new PublicKey(suppliedPublicKey); + +const balanceInLamports = await connection.getBalance(publicKey); + +const balanceInSOL = balanceInLamports / LAMPORTS_PER_SOL; + +console.log( + `✅ Finished! The balance for the wallet at address ${publicKey} is ${balanceInSOL}!`, +); +``` + +Swap wallet addresses with your classmates in the chat and check their balances. + +```bash +% npx esrun check-balance.ts (some wallet address) +✅ Finished! The balance for the wallet at address 31ZdXAvhRQyzLC2L97PC6Lnf2yWgHhQUKKYoUo9MLQF5 is 3! +``` + +And check a few of your classmate's balances. + +## Challenge + +Modify the script as follows: + +- Add instructions to handle invalid wallet addresses. +- Modify the script to connect to `mainNet` and look up some famous Solana + wallets. Try `toly.sol`, `shaq.sol` or `mccann.sol`. + +We'll transfer SOL in the next lesson! + + + +Push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=8bbbfd93-1cdc-4ce3-9c83-637e7aa57454)! + + diff --git a/content/courses/intro-to-solana/intro-to-writing-data.mdx b/content/courses/intro-to-solana/intro-to-writing-data.mdx new file mode 100644 index 000000000..4e64d7b1d --- /dev/null +++ b/content/courses/intro-to-solana/intro-to-writing-data.mdx @@ -0,0 +1,264 @@ +--- +title: Create Transactions on the Solana Network +objectives: + - Explain transactions + - Explain transaction fees + - Use `@solana/web3.js` to send SOL + - Use `@solana/web3.js` to sign transactions + - Use Solana Explorer to view transactions +description: + "Make your first transactions on DevNet, using the System and memo programs!" +--- + +## Summary + +All modifications to onchain data happen through **transactions**. Transactions +are mostly a set of instructions that invoke Solana programs. Transactions are +atomic, meaning they either succeed - if all the instructions have been executed +properly - or fail as if the transaction hasn't been run at all. + +## Lesson + +### Transactions are atomic + +Any modification to onchain data happens through transactions sent to programs. + +A transaction on Solana is similar to a transaction elsewhere: it is atomic. +**Atomic means the entire transaction runs or fails**. + +Think of paying for something online: + +- The balance of your account is debited +- The bank transfers the funds to the merchant + +Both of these things need to happen for the transaction to be successful. If +either of them fails, none of them should happen, rather than pay the merchant +and not debit your account, or debit the account but not pay the merchant. + +Atomic means either the transaction happens - meaning all the individual steps +succeed - or the entire transaction fails. + +### Transactions contain instructions + +The steps within a transaction on Solana are called **instructions**. + +Each instruction contains: + +- an array of accounts that will be read from and/or written to. 
This is what + makes Solana fast - transactions that affect different accounts are processed + simultaneously +- the public key of the program to invoke +- data passed to the program being invoked, structured as a byte array + +When a transaction is run, one or more Solana programs are invoked with the +instructions included in the transaction. + +As you might expect, `@solana/web3.js` provides helper functions for creating +transactions and instructions. You can create a new transaction with the +constructor, `new Transaction()`. Once created, you can add instructions to the +transaction using the `add()` method. + +One of those helper functions is `SystemProgram.transfer()`, which makes an +instruction for the `SystemProgram` to transfer some SOL: + +```typescript +const transaction = new Transaction(); + +const sendSolInstruction = SystemProgram.transfer({ + fromPubkey: sender, + toPubkey: recipient, + lamports: LAMPORTS_PER_SOL * amount, +}); + +transaction.add(sendSolInstruction); +``` + +The `SystemProgram.transfer()` function requires: + +- a public key corresponding to the sender's account +- a public key corresponding to the recipient's account +- the amount of SOL to send in lamports. + +`SystemProgram.transfer()` returns the instruction for sending SOL from the +sender to the recipient. + +The program used in this instruction will be the `system` program (at the +address `11111111111111111111111111111111`), the data will be the amount of SOL +to transfer (in Lamports) and the accounts will be based on the sender and +recipient. + +The instruction can then be added to the transaction. + +Once all the instructions have been added, a transaction needs to be sent to the +cluster and confirmed: + +```typescript +const signature = sendAndConfirmTransaction(connection, transaction, [ + senderKeypair, +]); +``` + +The `sendAndConfirmTransaction()` function takes the following parameters: + +- a cluster connection +- a transaction +- an array of keypairs that will act as signers on the transaction - in this + example, we only have one signer: the sender. + +### Transactions have fees + +Transaction fees are built into the Solana economy as compensation to the +validator network for the CPU and GPU resources required in processing +transactions. Solana transaction fees are deterministic. + +The first signer included in the array of signers on a transaction is +responsible for paying the transaction fee. If this signer does not have enough +SOL in their account to cover the transaction fee, the transaction will be +dropped with an error like: + +``` +> Transaction simulation failed: Attempt to debit an account but found no record of a prior credit. +``` + +If you get this error, it's because your keypair is brand new and doesn't have +any SOL to cover the transaction fees. Let's fix this by adding the following +lines just after we've set up the connection: + +```typescript +await airdropIfRequired( + connection, + keypair.publicKey, + 1 * LAMPORTS_PER_SOL, + 0.5 * LAMPORTS_PER_SOL, +); +``` + +This will deposit 1 SOL into your account which you can use for testing. This +won't work on Mainnet where it would have value. But it's incredibly convenient +for testing locally and on Devnet. + +You can also use the Solana CLI command `solana airdrop 1` to get free test SOL +in your account when testing, whether locally or on devnet. 
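+
+Because Solana fees are deterministic, you can also ask the cluster what a
+transaction will cost before sending it. The following is a minimal sketch
+(using throwaway keypairs purely for illustration) that builds a transfer and
+checks its fee with `getFeeForMessage()`:
+
+```typescript
+import {
+  Connection,
+  Keypair,
+  LAMPORTS_PER_SOL,
+  SystemProgram,
+  Transaction,
+  clusterApiUrl,
+} from "@solana/web3.js";
+
+const connection = new Connection(clusterApiUrl("devnet"), "confirmed");
+
+// Throwaway keypairs purely for illustration - use your own keypair in practice
+const sender = Keypair.generate();
+const recipient = Keypair.generate().publicKey;
+
+const transaction = new Transaction().add(
+  SystemProgram.transfer({
+    fromPubkey: sender.publicKey,
+    toPubkey: recipient,
+    lamports: 0.01 * LAMPORTS_PER_SOL,
+  }),
+);
+
+// The fee is derived from the compiled message, so set the fee payer and a
+// recent blockhash before compiling it
+transaction.feePayer = sender.publicKey;
+transaction.recentBlockhash = (await connection.getLatestBlockhash()).blockhash;
+
+const { value: fee } = await connection.getFeeForMessage(
+  transaction.compileMessage(),
+  "confirmed",
+);
+
+console.log(`This transaction will cost ${fee} lamports in fees`);
+```
+
+For a simple transfer with a single signature, this typically reports 5000
+lamports - the base fee per signature, and the same amount you'll see debited
+from the fee payer in Solana Explorer.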
+ +### Solana Explorer + +![Solana Explorer set to Devnet](/assets/courses/unboxed/solana-explorer-devnet.png) + +All transactions on the blockchain are publicly viewable on the +[Solana Explorer](http://explorer.solana.com). For example, you could take the +signature returned by `sendAndConfirmTransaction()` in the example above, search +for that signature in the Solana Explorer, then see: + +- when it occurred +- which block it was included in +- the transaction fee +- and more! + +![Solana Explorer with details about a transaction](/assets/courses/unboxed/solana-explorer-transaction-overview.png) + +## Lab + +We're going to create a script to send SOL to other students. + +### Basic scaffolding + +We'll start by using the same packages and `.env` file we made earlier in +[Intro to Cryptography](/developers/courses/intro-to-solana/intro-to-cryptography). + +Create a file called `transfer.ts`: + +```typescript +import { + Connection, + Transaction, + SystemProgram, + sendAndConfirmTransaction, + PublicKey, +} from "@solana/web3.js"; +import "dotenv/config"; +import { getKeypairFromEnvironment } from "@solana-developers/helpers"; + +const suppliedToPubkey = process.argv[2] || null; + +if (!suppliedToPubkey) { + console.log(`Please provide a public key to send to`); + process.exit(1); +} + +const senderKeypair = getKeypairFromEnvironment("SECRET_KEY"); + +console.log(`suppliedToPubkey: ${suppliedToPubkey}`); + +const toPubkey = new PublicKey(suppliedToPubkey); + +const connection = new Connection("https://api.devnet.solana.com", "confirmed"); + +console.log( + `✅ Loaded our own keypair, the destination public key, and connected to Solana`, +); +``` + +Run the script to ensure it connects, loads your keypair, and loads: + +```bash +npx esrun transfer.ts (destination wallet address) +``` + +### Create the transaction and run it + +Add the following to complete the transaction and send it: + +```typescript +console.log( + `✅ Loaded our own keypair, the destination public key, and connected to Solana`, +); + +const transaction = new Transaction(); + +const LAMPORTS_TO_SEND = 5000; + +const sendSolInstruction = SystemProgram.transfer({ + fromPubkey: senderKeypair.publicKey, + toPubkey, + lamports: LAMPORTS_TO_SEND, +}); + +transaction.add(sendSolInstruction); + +const signature = await sendAndConfirmTransaction(connection, transaction, [ + senderKeypair, +]); + +console.log( + `💸 Finished! Sent ${LAMPORTS_TO_SEND} to the address ${toPubkey}. `, +); +console.log(`Transaction signature is ${signature}!`); +``` + +### Experiment + +Send SOL to other students in the class. + +```bash +npx esrun transfer.ts (destination wallet address) +``` + +## Challenge + +Answer the following questions: + +- How much SOL did the transfer take? What is this in USD? + +- Can you find your transaction on https://explorer.solana.com? Remember we are + using the `devnet` network. + +- How long does the transfer take? + +- What do you think "confirmed" means? + + + +Push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=dda6b8de-9ed8-4ed2-b1a5-29d7a8a8b415)! 
+ + diff --git a/content/courses/intro-to-solana/meta.json b/content/courses/intro-to-solana/meta.json new file mode 100644 index 000000000..8f95e0359 --- /dev/null +++ b/content/courses/intro-to-solana/meta.json @@ -0,0 +1,10 @@ +{ + "pages": [ + "getting-started", + "intro-to-cryptography", + "intro-to-reading-data", + "intro-to-writing-data", + "intro-to-custom-onchain-programs", + "interact-with-wallets" + ] +} diff --git a/content/courses/meta.json b/content/courses/meta.json new file mode 100644 index 000000000..a6a71ac21 --- /dev/null +++ b/content/courses/meta.json @@ -0,0 +1,16 @@ +{ + "pages": [ + "intro-to-solana", + "tokens-and-nfts", + "onchain-development", + "connecting-to-offchain-data", + "token-extensions", + "native-onchain-development", + "program-optimization", + "state-compression", + "program-security", + "mobile", + "offline-transactions", + "solana-pay" + ] +} diff --git a/content/courses/mobile/index.mdx b/content/courses/mobile/index.mdx new file mode 100644 index 000000000..6341f7bd3 --- /dev/null +++ b/content/courses/mobile/index.mdx @@ -0,0 +1,5 @@ +--- +author: unboxed +title: Solana Mobile Development +description: Use Solana features from native mobile apps for both Android and iOS. +--- diff --git a/content/courses/mobile/intro-to-solana-mobile.mdx b/content/courses/mobile/intro-to-solana-mobile.mdx new file mode 100644 index 000000000..29ed0353d --- /dev/null +++ b/content/courses/mobile/intro-to-solana-mobile.mdx @@ -0,0 +1,1232 @@ +--- +title: Introduction to Solana Mobile +objectives: + - Explain the benefits of creating mobile-first App experiences + - Explain the high-level Mobile Wallet Adapter (MWA) flow + - Explain the high-level differences between React and React Native + - Create a simple Android Solana App using React Native +description: + "Learn how to build native mobile apps using blockchain functionality" +--- + +## Summary + +- The **Solana Mobile Wallet Adapter** (**MWA**) allows mobile apps to submit + transactions for signing via a WebSocket connection to mobile wallets. +- The easiest way to start building Solana mobile applications is by using + Solana Mobile’s + [React Native packages](https://docs.solanamobile.com/react-native/setup) - + `@solana-mobile/mobile-wallet-adapter-protocol` and + `@solana-mobile/mobile-wallet-adapter-protocol-web3js` + +## Lesson Overview + +In these lessons, we will develop mobile apps that interact with the Solana +network, this opens up a whole new paradigm of blockchain use cases and +behaviors. The **Solana Mobile Stack** (**SMS**) is designed to help developers +seamlessly create mobile apps. It includes the +[Mobile Wallet Adapter (MWA)](https://docs.solanamobile.com/getting-started/overview#mobile-wallet-adapter) +, a Solana Mobile SDK that uses React Native, +[Seed Vault](https://docs.solanamobile.com/getting-started/overview#seed-vault), +and the +[Solana app Store](https://docs.solanamobile.com/getting-started/overview#solana-app-store). +These resources simplify mobile development with a similar experience but with +mobile-specific features. + +This lesson focuses on using React Native to create a simple Android app that +integrates with the Solana network. If you're not familiar with programming in +React or Solana, we recommend starting with our +[Intro to Solana lesson](https://github.com/solana-foundation/developer-content/tree/main/content/courses/intro-to-solana) +and returning when you're ready. If you are, let's dive in! 
+ +## Intro to Solana Mobile + +Native mobile wallets hold your secret keys and use them to sign and send +transactions just like web extension wallets. However native mobile wallets use +the +[Mobile Wallet Adapter](https://github.com/solana-mobile/mobile-wallet-adapter) +(MWA) standard instead of the +[Wallet Adapter](https://github.com/anza-xyz/wallet-adapter) to ensure any apps +can work with any wallet. + +We will dig into the specifics of the MWA in a +[later lesson](/developers/courses/mobile/mwa-deep-dive), but it effectively +opens a WebSocket between applications to facilitate communication. That way a +separate app can provide the wallet app with the transaction to be signed and +sent, and the wallet app can respond with appropriate status updates. + +### Mobile Use Cases with Solana + +Before development, it is important to understand the current landscape of Web3 +mobile development to foresee potential blockers and opportunities. Here are a +few examples of what Solana mobile development can unlock: + +**Mobile Banking and Trading (DeFi)** + +Most traditional banking right now happens on on native mobile apps. With SMS, +you can now bank and trade using native mobile apps with your own wallet, where +you hold your own keys. + +**Mobile Gaming with Solana Micropayments** + +Mobile games account for roughly 50% of the video game industry's total value, +largely due to small in-game purchases. However, payment processing fees usually +mean these in-game purchases have a minimum of $0.99 USD. With Solana, it's +possible to unlock true micropayments. Need an extra life? That'll be 0.0001 +SOL. + +**Mobile E-Commerce** + +SMS can enable a new wave of mobile e-commerce shoppers to pay directly from +their favorite Solana wallet. Imagine a world where you can use your Solana +wallet as seamlessly as you can use Apple Pay. + +In summary, mobile blockchain transactions can open many opportunities. Let's +start building! + +### Supported Operating Systems + +Currently, the MWA only supports Android. On Android, a WebSocket connection can +persist between apps, even when the wallet app is in the background. + +On iOS, the OS quickly suspends websocket connections when an app is +backgrounded, so the standard +[Wallet Adapter](https://github.com/solana-labs/wallet-adapter) library is used +instead. + +The remainder of this lesson will focus on developing Android apps with the MWA. + +### Supported Frameworks + +Solana Mobile supports a number of different frameworks. Officially supported +are React Native and native Android, with community SDKs for Flutter, Unity, and +Unreal Engine. + +**Solana SDKs:** + +- [React Native](https://docs.solanamobile.com/react-native/quickstart) ( + Regular and Expo ) +- [Android](https://docs.solanamobile.com/android-native/quickstart) + +**Community SDKs:** + +- [Flutter](https://docs.solanamobile.com/flutter/overview) +- [Unity](https://docs.solanamobile.com/unity/unity_sdk) +- [Unreal Engine](https://docs.solanamobile.com/unreal/unreal_sdk) + +To keep the development experience as close as possible to other lessons, we +will be working exclusively with React Native. + +## From React to React Native + +React Native is very similar to React but designed for mobile. Here are some key +points to note: + +- React Native compiles down to native Android and iOS apps while React compiles + down to a collection of web pages. +- Instead of using web elements like `
<div>`, you will use mobile-native + elements like `<View>`. +- React Native allows access to mobile hardware, such as the camera and + accelerometer, which React web apps cannot access. +- Many standard React and Node packages may not be compatible with React Native + and setting up React Native can be challenging. Fortunately, the + [React Native Docs](https://reactnative.dev/docs/environment-setup?guide=native) + contains everything you may need. +- For development, you will need to set up + [Android Studio](https://developer.android.com/studio/intro/) for Android apps + and an emulator or physical device for testing. + + + +**NOTE:** There is a learning curve, but if you know React you're not nearly as far from being able to develop mobile apps as you think! It may feel jarring to start, but after a few hours of React Native development, you will start to feel much more comfortable. We have included a [Lab](#lab) section below to help you. + + + +## Creating a React Native App on Solana + +Solana React Native apps are virtually identical to React apps. The primary +difference is in the wallet interaction. Instead of the wallet being available +in the browser, your app will create an MWA session with the wallet app of your +choosing using a WebSocket. Fortunately, this is abstracted for you in the MWA +library. The only difference is that anytime you need to make a call to the +wallet, the `transact` function will be used. More details on this function appear in +later parts of this lesson. + +![App Flow](/assets/courses/unboxed/basic-solana-mobile-flow.png) + +## Reading Data + +Reading data from a Solana cluster in React Native works the same way as in +React. You can use the `useConnection` hook to access the `connection` object, +which is responsible for interacting with the Solana network. + +In Solana, an account refers to any object stored on-chain, and is typically +referenced by a +[public key](/docs/terminology#public-key-pubkey). + +Here’s an example of how you can read account information using the +`getAccountInfo` method: + +```javascript +const { connection } = useConnection(); +const publicKey = new PublicKey("your-wallet-public-key-here"); // Replace with a valid public key +const account = await connection.getAccountInfo(publicKey); +``` + +> **NOTE:** If you need a refresher, refer to our +> [Intro to Reading Data lesson](/developers/courses/intro-to-solana/intro-to-reading-data). + +## Connecting to a Wallet + +When writing data to the blockchain, it must be done through a **transaction**. +Transactions need to be signed by one or more secret keys and sent to an +[RPC provider](https://academy.subquery.network/subquery_network/node_operators/rpc_providers/introduction.html) +for processing. In almost all cases, this interaction is facilitated through a +wallet application. + +### Web vs. Mobile Wallet Interactions + +The WebSocket that connects the app and the wallet is managed using the MWA, and +initiated using **Android intents**, with the dApp broadcasting its intent using +the `solana-wallet://` scheme. +![Connecting](/assets/courses/unboxed/basic-solana-mobile-connect.png) + +When the wallet app receives the intent broadcast, it opens a WebSocket +connection with the app that initiated the session.
The app initiates this +connection using the `transact` function, as shown below: + +```tsx +transact(async (wallet: Web3MobileWallet) => { + // Your wallet action code goes here +}); +``` + +This function provides access to the `Web3MobileWallet` object, allowing you to +perform actions such as signing transactions or interacting with wallet data. +Remember, all wallet interactions must occur inside the callback of the +`transact` function. + +### Signing and sending transactions + +The overall flow for signing and sending a transaction is as follows: + +- Use the `transact` function to establish a session with the wallet. This + function takes an asynchronous callback: + `async (wallet: Web3MobileWallet) => {...}`. +- Inside the callback, request wallet authorization using `wallet.authorize()` + or `wallet.reauthorize()`, depending on the wallet's state (whether it has an + active session or requires reauthorization). +- Once the wallet is authorized, you can either: + - Sign the transaction using `wallet.signTransactions()`, or + - Sign and send the transaction directly using + `wallet.signAndSendTransactions()`. + +![Transacting](/assets/courses/unboxed/basic-solana-mobile-transact.png) +To manage the wallet's authorization state, consider creating a +`useAuthorization()` hook. This hook can streamline the process of handling +authorization within your app, especially if you have multiple interactions with +the wallet. + +> We will explore the use of this hook and practice managing the wallet's state +> in more detail during the lab exercises. + +Here is an example of sending a transaction using MWA: + +```tsx +//import required dependencies if any + +const { authorizeSession } = useAuthorization(); +const { connection } = useConnection(); + +const sendTransactions = async (transaction: Transaction) => { + try { + // Start a session with the wallet + await transact(async (wallet: Web3MobileWallet) => { + // Get the latest blockhash for the transaction + const { blockhash, lastValidBlockHeight } = + await connection.getLatestBlockhash(); + + // Authorize the wallet session + const authResult = await authorizeSession(wallet); + + // Create an updated transaction with the latest blockhash and feePayer + const updatedTransaction = new Transaction({ + recentBlockhash: blockhash, + feePayer: authResult.publicKey, + }).add(transaction); + + // Sign and send the transaction via the wallet + const signatures = await wallet.signAndSendTransactions({ + transactions: [updatedTransaction], + }); + + console.log(`Transaction successful! Signature: ${signatures[0]}`); + }); + } catch (error) { + console.error("Error sending transaction:", error); + throw new Error("Transaction failed"); + } +}; +``` + +## Debugging + +Debugging can be challenging when working with Solana mobile transactions, as +two separate applications are involved: your app and the mobile wallet. Unlike +typical single-application setups, you won't have direct access to the wallet’s +logs, which makes tracking issues more complex. + +However, Android Studio’s +[Logcat](https://developer.android.com/studio/debug/logcat) provides a useful +solution - enabling you to view logs from all applications running on your +device including the wallet. By leveraging Logcat, you can monitor the +interaction between your app and the wallet, helping you identify any issues +that arise during transaction signing and submission. 
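If you prefer the terminal, you can also stream Logcat output with `adb` and filter it down to a single app. A minimal sketch, assuming the wallet's package name (the package below is only an example - substitute the wallet you are debugging):

```bash
# Stream logs from one app's process only.
# The package name is an example; replace it with the wallet app you are debugging.
adb logcat --pid=$(adb shell pidof -s com.solanamobile.fakewallet)
```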
+ +If Logcat is not your preferred tool, an alternative approach is to use the +wallet solely for signing transactions, while handling the actual transaction +submission in your app’s code. This method allows for greater control over +debugging, as you can inspect the transaction flow more thoroughly on the client +side. + +## Deploying for Solana Mobile + +Deploying mobile applications can be challenging, and the complexity increases +when dealing with blockchain-based apps. Two primary factors contribute to this +difficulty: customer safety and financial incentives. + +### Customer Safety and Regulatory Uncertainty: + +Most mobile app marketplaces, such as the Apple App Store and Google Play Store, +have policies that restrict blockchain-related apps. Since blockchain is still a +relatively new and evolving technology, platforms are cautious about regulatory +compliance. They often adopt strict guidelines to protect users from potential +risks associated with blockchain apps. + +### In-App Purchases and Platform Fees: + +Another significant challenge arises when using blockchain transactions for +in-app purchases. Many platforms impose a transaction fee on purchases made +within their apps (ranging from 15% to 30%). Payment via the blockchain is often +seen as a way to bypass these fees, which is explicitly prohibited by most app +stores. These platforms prioritize protecting their revenue streams and +therefore enforce strict policies against apps that facilitate blockchain +payments for in-app purchases. + +> While traditional app stores impose strict policies around blockchain +> transactions to protect their revenue and comply with regulations, alternative +> distribution methods like the Solana app Store offers developers a more +> flexible platform for deploying Solana-based mobile applications. This +> decentralized approach bypasses many of the restrictions seen in centralized +> app marketplaces, allowing apps to thrive in a more blockchain-friendly +> ecosystem. + +## Conclusion + +Getting started with Solana mobile development is more accessible than ever, +thanks to the Solana Mobile Stack (SMS). Although React Native introduces some +differences compared to React, much of the code you will write remains familiar, +particularly when it comes to structuring the UI and handling state. The main +distinction lies in how you interact with wallets, which requires using the +`transact` callback to establish wallet sessions, sign transactions, and +communicate with Solana’s blockchain. + +As you continue building Solana mobile apps, it's essential to keep learning and +refining your skills. Be sure to explore additional resources like: + +- [The official Solana Developer Docs](/docs) for in-depth + guides on Solana’s core libraries and best practices. + +- [Solana Stack Exchange](https://solana.stackexchange.com/) forum for + troubleshooting, sharing insights, and staying updated on the latest ecosystem + changes. + +Mastering mobile Solana development will open up new opportunities in +decentralized finance (DeFi), gaming, and e-commerce, allowing you to build +cutting-edge applications with a seamless user experience. Stay curious and +experiment with different tools to push the boundaries of what you can achieve +with mobile apps. Let's put our knowledge to test by building a counting app +with React Native for Android OS! 
+ +## Lab: Building a Mobile Counter app with React Native + +This app will display a counter and allow users to make increments via a +transaction on the Solana blockchain. The app will also connect to a wallet for +signing transactions. + +We will use the **Anchor framework** to interact with the on-chain counter +program. The client side has already been developed in one of our previous +lessons, +[Intro to client-side Anchor development](/developers/courses/onchain-development/intro-to-anchor-frontend); +feel free to check out its code for more context. + +To ensure you fully understand the core concepts, we will write this application +in vanilla React Native without a starting template. While Solana Mobile offers +templates that handle some boilerplate, building from scratch provides a much +deeper understanding. + +### Getting Started + +To get started, you will need to properly set up a React Native development +environment if you haven't already. This +[article](https://reactnative.dev/docs/set-up-your-environment) shows you how. +Remember that this step is not required if you are using a +[Framework](https://reactnative.dev/architecture/glossary#react-native-framework). + +Ensure you have [Node.js](https://nodejs.org/en/download) installed on your +system; it will manage your JavaScript packages. Then install Android Studio, +which is required to run the Android emulator and to compile your React Native +app for Android devices. Finally, configure the `ANDROID_HOME` environment +variable: + +> **NOTE:** You will need to configure the `ANDROID_HOME` environment variable +> so that your terminal can recognize Android’s SDK tools. This step is critical +> for running and building your app on Android. + +### Project Setup + +Create a sample project to ensure your Android +environment is set up correctly. In your terminal, run the code below within +your preferred directory to scaffold a new React Native project, where +`SampleProject` is your preferred project name. You can open the project in +Android Studio and ensure it runs correctly on the Android emulator. + +```bash +npx react-native init SampleProject --npm +``` + +### Cloning and Running MWA + +1. Clone the repo in `SampleProject` + + ```bash + git clone https://github.com/solana-mobile/mobile-wallet-adapter.git + ``` + +2. In Android Studio, _Open project > Navigate to the cloned directory > Select + mobile-wallet-adapter/android_ +3. After Android Studio finishes loading the project, select `fakewallet` in + the build/run configuration dropdown in the top right + + ![Fake Wallet](/assets/courses/unboxed/basic-solana-mobile-fake-wallet.png) + +4. For easier debugging, use **Logcat**. Check the + [Logcat installation guide](https://developer.android.com/studio/debug/logcat) + if you are interested. +5. Now that your fake wallet is running on the emulator, go to _View -> Tool + Windows -> Logcat_. This will open up a console logging what’s happening + with the fake wallet. + +6. (Optional) Install other + [Solana wallets](https://play.google.com/store/search?q=solana%20wallet&c=apps) + on the Google Play Store. + +Lastly, we recommend installing _Java 11_ to avoid dependency errors. To +check which version you have installed, run `java --version` in your terminal. + +### 1. Plan out the App Structure + +Before we do any coding, let's conceptualize the outline of the app. Again, this +app will connect to and interact with the counter program we've already deployed +to Devnet.
To do this, we'll need the following: + +- A `Connection` object to interact with Solana (`ConnectionProvider.tsx`) +- Access to our counter program (`ProgramProvider.tsx`) +- Authorization for a wallet to sign and send requests (`AuthProvider.tsx`) +- Text to display our counter value (`CounterView.tsx`) +- A button to press to increment our count (`CounterButton.tsx`) + +There will be more files and considerations, but these are the most important +files we'll be creating and working with. + +### 2. Create the App + +Now that we've got some of the basic setup and structure down, let's scaffold a +new app with the following command: + +```bash +npx react-native@latest init counter --npm +``` + +This scaffolds a new React Native project for us called `counter`. + +Let's make sure everything is set up properly by starting the default app and +running it on our Android emulator. + +```bash +cd counter +npm run android +``` + +This should open and run the app in your Android emulator. If you run into +problems, check to make sure you’ve accomplished everything in the +[_Getting Started_](#getting-started) section. + +### 3. Install Dependencies + +We will need to import our Solana dependencies. +[The Solana Mobile docs provide a nice list of packages](https://docs.solanamobile.com/react-native/setup) +and explanations for why we need them: + +- `@solana-mobile/mobile-wallet-adapter-protocol`: A React Native/JavaScript API + enabling interaction with MWA-compatible wallets +- `@solana-mobile/mobile-wallet-adapter-protocol-web3js`: A convenience wrapper + to use common primitives from + [@solana/web3.js](https://github.com/solana-labs/solana-web3.js), such as + `Transaction` and `Uint8Array` +- `@solana/web3.js`: Solana Web Library for interacting with the Solana network + through the + [JSON RPC API](https://github.com/solana-foundation/developer-content/blob/main/docs/rpc/http/index.mdx) +- `react-native-get-random-values`: Secure random number generator polyfill for + the library underlying `web3.js` on React Native +- `buffer`: Buffer polyfill; also needed for `web3.js` on React Native + +In addition to this list, we will add three more packages: + +- `@coral-xyz/anchor`: The Anchor TS client. +- `assert`: A polyfill that lets Anchor do its thing. +- `text-encoding-polyfill`: A polyfill needed to create the `Program` object + +If you’re not familiar: polyfills provide replacements for Node-native libraries +so that they work anywhere Node is not running. We will finish our polyfill setup shortly. +For now, install dependencies using the following command: + +```bash +npm install \ + @solana/web3.js \ + @solana-mobile/mobile-wallet-adapter-protocol-web3js \ + @solana-mobile/mobile-wallet-adapter-protocol \ + react-native-get-random-values \ + buffer \ + @coral-xyz/anchor \ + assert \ + text-encoding-polyfill +``` + +### 4. Create ConnectionProvider.tsx file + +Let's start adding our Solana functionality. Create a new folder called +`components` and within it, a file called `ConnectionProvider.tsx`. This +provider will wrap the entire application and make our `Connection` object +available throughout. Hopefully, you're noticing a pattern: this is identical to +the React patterns we've used throughout the course.
+ +```tsx +import { Connection, ConnectionConfig } from "@solana/web3.js"; +import React, { ReactNode, createContext, useContext, useMemo } from "react"; + +export interface ConnectionProviderProps { + children: ReactNode; + endpoint: string; + config?: ConnectionConfig; +} + +export interface ConnectionContextState { + connection: Connection; +} + +const ConnectionContext = createContext( + {} as ConnectionContextState, +); + +export function ConnectionProvider({ + children, + endpoint, + config = { commitment: "confirmed" }, +}: ConnectionProviderProps) { + const connection = useMemo( + () => new Connection(endpoint, config), + [config, endpoint], + ); + + return ( + + {children} + + ); +} + +export const useConnection = (): ConnectionContextState => + useContext(ConnectionContext); +``` + +### 5. Create AuthProvider.tsx file + +The next Solana provision we will need is the **auth provider**. This is one of +the main differences between mobile and web development. What we’re implementing +here is roughly equivalent to the `WalletProvider` that we’re used to in web +apps. However, since we're using Android and its natively installed wallets, the +flow to connect and utilize them is a bit different. Most notably, we need to +follow the MWA protocol. + +We do this by providing the following in our `AuthProvider`: + +- `accounts`: If the user has multiple wallets, different accounts are + maintained in this array of Accounts. +- `selectedAccount`: The current selected account for the transaction. +- `authorizeSession(wallet)`: Authorizes (or reauthorizes, if token is expired) + the `wallet` for the user and returns an account which will act as the + selected account for the session. The `wallet` variable is from the callback + of the `transact` function you call independently anytime you want to interact + with a wallet. +- `deauthorizeSession(wallet)`: Deauthorizes the `wallet`. +- `onChangeAccount`: Acts as a handler when `selectedAccount` is changed. + +We are also going to throw in some utility methods: + +- `getPublicKeyFromAddress(base64Address)`: Creates a new Public Key object from + the Base64 address given from the `wallet` object +- `getAuthorizationFromAuthResult`: Handles the authorization result, extracts + relevant data from the result, and returns the `Authorization` context object + +We will expose all of this through a `useAuthorization` hook. + +Since this provider is the same across all apps, we are going to give you the +full implementation that you can copy and paste. We will dig into the details of +MWA in a future lesson. + +Create the file `AuthProvider.tsx` in the `components` folder and paste in the +following: + +```tsx +import { Cluster, PublicKey } from "@solana/web3.js"; +import { + Account as AuthorizedAccount, + AuthorizationResult, + AuthorizeAPI, + AuthToken, + Base64EncodedAddress, + DeauthorizeAPI, + ReauthorizeAPI, +} from "@solana-mobile/mobile-wallet-adapter-protocol"; +import { toUint8Array } from "js-base64"; +import { useState, useCallback, useMemo, ReactNode } from "react"; +import React from "react"; + +const AuthUtils = { + getAuthorizationFromAuthResult: ( + authResult: AuthorizationResult, + previousAccount?: Account, + ): Authorization => { + const selectedAccount = + previousAccount === undefined || + !authResult.accounts.some( + ({ address }) => address === previousAccount.address, + ) + ? 
AuthUtils.getAccountFromAuthorizedAccount(authResult.accounts[0]) + : previousAccount; + + return { + accounts: authResult.accounts.map( + AuthUtils.getAccountFromAuthorizedAccount, + ), + authToken: authResult.auth_token, + selectedAccount, + }; + }, + + getAccountFromAuthorizedAccount: ( + authAccount: AuthorizedAccount, + ): Account => ({ + ...authAccount, + publicKey: new PublicKey(toUint8Array(authAccount.address)), + }), +}; + +type Account = Readonly<{ + address: Base64EncodedAddress; + label?: string; + publicKey: PublicKey; +}>; + +type Authorization = Readonly<{ + accounts: Account[]; + authToken: AuthToken; + selectedAccount: Account; +}>; + +const APP_IDENTITY = { + name: "Solana Counter Incrementor", +}; + +type AuthorizationProviderContext = { + accounts: Account[] | null; + authorizeSession: (wallet: AuthorizeAPI & ReauthorizeAPI) => Promise; + deauthorizeSession: (wallet: DeauthorizeAPI) => void; + onChangeAccount: (nextSelectedAccount: Account) => void; + selectedAccount: Account | null; +}; + +const AuthorizationContext = React.createContext({ + accounts: null, + authorizeSession: () => { + throw new Error("Provider not initialized"); + }, + deauthorizeSession: () => { + throw new Error("Provider not initialized"); + }, + onChangeAccount: () => { + throw new Error("Provider not initialized"); + }, + selectedAccount: null, +}); + +type AuthProviderProps = { + children: ReactNode; + cluster: Cluster; +}; + +function AuthorizationProvider({ children, cluster }: AuthProviderProps) { + const [authorization, setAuthorization] = useState( + null, + ); + + const handleAuthorizationResult = useCallback( + async (authResult: AuthorizationResult): Promise => { + const nextAuthorization = AuthUtils.getAuthorizationFromAuthResult( + authResult, + authorization?.selectedAccount, + ); + setAuthorization(nextAuthorization); + return nextAuthorization; + }, + [authorization], + ); + + const authorizeSession = useCallback( + async (wallet: AuthorizeAPI & ReauthorizeAPI) => { + const authorizationResult = authorization + ? await wallet.reauthorize({ + auth_token: authorization.authToken, + identity: APP_IDENTITY, + }) + : await wallet.authorize({ cluster, identity: APP_IDENTITY }); + return (await handleAuthorizationResult(authorizationResult)) + .selectedAccount; + }, + [authorization, cluster, handleAuthorizationResult], + ); + + const deauthorizeSession = useCallback( + async (wallet: DeauthorizeAPI) => { + if (authorization?.authToken) { + await wallet.deauthorize({ auth_token: authorization.authToken }); + setAuthorization(null); + } + }, + [authorization], + ); + + const onChangeAccount = useCallback((nextAccount: Account) => { + setAuthorization(currentAuthorization => { + if ( + currentAuthorization?.accounts.some( + ({ address }) => address === nextAccount.address, + ) + ) { + return { ...currentAuthorization, selectedAccount: nextAccount }; + } + throw new Error(`${nextAccount.address} is no longer authorized`); + }); + }, []); + + const value = useMemo( + () => ({ + accounts: authorization?.accounts ?? null, + authorizeSession, + deauthorizeSession, + onChangeAccount, + selectedAccount: authorization?.selectedAccount ?? null, + }), + [authorization, authorizeSession, deauthorizeSession, onChangeAccount], + ); + + return ( + + {children} + + ); +} + +const useAuthorization = () => React.useContext(AuthorizationContext); + +export { + AuthorizationProvider, + useAuthorization, + type Account, + type AuthProviderProps, + type AuthorizationProviderContext, +}; +``` + +### 6. 
Create ProgramProvider.tsx file + +The last provider we need is our program provider. This will expose the counter +program we want to interact with. + +Since we are using the Anchor TS client to interact with our program, we need +the program's IDL. Start by creating a root-level folder called `models`, then +create a new file `anchor-counter.ts`. Paste the contents of the Anchor Counter +IDL into this new file. + +Next, create the file `ProgramProvider.tsx` inside of components. Inside we will +create the program provider to surface our program and the counter PDA: + +```tsx +import { + AnchorProvider, + IdlAccounts, + Program, + setProvider, +} from "@coral-xyz/anchor"; +import { Keypair, PublicKey } from "@solana/web3.js"; +import { AnchorCounter, IDL } from "../models/anchor-counter"; +import React, { + ReactNode, + createContext, + useCallback, + useContext, + useEffect, + useMemo, + useState, +} from "react"; +import { useConnection } from "./ConnectionProvider"; + +export type CounterAccount = IdlAccounts["counter"]; + +export type ProgramContextType = { + program: Program | null; + counterAddress: PublicKey | null; +}; + +export const ProgramContext = createContext({ + program: null, + counterAddress: null, +}); + +export type ProgramProviderProps = { + children: ReactNode; +}; + +export function ProgramProvider({ children }: ProgramProviderProps) { + const { connection } = useConnection(); + const [program, setProgram] = useState | null>(null); + const [counterAddress, setCounterAddress] = useState(null); + + const setup = useCallback(async () => { + const programId = new PublicKey( + "ALeaCzuJpZpoCgTxMjJbNjREVqSwuvYFRZUfc151AKHU", + ); + + // MockWallet is a placeholder wallet used for initializing the AnchorProvider. + // In a mobile app, we don't need a real wallet here because the actual signing + // will be done by the user's mobile wallet app. This mock wallet allows us to + // set up the provider without a real wallet instance. + + const MockWallet = { + signTransaction: () => Promise.reject(), + signAllTransactions: () => Promise.reject(), + publicKey: Keypair.generate().publicKey, + }; + + const provider = new AnchorProvider(connection, MockWallet, {}); + setProvider(provider); + + const programInstance = new Program( + IDL, + programId, + provider, + ); + + const [counterProgramAddress] = PublicKey.findProgramAddressSync( + [Buffer.from("counter")], + programId, + ); + + setProgram(programInstance); + setCounterAddress(counterProgramAddress); + }, [connection]); + + useEffect(() => { + setup(); + }, [setup]); + + const value: ProgramContextType = useMemo( + () => ({ + program, + counterAddress, + }), + [program, counterAddress], + ); + + return ( + {children} + ); +} + +export const useProgram = () => useContext(ProgramContext); +``` + +### 7. Modify App.tsx file + +Now that we have all our providers, let's wrap our app with them. 
We're going to +re-write the default `App.tsx` with the following changes: + +- Import our providers and add in our polyfills +- Wrap the app first with `ConnectionProvider`, then `AuthorizationProvider`, + and finally `ProgramProvider` +- Pass in our Devnet endpoint to the `ConnectionProvider` +- Pass our cluster to the `AuthorizationProvider` +- Replace the default internal `` with ``, a screen we'll + build in the next step + +```tsx +// Polyfills at the top +import "text-encoding-polyfill"; +import "react-native-get-random-values"; +import { Buffer } from "buffer"; +global.Buffer = Buffer; + +import { clusterApiUrl } from "@solana/web3.js"; +import { ConnectionProvider } from "./components/ConnectionProvider"; +import { AuthorizationProvider } from "./components/AuthProvider"; +import { ProgramProvider } from "./components/ProgramProvider"; +import { MainScreen } from "./screens/MainScreen"; // Going to make this +import React from "react"; + +export default function App() { + const cluster = "devnet"; + const endpoint = clusterApiUrl(cluster); + + return ( + // ConnectionProvider: Manages the connection to the Solana network + + // AuthorizationProvider: Handles wallet authorization + + // ProgramProvider: Provides access to the Solana program + + + + + + ); +} +``` + +### 8. Create MainScreen.tsx file + +Now, let's put everything together to create our UI. Create a new folder called +`screens` and a new file called `MainScreen.tsx` inside of it. In this file, we +are only structuring the screen to display two yet-to-be-created components: +`CounterView` and `CounterButton`. + +Additionally, in this file, we're introducing React Native's `StyleSheet`. This +is another difference from regular React. Don't worry, it behaves very similarly +to CSS. + +In `screens/MainScreen.tsx` paste the following: + +```tsx +import React from "react"; +import { StatusBar, StyleSheet, View } from "react-native"; +import { CounterView } from "../components/CounterView"; +import { CounterButton } from "../components/CounterButton"; + +export function MainScreen() { + return ( + + + + + + + + + + ); +} + +const styles = StyleSheet.create({ + container: { + height: "100%", + width: "100%", + backgroundColor: "lightgray", + }, + incrementButtonContainer: { + position: "absolute", + right: "5%", + bottom: "3%", + }, + counterContainer: { + alignContent: "center", + alignItems: "center", + justifyContent: "center", + }, +}); +``` + +### 9. Create CounterView.tsx file + +The `CounterView` is the first of our two program-specific files. +`CounterView`'s only job is to fetch and listen for updates on our `Counter` +account. Since we're only listening here, we don't have to do anything +MWA-related. It should look identical to a web application. We'll use our +`Connection` object to listen for the `programAddress` specified in +`ProgramProvider.tsx`. When the account is changed, we update the UI. 
+ +In `components/CounterView.tsx` paste the following: + +```tsx +import { View, Text, StyleSheet } from "react-native"; +import { useConnection } from "./ConnectionProvider"; +import { useProgram, CounterAccount } from "./ProgramProvider"; +import { useEffect, useState } from "react"; +import { AccountInfo } from "@solana/web3.js"; +import React from "react"; + +const counterStyle = StyleSheet.create({ + counter: { + fontSize: 48, + fontWeight: "bold", + color: "black", + textAlign: "center", + }, +}); + +export function CounterView() { + const { connection } = useConnection(); + const { program, counterAddress } = useProgram(); + const [counter, setCounter] = useState(); + + // Fetch Counter Info + useEffect(() => { + if (!program || !counterAddress) return; + + program.account.counter.fetch(counterAddress).then(setCounter); + + const subscriptionId = connection.onAccountChange( + counterAddress, + (accountInfo: AccountInfo) => { + try { + const data = program.coder.accounts.decode( + "counter", + accountInfo.data, + ); + setCounter(data); + } catch (e) { + console.log("account decoding error: " + e); + } + }, + ); + + return () => { + connection.removeAccountChangeListener(subscriptionId); + }; + }, [program, counterAddress, connection]); + + if (!counter) return Loading...; + + return ( + + Current counter + {counter.count.toString()} + + ); +} +``` + +### 10. Create CounterButton.tsx file + +Finally, we have our last component, the `CounterButton`. This floating action +button will do the following in a new function `incrementCounter`: + +- Call `transact` to get access to a mobile wallet +- Authorize the session with `authorizeSession` from the `useAuthorization` hook +- Request a Devnet airdrop to fund the transaction if not enough Devnet SOL is + available +- Create an `increment` transaction +- Call `signAndSendTransactions` to have the wallet sign and send the + transaction + + + +The fake Solana wallet we use generates a new keypair every +time you restart the fake wallet app, requiring that we want to check for funds +and airdrop every time. This is for demo purposes only, you can't do this in +production. 
+ + + +Create the file `CounterButton.tsx` and paste in the following: + +```tsx +import { + Alert, + Platform, + Pressable, + StyleSheet, + Text, + ToastAndroid, +} from "react-native"; +import { useAuthorization } from "./AuthProvider"; +import { useProgram } from "./ProgramProvider"; +import { useConnection } from "./ConnectionProvider"; +import { + transact, + Web3MobileWallet, +} from "@solana-mobile/mobile-wallet-adapter-protocol-web3js"; +import { LAMPORTS_PER_SOL, Transaction } from "@solana/web3.js"; +import { useState } from "react"; +import React from "react"; + +const floatingActionButtonStyle = StyleSheet.create({ + container: { + height: 64, + width: 64, + alignItems: "center", + borderRadius: 40, + justifyContent: "center", + elevation: 4, + marginBottom: 4, + backgroundColor: "blue", + }, + + text: { + fontSize: 24, + color: "white", + }, +}); + +export function CounterButton() { + const { authorizeSession } = useAuthorization(); + const { program, counterAddress } = useProgram(); + const { connection } = useConnection(); + const [isTransactionInProgress, setIsTransactionInProgress] = useState(false); + + const showToastOrAlert = (message: string) => { + if (Platform.OS === "android") { + ToastAndroid.show(message, ToastAndroid.SHORT); + } else { + Alert.alert(message); + } + }; + + const incrementCounter = () => { + if (!program || !counterAddress) return; + + if (!isTransactionInProgress) { + setIsTransactionInProgress(true); + + transact(async (wallet: Web3MobileWallet) => { + const authResult = await authorizeSession(wallet); + const latestBlockhashResult = await connection.getLatestBlockhash(); + + const ix = await program.methods + .increment() + .accounts({ counter: counterAddress, user: authResult.publicKey }) + .instruction(); + + const balance = await connection.getBalance(authResult.publicKey); + + console.log( + `Wallet ${authResult.publicKey} has a balance of ${balance}`, + ); + + // When on Devnet you may want to transfer SOL manually per session, due to Devnet's airdrop rate limit + const minBalance = LAMPORTS_PER_SOL / 1000; + + if (balance < minBalance) { + console.log( + `requesting airdrop for ${authResult.publicKey} on ${connection.rpcEndpoint}`, + ); + await connection.requestAirdrop(authResult.publicKey, minBalance * 2); + } + + const transaction = new Transaction({ + ...latestBlockhashResult, + feePayer: authResult.publicKey, + }).add(ix); + const signature = await wallet.signAndSendTransactions({ + transactions: [transaction], + }); + + showToastOrAlert(`Transaction successful! ${signature}`); + }) + .catch(e => { + console.log(e); + showToastOrAlert(`Error: ${JSON.stringify(e)}`); + }) + .finally(() => { + setIsTransactionInProgress(false); + }); + } + }; + + return ( + <> + + + + + + ); +} +``` + +### 11. Build and Run + +Now it's time to test that everything works! Build and run with the following +command: + +```bash +npm run android +``` + +This will open the app in your emulator, click the + button in the bottom right. +This will open up the "fake wallet". The "fake wallet" has various options to +assist in debugging. 
The image below outlines the buttons to tap to properly +test your app: + +![Counter App](/assets/courses/unboxed/basic-solana-mobile-counter-app.png) + +If you run into problems, here are some examples of what they could be and how +to fix them: + +- Application does not build → Exit Metro with _Ctrl+C_ and try again +- Nothing happens when you press the `CounterButton` → Make sure you have Solana + wallet installed ( like the fake wallet we installed in Prerequisites ) +- You get stuck in a forever loop while calling `increment` → This is likely due + to you reaching a Devnet airdrop rate limit. Take out the airdrop section in + `CounterButton` and manually send some Devnet SOL to your wallet's address + (printed in the console) + +That's it! You've made your first Solana Mobile app. If you get stuck, feel free +to check out the + +[full solution code](https://github.com/solana-developers/react-native-counter) +on the `main` branch of the repository. + +## Challenge + +Your next challenge is to expand the app by adding a `decrement` function. You +need to create another button that will call the `decrement` method on the +Solana program. The logic for the decrement function already exists in the +program’s **IDL** (**Interface Description Language**), so your task is to write +the client-side code that interacts with it. + +Once you've completed this, you can check your solution against the solution +code available on the + +[solution branch](https://github.com/solana-developers/react-native-counter). + + + +If you’ve successfully completed the lab, push your code to GitHub and share +your feedback on this lesson through this [form](https://form.typeform.com/to/IPH0UGz7#answers-lesson=c15928ce-8302-4437-9b1b-9aa1d65af864) + + diff --git a/content/courses/mobile/meta.json b/content/courses/mobile/meta.json new file mode 100644 index 000000000..423891597 --- /dev/null +++ b/content/courses/mobile/meta.json @@ -0,0 +1,7 @@ +{ + "pages": [ + "intro-to-solana-mobile", + "mwa-deep-dive", + "solana-mobile-dapps-with-expo" + ] +} diff --git a/content/courses/mobile/mwa-deep-dive.mdx b/content/courses/mobile/mwa-deep-dive.mdx new file mode 100644 index 000000000..3fd561a69 --- /dev/null +++ b/content/courses/mobile/mwa-deep-dive.mdx @@ -0,0 +1,1635 @@ +--- +title: Exploring Mobile Wallet Adapter +objectives: + - Describe the differences between connecting to a web wallet vs a mobile + wallet + - Connect to and sign transactions from a mobile wallet + - Create a simple mobile wallet + - Explain at a high level the interaction between `walletlib` and wallet apps +description: + "Initiate transactions on mobile wallets in your native mobile apps." +--- + +## Summary + +- Wallets are just wrappers around a keypair, but they're essential for secure + key management +- Mobile and Web dApps handle their wallet-app connection differently +- MWA handles all of its wallet interaction by wrapping all the wallet's + functionalities within the `transact` function for easier integration. +- Solana Mobile's `walletlib` does the heavy lifting for surfacing wallet + requests to wallet apps + +## Lesson + +Wallets exist to protect your secret keys. While some applications might have +app-specific keys, many blockchain use cases rely on a single identity used +across multiple apps. In these cases, you very much want to be careful about how +you expose signing across these apps. 
You don't want to share your secret key +with all of them, which means you need a standard for allowing apps to submit +transactions for signature to a secure wallet app that holds your secret key. +This is where the Mobile Wallet Adapter (MWA) comes in. It's the transport layer +to connect your mobile dApps to your wallet. + +### What is MWA + +Mobile Wallet Adapter (MWA) is the mobile connection between dApps and wallets. +Much like the [wallet adapter](https://github.com/solana-labs/wallet-adapter) +we're used to on the web, MWA allows us to create native mobile dApps. However, +since the web and mobile are different platforms, we have to approach the +app-wallet connection differently. + +At its core, a wallet app is fairly straightforward. It's a secure wrapper +around your keypair. External applications can request that the wallet sign +transactions without ever having access to your secret key. Both the web and +mobile wallet adapters define this interaction for their respective platforms. + +#### How does a web wallet work? + +A web wallet is simply a browser extension that stores keypairs and allows the +browser to request access to its functions. It's the wallet's job to follow the +[wallet standard](https://github.com/wallet-standard/wallet-standard), which +defines what functions should be available to the browser: + +- `registerWallet` +- `getWallets` +- `signAndSendTransaction` +- `signIn` +- `signTransaction` +- `signMessage` + +These functions are all available to the browser through the global `window` +object. The browser extension registers itself as a wallet. The wallet adapter +looks for these registered wallets and allows the client to connect and interact +with them. + +A browser extension wallet can run isolated JavaScript. This means it can inject +functions into the browser's `window` object. Effectively, the transport layer +here is just extra JavaScript code as far as the browser is concerned. + +If you're curious to know more about how browser extensions work, take a look at +some +[open-source browser extensions](https://github.com/solana-labs/browser-extension/tree/master). + +#### How MWA is different from web wallets + +Mobile Wallet Adapter (MWA) is different. In the web world, we just need to +inject some code into the `window` object to access our wallets. Mobile apps, +however, are sandboxed. This means that the code for each app is isolated from +other apps. There's no shared state between apps that would be analogous to a +browser's `window` object. This poses a problem for wallet signing since a +mobile wallet and a mobile dApp exist in isolated environments. + +However, there are ways to facilitate communication if you're willing to get +creative. On Android, basic inter-app communication is done through +[`Intents`](https://developer.android.com/guide/components/intents-filters). An +Android Intent is a messaging object used to request an action from another app +component. + +This particular communication is one-way, whereas the interface for wallet +functionality requires two-way communication. MWA gets around this by using an +intent from the requesting app to trigger the wallet app opening up two-way +communication using WebSockets. + +The rest of this lesson will focus on the MWA interface and functionality rather +than the low-level mechanisms underpinning inter-app communication. However, if +you want to know the nitty gritty, read the +[MWA specs](https://solana-mobile.github.io/mobile-wallet-adapter/spec/spec.html). 
+ +### How to work with MWA + +The differences between MWA and the traditional wallet adapter require slight +modifications to how you program your apps. + +#### Connect to a wallet + +By way of comparison, look at the example of connecting to a wallet with React +vs with React Native. + +On the web, you wrap the application with `WalletProvider`, and then children +access the wallet through the `useWallet` hook. From there, children can view, +select, connect, and interact with wallets. + +```tsx +// Parent +{children}; + +// Child +const { wallets, select, connect } = useWallet(); +const wallet = wallets[0]; // choose a wallet +select(wallet); // select the wallet +connect(); // connect +``` + +In React Native, using MWA, this looks a little different. In this case, +providers aren't needed. Rather, wallet context is provided through the +`transact` function from the MWA package. Behind the scenes, this function +searches the devices for active Solana wallets. It surfaces these wallets to the +user through a partial selection modal. Once the user selects a wallet, that +wallet is provided as an argument to the `transact` callback. Your code can then +interact with the wallet directly. + +```tsx +transact(async (wallet: Web3MobileWallet) => { + // returns you the context of the user selected wallet +}); +``` + +#### Authorize a wallet + +On the web, the first time you connect a wallet to a site in your browser, the +wallet prompts you to authorize the site. Similarly, on mobile, the requesting +app needs to be authorized before it can request _privileged_ methods like +signing a transaction. + +Your code can trigger this authorization process by calling +`wallet.authorize()`. The user will be prompted to accept or reject the +authorization request. The returned `AuthorizationResult` will indicate the +user's acceptance or rejection. If accepted, this result object provides you +with the user's account as well as an `auth_token` you can use in +`wallet.reauthorize()` for subsequent calls. This auth token ensures that other +apps can't pretend to be your app. The auth token is generated during the +`authorize()` call, and subsequent requests from the dApp can use the +`reauthorize()` method with the stored token to maintain secure communication +without repeatedly prompting the user. + +```tsx +transact(async (wallet: Web3MobileWallet) => { + const authResult = wallet.authorize({ + cluster: "devnet", + identity: { name: "Solana Counter Incrementor" }, + }); // Authorizes the wallet + + const authToken = authResult.auth_token; // save this for the wallet.reauthorize() function + const publicKey = authResult.selectedAccount.publicKey; +}); +``` + +It's worth noting that all methods except `authorize` and `deauthorize` are +_privileged_ methods. So you'll want to track if a wallet is authorized or not +and call `wallet.reauthorize()` when it is. Below is a simple example that +tracks the authorization state: + +```tsx +const APP_IDENTITY = { name: "Solana Counter Incrementor" }; +const [auth, setAuth] = useState(null); + +transact(async (wallet: Web3MobileWallet) => { + let authResult; + + if (auth) { + authResult = wallet.reauthorize({ + auth_token: auth, + identity: APP_IDENTITY, + }); + } else { + authResult = wallet.authorize({ + cluster: "devnet", + identity: APP_IDENTITY, + }); + + setAuth(authResult.auth_token); + } + + const publicKey = authResult.selectedAccount.publicKey; +}); +``` + +Note that the above example does not handle errors or user rejections. 
In +production, it's a good idea to wrap the authorization state and methods with a +custom `useAuthorization` hook. For reference, we built this +[in the previous lesson](/developers/courses/mobile/intro-to-solana-mobile). + +#### Interact with a wallet + +Unlike connecting and authorizing wallets, requesting methods like +`signAndSendTransactions`, `signMessages`, and `signTransactions` is virtually +the same between web and mobile. + +On the web, you can access these methods with the `useWallet` hook. You just +have to make sure you're connected before calling them: + +```tsx +const { connected, signAllTransactions, signMessage, sendTransaction } = useWallet(); + +if ( connected ) { + signAllTransactions(...); + signMessage(...); + sendTransaction(...); +``` + +For MWA, simply call the functions on the `wallet` context provided by the +`transact` callback: + +```tsx +const APP_IDENTITY = {name: 'Solana Counter Incrementor'} +const [auth, setAuth] = useState(null) + +transact(async (wallet: Web3MobileWallet) => { + let authResult; + + if ( auth ) { + authResult = wallet.reauthorize({ + auth_token: auth, + identity: APP_IDENTITY, + }) + } else { + authResult = wallet.authorize( + { + cluster: "devnet", + identity: APP_IDENTITY + } + ); + setAuth(authResult.auth_token) + } + + const publicKey = authResult.selectedAccount.publicKey + + // Choose your interaction... + wallet.signAndSendTransactions(...) + wallet.signMessages(...) + wallet.signTransactions(...) +}); +``` + +Every time you want to call these methods, you will have to call +`wallet.authorize()` or `wallet.reauthorize()`. + +When invoking `wallet.signAndSendTransactions(...)`, it's essential to handle +transaction failures gracefully. Transactions can fail due to various reasons +such as network issues, signature mismatches, or insufficient funds. Proper +error handling ensures a smooth user experience, even when the transaction +process encounters issues: + +```tsx +transact(async (wallet: Web3MobileWallet) => { + try { + const result = await wallet.signAndSendTransactions(...); + // Handle success + } catch (error) { + console.error("Failed to sign and send transactions:", error); + // Implement error handling logic + } + }); +``` + +And that's it! You should have enough information to get started. The Solana +mobile team has put in a lot of work to make the development experience as +seamless as possible between the two. + +### What MWA is doing wallet-side + +This lesson has talked mostly about what MWA is doing in dApps, but a huge +portion of MWA functionality happens in wallets. Whether you want to create your +own wallet or simply understand the system better, it's worth discussing what +MWA-compatible wallets are doing at a high level. For most readers, it's not +essential to feel like you can create a wallet after reading through these +sections; simply try to get a sense of the overall flow. + +#### Introduction to the `walletlib` + +Solana Mobile has done the vast majority of the heavy lifting by creating the +`mobile-wallet-adapter-walletlib`. This library handles all the low-level +communication between dApps and wallets: + +```bash +npm i @solana-mobile/mobile-wallet-adapter-walletlib +``` + +> This package is still in alpha and is not production ready. However, the API +> is stable and will not change drastically, so you can begin integration with +> your wallet. + +However, `walletlib` doesn't provide UI for you or determine the outcome of +requests. 
Rather, it exposes a hook allowing the wallet code to receive and +resolve requests. The wallet developer is responsible for displaying the +appropriate UI, managing the wallet behavior, and appropriately resolving each +request. + +#### How wallets use the `walletlib` + +At its core, wallets use `walletlib` by calling a single function: +`useMobileWalletAdapterSession`. When calling this function, wallets provide the +following: + +1. The wallet name +2. A configuration object of type `MobileWalletAdapterConfig` +3. A handler for requests +4. A handler for sessions + +Below is an example component that shows the scaffold of how wallets connect to +the `walletlib`: + +```tsx +import { useCallback, useMemo } from "react"; +import { Text } from "react-native"; +import { WalletProvider } from "./components/WalletProvider"; +import { + MWARequest, + MWASessionEvent, + MobileWalletAdapterConfig, + useMobileWalletAdapterSession, +} from "./lib/mobile-wallet-adapter-walletlib/src"; + +function MWAApp() { + const config: MobileWalletAdapterConfig = useMemo(() => { + return { + supportsSignAndSendTransactions: true, + maxTransactionsPerSigningRequest: 10, + maxMessagesPerSigningRequest: 10, + supportedTransactionVersions: [0, "legacy"], + noConnectionWarningTimeoutMs: 3000, + }; + }, []); + + const handleRequest = useCallback((request: MWARequest) => {}, []); + + const handleSessionEvent = useCallback( + (sessionEvent: MWASessionEvent) => {}, + [], + ); + + useMobileWalletAdapterSession( + "React Native Fake Wallet", + config, + handleRequest, + handleSessionEvent, + ); + + return I'm a wallet!; +} + +export default MWAApp; +``` + +If you were to create your own wallet, you would modify the `config` object and +implement the `handleRequest` and `handleSessionEvent` handlers accordingly. +While all of these are required and all are important, the primary element is +the request handler. This is where wallets provide the implementation logic for +each request, e.g. how to handle when a dApp requests authorization or requests +that the wallet sign and send a transaction. + +For example, if the request is of type +`MWARequestType.SignAndSendTransactionsRequest`, then your code would use the +user's secret key to sign the transaction provided by the request, send the +request to an RPC provider, and then respond to the requesting dApp using a +`resolve` function. + +The `resolve` function simply tells the dApp what happened and closes the +session. The `resolve` function takes two arguments: `request` and `response`. +The types of `request` and `response` are different depending on what the +original request was. So in the example of +`MWARequestType.SignAndSendTransactionsRequest`, you would use the following +resolve function: + +```ts +export function resolve( + request: SignAndSendTransactionsRequest, + response: SignAndSendTransactionsResponse, +): void; +``` + +The `SignAndSendTransactionsResponse` type is defined as follows: + +```ts +export type SignAndSendTransactionsCompleteResponse = Readonly<{ + signedTransactions: Uint8Array[]; +}>; +export type SignAndSendTransactionsResponse = + | SignAndSendTransactionsCompleteResponse + | UserDeclinedResponse + | TooManyPayloadsResponse + | AuthorizationNotValidResponse + | InvalidSignaturesResponse; +``` + +Which response you send would depend on the result of attempting to sign and +send the transaction. 
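As a rough sketch (not production code), a handler for this request type could
look something like the following. Treat the `payloads` field and the exact
response shape as assumptions to verify against the version of `walletlib` you
install:

```tsx
import { Connection, Keypair, Transaction } from "@solana/web3.js";
import {
  SignAndSendTransactionsRequest,
  resolve,
} from "./lib/mobile-wallet-adapter-walletlib/src";

async function handleSignAndSendTransactions(
  request: SignAndSendTransactionsRequest,
  wallet: Keypair,
  connection: Connection,
) {
  // Deserialize and sign each transaction payload with the wallet's keypair
  const signedTransactions = request.payloads.map(payload => {
    const transaction = Transaction.from(payload);
    transaction.sign(wallet);
    return transaction;
  });

  // Send the signed transactions to an RPC provider
  for (const transaction of signedTransactions) {
    await connection.sendRawTransaction(transaction.serialize());
  }

  // Tell the requesting dApp what happened and close the session
  resolve(request, {
    signedTransactions: signedTransactions.map(transaction =>
      transaction.serialize(),
    ),
  });
}
```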
+ +You can dig into the +[`walletlib` source](https://github.com/solana-mobile/mobile-wallet-adapter/blob/main/js/packages/mobile-wallet-adapter-walletlib/src/resolve.ts) +if you'd like to know all of the types associated with `resolve`. + +One final point is that the component used for interacting with `walletlib` also +needs to be registered in the app's `index.js` as the MWA entry point for the +app. + +```js +import { AppRegistry } from "react-native"; +import App from "./App"; +import { name as appName } from "./app.json"; +import MWAApp from "./MWAApp"; + +// Mock event listener functions to prevent them from fataling. +window.addEventListener = () => {}; +window.removeEventListener = () => {}; + +AppRegistry.registerComponent(appName, () => App); + +// Register the MWA component +AppRegistry.registerComponent("MobileWalletAdapterEntrypoint", () => MWAApp); +``` + +### Conclusion + +While the MWA is slightly different than the web wallet standard, once you +understand the nuances between them it becomes fairly straightforward to +implement mobile wallet interaction. This becomes especially true when you +understand what MWA is doing not only in your dApp but also in wallets. If +anything remains unclear to you, be sure to spend time familiarizing yourself +with both sides of the equation. + +## Lab + +Now let's do some hands-on practice by building a mobile wallet. The goal here +is to see what happens on both sides of the MWA process to demystify the +app-wallet relationship. + +#### 0. Set up development environment if needed + +Before we start programming our wallet, we need to do some setup. You will need +a React Native development environment and a Solana dApp to test on. If you have +completed the +[Introduction to Solana Mobile lab](/developers/courses/mobile/intro-to-solana-mobile), +both of these requirements should be met and the counter app installed on your +Android device/emulator. + +If you _haven't_ completed/done the +[intro to solana mobile](https://github.com/solana-developers/react-native-fake-wallet) +you will need to: + +1. Setup an + [Android React Native developer environment](https://github.com/solana-developers/react-native-fake-wallet) + with a device or emulator +2. Install a + [Devnet Solana dApp](https://github.com/solana-developers/react-native-fake-wallet) + by doing the following steps in your terminal: + +```bash +git clone https://github.com/solana-developers/react-native-fake-wallet +cd solana-react-native-counter +npm run install +``` + +#### 1. Planning out the app's structure + +We are making the wallet from scratch, so let's look at our major building +blocks. + +First, we'll make the actual wallet app (popup not included). This will include: + +- Creating a `WalletProvider.tsx` +- Modifying the `MainScreen.tsx` +- Modifying `App.tsx` + +Next, we'll make a boilerplate MWA app that displays 'Im a Wallet' anytime the +wallet is requested from a different dApp. This will include: + +- Creating a `MWAApp.tsx` +- Modifying `index.js` + +Then we'll set up all of our UI and request routing. This will mean: + +- Modifying the `MWAApp.tsx` +- Creating a `ButtonGroup.tsx` +- Creating a `AppInfo.tsx` + +Finally, we'll implement two actual request functions, authorize and sign and +send transactions. This entails creating the following: + +- `AuthorizeDappRequestScreen.tsx` +- `SignAndSendTransactionScreen.tsx` + +#### 2. 
Scaffold the Wallet app + +Let's scaffold the app with: + +```bash +npx react-native@latest init wallet --npm +cd wallet +``` + +Now, let's install our dependencies. These are the same dependencies from our +[Introduction to Solana Mobile lab](/developers/courses/mobile/intro-to-solana-mobile) +with two additions: + +- `@react-native-async-storage/async-storage`: provides access to on-device + storage +- `fast-text-encoding`: a polyfill for text encoding + +We will be using `async-storage` to store our keypair so that the wallet will +stay persistent through multiple sessions. It is important to note that +`async-storage` is **_NOT_** a safe place to keep your keys in production. +Again, **_DO NOT_** use this in production. Instead, take a look at +[Android's keystore system](https://developer.android.com/privacy-and-security/keystore). + +Install these dependencies with the following command: + +```bash +npm install \ + @solana/web3.js \ + @solana-mobile/mobile-wallet-adapter-protocol-web3js \ + @solana-mobile/mobile-wallet-adapter-protocol \ + react-native-get-random-values \ + buffer \ + @coral-xyz/anchor \ + assert \ + bs58 \ + @react-native-async-storage/async-storage \ + fast-text-encoding +``` + +We need to depend on Solana's `mobile-wallet-adapter-walletlib` package, which +handles all of the low-level communication. + +> Note: A reminder that this package is still in alpha and is not production +> ready. However, the API is stable and will not change drastically, so you can +> begin integration with your wallet. + +Let's install the package in a new folder `lib`: + +```bash +npm i @solana-mobile/mobile-wallet-adapter-walletlib +``` + +Next, in `android/build.gradle`, change the `minSdkVersion` to version `23`. + +```gradle + minSdkVersion = 23 +``` + +Finally, finish the initial setup by building the app. You should get the +default React Native app showing up on your +device./environment-setup?os=linux&platform=android&guide=native#jdk-studio + +```bash +npm run android +``` + +If you get any errors make sure you double-check you've followed all of the +steps above. + +#### 3. Create the main wallet app + +There are two parts to the wallet application we'll be building: + +1. The UI to be displayed when you manually open the wallet application +2. The UI to be displayed as a bottom sheet when a separate app requests to use + the wallet + +Throughout this lab, we'll be calling these the "main wallet app" and "wallet +popup," respectively. + +- Generate a `Keypair` when the app first loads +- Display the address and Devnet SOL balance +- Allow users to airdrop some Devnet SOL to their wallet + +This can all be accomplished by creating two files: + +- `WalletProvider.tsx` - Generates a Keypair and stores it in `async-storage`, + then fetches the keypair on subsequent sessions. It also provides the Solana + `Connection` +- `MainScreen.tsx` - Shows the wallet, its balance, and an airdrop button + +Let's start with the `WalletProvider.tsx`. This file will use `async-storage` to +store a base58 encoded version of a `Keypair`. The provider will check the +storage key of `@my_fake_wallet_keypair_key`. If nothing returns, then the +provider should generate and store a keypair. The `WalletProvider` will then +return its context including the `wallet` and `connection`. The rest of the app +can access this context using the `useWallet()` hook. + +**_AGAIN_**, async storage is not fit to store secret keys in production. 
Please +use something like +[Android's keystore system](https://developer.android.com/privacy-and-security/keystore). + +Let's create the `WalletProvider.tsx` within a new directory named `components`: + +```tsx +import AsyncStorage from "@react-native-async-storage/async-storage"; +import { Connection, Keypair } from "@solana/web3.js"; +import { encode, decode } from "bs58"; +import { + ReactNode, + createContext, + useContext, + useEffect, + useState, +} from "react"; + +const ASYNC_STORAGE_KEY = "@my_fake_wallet_keypair_key"; + +interface EncodedKeypair { + publicKeyBase58: string; + secretKeyBase58: string; +} + +function encodeKeypair(keypair: Keypair): EncodedKeypair { + return { + publicKeyBase58: keypair.publicKey.toBase58(), + secretKeyBase58: encode(keypair.secretKey), + }; +} + +function decodeKeypair(encodedKeypair: EncodedKeypair): Keypair { + const secretKey = decode(encodedKeypair.secretKeyBase58); + return Keypair.fromSecretKey(secretKey); +} + +export interface WalletContextData { + wallet: Keypair | null; + connection: Connection; +} + +const WalletContext = createContext({ + wallet: null, + connection: new Connection("https://api.devnet.solana.com"), +}); + +export const useWallet = () => useContext(WalletContext); + +export interface WalletProviderProps { + rpcUrl?: string; + children: ReactNode; +} + +export function WalletProvider(props: WalletProviderProps) { + const { rpcUrl, children } = props; + const [keyPair, setKeyPair] = useState(null); + + const fetchOrGenerateKeypair = async () => { + try { + const storedKey = await AsyncStorage.getItem(ASYNC_STORAGE_KEY); + let keyPair; + if (storedKey) { + const encodedKeypair: EncodedKeypair = JSON.parse(storedKey); + keyPair = decodeKeypair(encodedKeypair); + } else { + // Generate a new random pair of keys and store them in local storage for later retrieval + // This is not secure! Async storage is used for demo purpose. Never store keys like this! + keyPair = Keypair.generate(); + await AsyncStorage.setItem( + ASYNC_STORAGE_KEY, + JSON.stringify(encodeKeypair(keyPair)), + ); + } + setKeyPair(keyPair); + } catch (error) { + console.log("error getting keypair: ", error); + } + }; + + useEffect(() => { + fetchOrGenerateKeypair(); + }, []); + + const connection = useMemo( + () => new Connection(rpcUrl ?? "https://api.devnet.solana.com"), + [rpcUrl], + ); + + const value = { + wallet: keyPair, + connection, + }; + + return ( + {children} + ); +} +``` + +Note that we are defaulting our `rpcUrl` to Devnet. + +Now let's make the `MainScreen.tsx`. It should simply grab the `wallet` and +`connection` from `useWallet()`, and then display the address and balance. +Additionally, since all transactions require a transaction fee in SOL, we'll +also include an airdrop button. 
+ +Create a new directory called `screens` and a new file called `MainScreen.tsx` +inside of it: + +```tsx +import { Button, StyleSheet, Text, View } from "react-native"; +import { useWallet } from "../components/WalletProvider"; +import { useEffect, useState } from "react"; +import { LAMPORTS_PER_SOL } from "@solana/web3.js"; + +const styles = StyleSheet.create({ + container: { + width: "100%", + height: "100%", + justifyContent: "center", // Centers children along the main axis (vertically for column) + alignItems: "center", // Centers children along the cross axis (horizontally for column) + }, +}); + +function MainScreen() { + const [isLoading, setIsLoading] = useState(false); + const [balance, setBalance] = useState(null); + const { wallet, connection } = useWallet(); + const [errorMessage, setErrorMessage] = useState(null); + + useEffect(() => { + updateBalance(); + }, [wallet]); + + const updateBalance = async () => { + if (wallet) { + try { + const lamports = await connection.getBalance(wallet.publicKey); + setBalance(lamports / LAMPORTS_PER_SOL); + } catch (error) { + console.error("Failed to fetch / update balance:", error); + setErrorMessage("Failed to fetch balance"); + } + } + }; + + const airdrop = async () => { + if (wallet && !isLoading) { + setIsLoading(true); + try { + const signature = await connection.requestAirdrop( + wallet.publicKey, + LAMPORTS_PER_SOL, + ); + await connection.confirmTransaction(signature, "max"); + await updateBalance(); + } catch (error) { + console.log("error requesting airdrop", error); + setErrorMessage("Airdrop failed"); + } + + setIsLoading(false); + } + }; + + return ( + + Wallet: + {wallet?.publicKey.toString() ?? "No Wallet"} + Balance: + {balance?.toFixed(5) ?? ""} + {isLoading && Loading...} + {errorMessage && {errorMessage}} + {balance !== null && !isLoading && balance < 0.005 && ( + + )} + {movies.length === 5 && ( + + )} +
+ +); +``` + +At this point, you should be able to run the project and click between pages! + +#### 3. Order reviews alphabetically by title + +If you look at the reviews, you might notice they aren't in any specific order. +We can fix this by adding back just enough data into our data slice to help us +do some sorting. The various properties in the movie review data buffer are laid +out as follows + +- `initialized` - unsigned 8-bit integer; 1 byte +- `rating` - unsigned 8-bit integer; 1 byte +- `title` - string; unknown number of bytes +- `description` - string; unknown number of bytes + +Based on this, the offset we need to provide to the data slice to access `title` +is 2. The length, however, is indeterminate, so we can just provide what seems +to be a reasonable length. I'll stick with 18 as that will cover the length of +most titles without fetching too much data every time. + +Once we've modified the data slice in `getProgramAccounts`, we then need to +actually sort the returned array. To do this, we need to compare the part of the +data buffer that actually corresponds to `title`. The first 4 bytes of a dynamic +field in Borsh are used to store the length of the field in bytes. So in any +given buffer `data` that is sliced the way we discussed above, the string +portion is `data.slice(4, 4 + data[0])`. + +Now that we've thought through this, let's modify the implementation of +`prefetchAccounts` in `MovieCoordinator`: + +```tsx +// account type as returned by getProgramAccounts() +type ProgramAccount = { + pubkey: PublicKey; + account: AccountInfo; +}; + +const DATA_OFFSET = 2; // Skip the first 2 bytes, which store versioning information for the data schema of the account. This versioning ensures that changes to the account's structure can be tracked and managed over time. +const DATA_LENGTH = 18; // Retrieve 18 bytes of data, including the part of the account's data that stores the user's public key for comparison. +// Define a constant for the size of the header in each account buffer +const HEADER_SIZE = 4; // 4 bytes for length header + +static async prefetchAccounts(connection: Connection) { + // Get readonly accounts response + const readonlyAccounts = (await connection.getProgramAccounts( + new PublicKey(MOVIE_REVIEW_PROGRAM_ID), + { + dataSlice:{ offset: DATA_OFFSET, length: DATA_LENGTH }, + } + )) + + const accounts: Array = Array.from(readonlyAccounts); // Make a mutable copy of the readonly array + + accounts.sort((a, b) => { + try { + // Check if buffers are long enough to avoid out-of-bounds access + const lengthA = a.account.data.readUInt32LE(0); // Reads the first 4 bytes for length + const lengthB = b.account.data.readUInt32LE(0); + + if ( + a.account.data.length < HEADER_SIZE + lengthA || + b.account.data.length < HEADER_SIZE + lengthB + ) { + throw new Error('Buffer length is insufficient'); + } + + const dataA = a.account.data.subarray(HEADER_SIZE, HEADER_SIZE + lengthA); + const dataB = b.account.data.subarray(HEADER_SIZE, HEADER_SIZE + lengthB); + + return dataA.compare(dataB); + } catch (error) { + console.error('Error sorting accounts: ', error); + return 0; // Default sort order in case of error + } + }); + + this.accounts = accounts.map(account => account.pubkey) + + } catch (error) { + console.error("Error prefetching accounts:", error); + } +} +``` + +And just like that, you should be able to run the app and see the list of movie +reviews ordered alphabetically. + +#### 4. 
Add search + +The last thing we'll do to improve this app is to add some basic search +capability. Let's add a `search` parameter to `prefetchAccounts` and reconfigure +the body of the function to use it. + +We can use the `filters` property of the `config` parameter of +`getProgramAccounts` to filter accounts by specific data. The offset to the +`title` fields is 2, but the first 4 bytes are the length of the title so the +actual offset to the string itself is 6. Remember that the bytes need to be base +58 encoded, so let's install and import `bs58`. + +```tsx +import bs58 from 'bs58' + +... + +static async prefetchAccounts(connection: Connection, search: string) { + const readonlyAccounts = (await connection.getProgramAccounts( + new PublicKey(MOVIE_REVIEW_PROGRAM_ID), + { + dataSlice: { offset: DATA_OFFSET, length: DATA_LENGTH }, + filters: + search === "" + ? [] + : [ + { + memcmp: { + offset: 6, + bytes: bs58.encode(Buffer.from(search)), + }, + }, + ], + } + )); + + const accounts: Array = Array.from(readonlyAccounts); // Make a mutable copy of the readonly array + + accounts.sort((a, b) => { + try { + const lengthA = a.account.data.readUInt32LE(0); + const lengthB = b.account.data.readUInt32LE(0); + + if ( + a.account.data.length < HEADER_SIZE + lengthA || + b.account.data.length < HEADER_SIZE + lengthB + ) { + throw new Error('Buffer length is insufficient'); + } + + const dataA = a.account.data.subarray(HEADER_SIZE, HEADER_SIZE + lengthA); + const dataB = b.account.data.subarray(HEADER_SIZE, HEADER_SIZE + lengthB); + + return dataA.compare(dataB); + } catch (error) { + console.error("Error sorting accounts: ", error); + return 0; + } + }); + + this.accounts = accounts.map((account) => account.pubkey); +} +``` + +Now, add a `search` parameter to `fetchPage` and update its call to +`prefetchAccounts` to pass it along. We'll also need to add a `reload` boolean +parameter to `fetchPage` so that we can force a refresh of the account +prefetching every time the search value changes. + +```tsx +static async fetchPage( + connection: Connection, + page: number, + perPage: number, + search: string, + reload = false +): Promise> { + if (this.accounts.length === 0 || reload) { + await this.prefetchAccounts(connection, search); + } + + const paginatedPublicKeys = this.accounts.slice( + (page - 1) * perPage, + page * perPage + ); + + if (paginatedPublicKeys.length === 0) { + return []; + } + + const accounts = await connection.getMultipleAccountsInfo( + paginatedPublicKeys + ); + + const movies = accounts.reduce((accumulator: >, account) => { + try { + const movie = Movie.deserialize(account?.data); + if (movie) { + accumulator.push(movie); + } + } catch (error) { + console.error('Error deserializing movie data: ', error); + } + return accumulator; + }, []); + + return movies; + } +``` + +With that in place, let's update the code in `MovieList` to call this properly. + +First, add `const [search, setSearch] = useState('')` near the other `useState` +calls. Then update the call to `MovieCoordinator.fetchPage` in the `useEffect` +to pass the `search` parameter and to reload when `search !== ''`. 
+ +```tsx +const connection = new Connection(clusterApiUrl("devnet")); +const [movies, setMovies] = useState>([]); +const [page, setPage] = useState(1); +const [search, setSearch] = useState(""); + +useEffect(() => { + const fetchMovies = async () => { + try { + const movies = await MovieCoordinator.fetchPage( + connection, + page, + 5, + search, + search !== "", + ); + setMovies(movies); + } catch (error) { + console.error("Failed to fetch movies:", error); + } + }; + + fetchMovies(); +}, [connection, page, search]); +``` + +Finally, add a search bar that will set the value of `search`: + +```tsx +return ( +
+ setSearch(e.target.value)} + placeholder="Search" + /> + ... +
+); +``` + +And that's it! The app now has ordered reviews, paging, and search. + +That was a lot to digest, but you made it through. If you need to spend some +more time with the concepts, feel free to reread the sections that were most +challenging for you and/or have a look at the +[solution code](https://github.com/solana-developers/movie-review-frontend/tree/solutions-paging-account-data). + +## Challenge + +Now it's your turn to try and do this on your own. Using the Student Intros app +from last lesson, add paging, ordering alphabetically by name, and searching by +name. + +![Student Intros frontend](/assets/courses/student-intros-frontend.png) + +1. You can build this from scratch or you can download the + [starter code](https://github.com/solana-developers/solana-student-intro-frontend/tree/solution-deserialize-account-data) +2. Add paging to the project by prefetching accounts without data, then only + fetching the account data for each account when it's needed. +3. Order the accounts displayed in the app alphabetically by name. +4. Add the ability to search through introductions by a student's name. + +This is challenging. If you get stuck, feel free to reference the +[solution code](https://github.com/solana-developers/solana-student-intro-frontend/tree/solution-paging-account-data). + +As always, get creative with these challenges and take them beyond the +instructions if you want! + + + +Push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=9342ad0a-1741-41a5-9f68-662642c8ec93)! + + diff --git a/content/courses/native-onchain-development/program-derived-addresses.mdx b/content/courses/native-onchain-development/program-derived-addresses.mdx new file mode 100644 index 000000000..a9f9fa4f0 --- /dev/null +++ b/content/courses/native-onchain-development/program-derived-addresses.mdx @@ -0,0 +1,988 @@ +--- +title: Program Derived Addresses (PDAs) +objectives: + - Explain Program Derived Addresses (PDAs) + - Explain various use cases of PDAs + - Describe how PDAs are derived + - Use PDA derivations to locate and retrieve data +description: "Get a deeper understanding of PDAs." +--- + +## Summary + +- A **Program Derived Address** (PDA) is derived from a **program ID** and an + optional list of **seeds** +- The program that derives PDAs owns and controls them. +- PDA derivation provides a deterministic way to find data based on the seeds + used for the derivation +- Seeds can be used to map to the data stored in a separate PDA account +- A program can sign instructions on behalf of the PDAs derived from its ID + +## Lesson + +### What is a Program Derived Address? + +Program Derived Addresses (PDAs) are addresses that, instead of being public +keys, are calculated (or 'found') based on a combination of: + +- The program ID +- A set of "seeds" determined by the programmer. + +More on this later, but these seeds will play a role in using PDAs for data +storage and retrieval. + +PDAs serve two main functions: + +1. Provide a deterministic way to find a given item of data for a program +2. Authorize the program that owns a PDA to sign on the PDAs behalf, just like a + user signs for their own account using their secret key. + +This lesson will focus on using PDAs to find and store data. We'll discuss +signing with a PDA more thoroughly in a future lesson, where we will cover +Cross-Program Invocations (CPIs). 
+ +### Finding PDAs + +Technically, PDAs are _found_ or _derived_ based on a program ID and one or more +input seeds. + +Unlike other Solana accounts, PDAs are not public keys and don't have secret +keys. Since public keys are on Solana's Ed25519 curve, PDAs are sometimes called +'off curve addresses'. + +PDAs are found using a hashing function that deterministically generates a PDA +using the program ID and seeds. Both Solana frontend and backend code can +determine an address using the program ID and seeds, and the same program with +the same seeds always results in the same Program Derived Address. + +### Seeds + +"Seeds" are inputs in the `find_program_address` function. While you, the +developer, determine the seeds to pass into the `find_program_address` method, +`find_program_address` method adds an additional numeric seed called a bump seed +that is used to ensure the address is _off_ the Ed25519 curve, ie, is not a +valid public key and does not have a corresponding secret key. + +`find_program_address` uses a loop to calculate the off curve address, starting +with the bump seed value 255 and checks if the output is a public key address +(on the curve) or not a valid public key (off the curve). If an an off-curve +address is not found, the method decrements the bump seed by subtracting one and +tries again (`255`, `254`, `253`, et cetera). When the method finds a valid PDA, +it returns the PDA and the canonical bump seed that derived it. + +If the resulting PDA is on the Ed25519 curve, then an error +`PubkeyError::InvalidSeeds` is returned. + +A PDA allows a maximum of `16` seeds, with each seed limited to `32` bytes in +length. If a seed exceeds this length or the number of seeds surpasses the +limit, the system returns the error `PubkeyError::MaxSeedLengthExceeded,` +indicating that the `Length of the seed is too long for address generation`. +Developers commonly use static strings and public keys as seeds. + +The +[PublicKey](https://docs.rs/solana-program/latest/solana_program/pubkey/struct.Pubkey.html#) +type has multiple methods that find a PDA within a Solana program: + +1. `find_program_address` +2. `try_find_program_address` +3. `create_program_address` + +These methods takes an optional list of "seeds" and a `program ID` as inputs and +can return the PDA and a bump seed or an error and a PDA. + +### 1. find_program_address + +The source code for `find_program_address`: + +```rust +pub fn find_program_address(seeds: &[&[u8]], program_id: &Pubkey) -> (Pubkey, u8) { + Self::try_find_program_address(seeds, program_id) + .unwrap_or_else(|| panic!("Unable to find a viable program address bump seed")) +} +``` + +Under the hood, the `find_program_address` method passes the input `seeds` and +`program_id` to the `try_find_program_address` method. + +### 2. try_find_program_address + +The `try_find_program_address` method then introduces the `bump_seed`. The +`bump_seed` is a `u8` variable with a value between 0 and 255. Iterating over a +descending range starting from 255, a `bump_seed` is appended to the optional +input seeds passed to the `create_program_address` method. If the output of +`create_program_address` is not a valid PDA, the `bump_seed` is decreased by one +and continues the loop until it finds a valid PDA. + +```rust +pub fn try_find_program_address(seeds: &[&[u8]], program_id: &Pubkey) -> Option<(Pubkey, u8)> { + //.. 
+ let mut bump_seed = [u8::MAX]; + for _ in 0..u8::MAX { + { + let mut seeds_with_bump = seeds.to_vec(); + seeds_with_bump.push(&bump_seed); + match Self::create_program_address(&seeds_with_bump, program_id) { + Ok(address) => return Some((address, bump_seed[0])), + Err(PubkeyError::InvalidSeeds) => (), + _ => break, + } + } + bump_seed[0] -= 1; + } + None + + // ... +} +``` + +We can see that the `try_find_program_address` calls the +`create_program_address` method. + +```rust +pub fn try_find_program_address(seeds: &[&[u8]], program_id: &Pubkey) -> Option<(Pubkey, u8)> { + // ... + for _ in 0..std::u8::MAX { + { + // `create_program_address` is called here + match Self::create_program_address(&seeds_with_bump, program_id) { + //... + } + } + //... + } +} + +``` + +### 3. create_program_address + +The `create_program_address` method performs a hashing operation over the seeds +and `program_id`. These operations compute a key and verify whether it lies on +the Ed25519 elliptic curve. If a valid PDA is found (i.e., an address that is +_off_ the curve), then either the PDA or an error is returned. + +The source code for `create_program_address`: + +```rust +pub fn create_program_address(seeds: &[&[u8]], program_id: &Pubkey) -> Result { + if seeds.len() > MAX_SEEDS { + return Err(PubkeyError::MaxSeedLengthExceeded); + } + for seed in seeds.iter() { + if seed.len() > MAX_SEED_LEN { + return Err(PubkeyError::MaxSeedLengthExceeded); + } + } + + //.. + let mut hasher = crate::hash::Hasher::default(); + for seed in seeds.iter() { + hasher.hash(seed); + } + hasher.hashv(&[program_id.as_ref(), PDA_MARKER]); + let hash = hasher.result(); + + if bytes_are_curve_point(hash) { + return Err(PubkeyError::InvalidSeeds); + } + + Ok(Pubkey::from(hash.to_bytes())) + + // ... +} +``` + +When an error occurs during the invocation of the `find_program_address` method, +it's essential to handle it effectively. Though statistically improbable, the +system returns the error `Unable to find a viable program address bump seed` +whenever it finds a PDA that lies on the curve. The `try_find_program_address` +method is used instead of panicking. + +Locating a valid PDA off the Ed25519 curve can be time-consuming due to the +iterations on the canonical bump seed. This operation can consume a variable +amount of the program's compute budget. Developers can optimize the performance +and lower the compute budget of programs by passing the `bump_seed`(also called +the canonical bump), and the user-supplied seeds as part of the instruction +data, and then deserialize the seed and canonical bump. These deserialized +outputs can then be passed to the `create_program_address` method to derive the +PDA. It's important to note that the `create_program_address` method incurs a +fixed cost to the compute budget. + +Address collisions can occur since the seeds are passed as a slice of bytes, +meaning that the seeds `{abcdef}`, `{abc, def}` and `{ab, cd, ef}` will result +in the same PDA being generated. In some cases, developers may wish to prevent +collisions by adding separator characters like hyphens. + +In summary, the `find_program_address` method passes the input seeds and +`program_id` to the `try_find_program_address` method. The +`try_find_program_address` method starts with a `bump_seed` of 255, adds it to +the input seeds, and then repeatedly calls the `create_program_address` method +until it finds a valid PDA. Once found, both the PDA and the `bump_seed` are +returned. 
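As a sketch of the compute-saving pattern described above (the seed layout and
function name here are assumptions for illustration), a program that receives
the seeds and the canonical bump in its instruction data can re-derive and
verify the PDA with a single fixed-cost call:

```rust
use solana_program::{program_error::ProgramError, pubkey::Pubkey};

// Hypothetical check: the client passes `title` and `bump_seed` in the
// instruction data, so the program avoids the derivation loop inside
// `find_program_address` and only pays for one `create_program_address` call.
fn verify_pda(
    program_id: &Pubkey,
    initializer: &Pubkey,
    title: &str,
    bump_seed: u8,
    expected_pda: &Pubkey,
) -> Result<(), ProgramError> {
    let derived = Pubkey::create_program_address(
        &[initializer.as_ref(), title.as_bytes(), &[bump_seed]],
        program_id,
    )
    .map_err(|_| ProgramError::InvalidSeeds)?;

    if derived != *expected_pda {
        return Err(ProgramError::InvalidSeeds);
    }

    Ok(())
}
```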
+ +Note that different valid bumps generate different valid PDAs for the same input +seeds. The `bump_seed` returned by `find_program_address` will always be the +first valid PDA found. + +Using the canonical bump when generating a PDA onchain is crucial. Starting with +a `bump_seed` value of `255` and iterating downward to `0` ensures that the +returned seed will always be the most significant valid `8-bit` value possible. +This `bump_seed` is commonly known as the "_canonical bump_". It's a best +practice always to use the canonical bump and validate every PDA passed into +your program to ensure the integrity of the process. + +One point to emphasize is that the `find_program_address` method only returns a +Program-Derived Address and the bump seed used to derive it. The method does not +initialize a new account, nor is any PDA returned by the method necessarily +associated with an account that stores data. + +### Use PDA accounts to store data + +Solana programs are stateless, so state is stored in separate accounts from +where the program's executable is stored. Although programs can use the System +Program to create non-PDA accounts for data storage, PDAs are the choice for +storing program-related data. This choice is popular because the seeds and +canonical bump directly map to the same PDA, and the program specified as the +program ID can sign on its behalf. + +Program Derived Addresses (PDAs) are account keys only the program can sign on +its behalf. During cross-program invocations, the program can "sign" for the key +by calling `invoke_signed` and providing the same seeds used to generate the +address, along with the calculated bump seed. The runtime then verifies that the +program associated with the address is the caller and thus authorized to sign. + +If you need a refresher on how to store data in PDAs, have a look at the +[State Management lesson](/developers/courses/native-onchain-development/program-state-management). + +### Map to data stored in PDA accounts + +Storing data in PDA accounts is only half of the equation. Retrieving the data +is the other half. We'll talk about two approaches: + +1. Creating a PDA "map" account that stores the addresses of various accounts + where data is stored +2. Strategically using seeds to locate the appropriate PDA accounts and retrieve + the necessary data + +### Map to data using PDA "map" accounts + +For example, imagine a note-taking app where the underlying program generates +PDA accounts using random seeds, with each account storing an individual note. +Additionally, the program derives a single global PDA account, called the "map" +account, using a static seed like "GLOBAL_MAPPING." This map account maintains a +mapping of users' public keys to the list of PDAs where their notes are stored. + +To retrieve a user's notes, a lookup of the map account is performed to check +the list of addresses associated with a user's public key and retrieve the +account for each address. + +While such a solution is more approachable for traditional web developers, it +has some drawbacks that are particular to web3 development. Since the map size +stored in the map account will grow over time, each time you create a new note, +you must either allocate more space than necessary when creating the account or +reallocate space. Additionally, you will eventually reach the account size limit +of 10 megabytes. + +You can mitigate this issue to a certain degree by creating a separate map +account for each user. 
For example, you can construct a PDA map account per user +rather than having a single PDA map account for the entire program. These map +accounts are with the user's public key. You can then store the addresses for +each note inside the corresponding user's map account. + +This approach reduces the size required for each map account but ultimately +still adds an unnecessary requirement to the process: having to read the +information on the map account _before_ being able to find the accounts with the +relevant note data. + +There are instances where this approach is a viable choice for an application, +but it should be different from the default or recommended strategy. + +### Map to data using PDA derivation + +If you're strategic about the seeds you use to derive PDAs, you can embed the +required mappings into them. It is the natural evolution of the note-taking app +example we just discussed. If you start to use the note creator's public key as +a seed to create one map account per user, then why not use both the creator's +public key and some other known piece of information to derive a PDA for the +note? + +We've been mapping seeds to accounts this entire course and have yet to discuss +it explicitly. Think about the Movie Review program we've built in previous +lessons. This program uses a review creator's public key and the title of the +movie they're reviewing to find the address that _should_ be used to store the +review. This approach lets the program create a unique address for every new +review while making it easy to locate a review when needed. When you want to +find a user's review of "Spiderman", you can derive the PDA account's address +using the user's public key and the text "Spiderman" as seeds. + +```rust +let (pda, bump_seed) = Pubkey::find_program_address( + &[initializer.key.as_ref(), title.as_bytes().as_ref()], + program_id, +); +``` + +### Associated token account addresses + +Another practical example of this mapping type is determining associated token +account (ATA) addresses. An ATA is an address used to hold the tokens for a +specific account - for example, Jane's USDC account. The ATA address is derived +using: + +- the wallet address of the user +- the mint address of the token +- the token program used - either the older token program or the newer + [token extensions program ID](https://docs.rs/spl-token-2022/latest/spl_token_2022/fn.id.html). + +```toml +# ... +[dependencies] +spl-token-2022 = "" +spl-associated-token-account = "" +``` + +```rust +// Get the token extensions program ID +let token2022_program = spl_token_2022::id(); +let associated_token_address = spl_associated_token_account::get_associated_token_address_with_program_id(&wallet_address, &token_mint_address, &token2022_program); +``` + +Under the hood, the associated token address is a PDA found using the +`wallet_address`, `token_program_id`, and `token_mint_address` as seeds, +providing a deterministic way to find a token account associated with any wallet +address for a specific token mint. + +```rust +fn get_associated_token_address_and_bump_seed_internal( + wallet_address: &Pubkey, + token_mint_address: &Pubkey, + program_id: &Pubkey, + token_program_id: &Pubkey, +) -> (Pubkey, u8) { + Pubkey::find_program_address( + &[ + &wallet_address.to_bytes(), + &token_program_id.to_bytes(), + &token_mint_address.to_bytes(), + ], + program_id, + ) +} +``` + +The mappings between seeds and PDA accounts you use will depend highly on your +specific program. 
While this isn't a lesson on system design or architecture, +it's worth calling out a few guidelines: + +- Use seeds known at the time of PDA derivation +- Be thoughtful about how you group data into a single account +- Be thoughtful about the data structure used within each account +- Simpler is usually better + +## Lab + +Let's practice with the Movie Review program we've worked on in previous +lessons. No worries if you're jumping into this lesson without doing the last +lesson - it should be possible to follow along either way. + +As a refresher, the Movie Review program lets users create movie reviews. These +reviews are stored in an account using a PDA derived from the initializer's +public key and the movie title they are reviewing. + +Previously, we finished implementing the ability to update a movie review +securely. In this lab, we'll add the ability for users to comment on a movie +review. We'll use building this feature as an opportunity to work through how to +structure the comment storage using PDA accounts. + +### 1. Get the starter code + +To begin, you can find +[the movie program starter code](https://github.com/solana-developers/movie-program/tree/starter) +on the `starter` branch. + +If you've been following along with the Movie Review labs, you'll notice that +this is the program we've built out so far. Previously, we +used [Solana Playground](https://beta.solpg.io/) to write, build, and deploy our +code. In this lesson, we'll develop and deploy the program locally. Ensure that +`solana-test-validator` is running. + +Open the folder, then run `cargo build-bpf` to build the program. The +`cargo build-bpf` command will output a shared library for deployment inside the +`./target/deploy/` path. + +The `./target/deploy/` directory contains the shared library in the format +`.so` and the keypair that includes the public key +of the program in the format `-keypair.json`. + +```sh +cargo build-bpf +``` + +Deploy the program by copying the output of `cargo build-bpf` and running the +`solana program deploy` command. + +```sh +solana program deploy +``` + +You can test the program by using the movie review +[frontend](https://github.com/solana-developers/movie-frontend/tree/solution-update-reviews) +and updating the program ID with the one you've just deployed. Make sure you use +the `solution-update-reviews` branch. + +### 2. Plan out the account structure + +Adding comments means we must make a few decisions about storing the data +associated with each comment. The criteria for a good structure here are: + +- Not overly complicated +- Data is easily retrievable +- Each comment has something to link it to the review it's associated with + +To do this, we'll create two new account types: + +- Comment counter account +- Comment account + +There will be one comment counter account per review, and one account linked to +each comment posted. The comment counter account will be linked to a given +review by using the review's address as a seed for finding the comment counter +PDA. It will also use the static string "comment" as a seed. + +Link the comment account to a review in the same way. However, it will not +include the "comment" string as a seed; instead, it will use the _actual comment +count_ as a seed. That way, the client can easily retrieve comments for a given +review by doing the following: + +1. Read the data on the comment counter account to determine the number of + comments on a review. +2. Where `n` is the total number of comments on the review, loop `n` times. 
Each + loop iteration will derive a PDA using the review address and the current + number as seeds. The result is the `n` number of PDAs, each of which is the + address of an account that stores a comment. +3. Fetch the accounts for each of the `n` PDAs and read the stored data. + +Every one of our accounts can be deterministically retrieved using data that is +already known ahead of time. + +To implement these changes, do the following: + +- Define structs to represent the comment counter and comment accounts +- Update the existing `MovieAccountState` to contain a discriminator (more on + this later) +- Add an instruction variant to represent the `add_comment` instruction +- Update the existing `add_movie_review` instruction processing function to + include creating the comment counter account +- Create a new `add_comment` instruction processing function + +### 3. Define MovieCommentCounter and MovieComment structs + +Recall that the `state.rs` file defines the structs our program uses to populate +the data field of a new account. + +We'll need to define two new structs to enable commenting. + +1. `MovieCommentCounter` - to store a counter for the number of comments + associated with a review +2. `MovieComment` - to store data associated with each comment + +Let's define the structs we'll be using for our program. We add a +`discriminator` field to each struct, including the existing +`MovieAccountState`. Since we now have multiple account types, we only need a +way to fetch the account type we need from the client. This discriminator is a +string that will filter through accounts when we fetch our program accounts. + +```rust +#[derive(BorshSerialize, BorshDeserialize)] +pub struct MovieAccountState { + pub discriminator: String, + pub is_initialized: bool, + pub reviewer: Pubkey, + pub rating: u8, + pub title: String, + pub description: String, +} + +#[derive(BorshSerialize, BorshDeserialize)] +pub struct MovieCommentCounter { + pub discriminator: String, + pub is_initialized: bool, + pub counter: u64, +} + +#[derive(BorshSerialize, BorshDeserialize)] +pub struct MovieComment { + pub discriminator: String, + pub is_initialized: bool, + pub review: Pubkey, + pub commenter: Pubkey, + pub comment: String, + pub count: u64, +} + +impl Sealed for MovieAccountState {} + +impl IsInitialized for MovieAccountState { + fn is_initialized(&self) -> bool { + self.is_initialized + } +} + +impl IsInitialized for MovieCommentCounter { + fn is_initialized(&self) -> bool { + self.is_initialized + } +} + +impl IsInitialized for MovieComment { + fn is_initialized(&self) -> bool { + self.is_initialized + } +} +``` + +Since we've added a new `discriminator` field to our existing struct, the +account size calculation needs to change. Let's clean up some of our code. We'll +add an implementation for each of the three structs above that adds a constant +`DISCRIMINATOR` and either a constant `SIZE` or method `get_account_size` to +quickly get the size needed when initializing an account. 
+ +```rust +impl MovieAccountState { + pub const DISCRIMINATOR: &'static str = "review"; + + pub fn get_account_size(title: String, description: String) -> usize { + return (4 + MovieAccountState::DISCRIMINATOR.len()) + + 1 + + 1 + + (4 + title.len()) + + (4 + description.len()); + } +} + +impl MovieCommentCounter { + pub const DISCRIMINATOR: &'static str = "counter"; + pub const SIZE: usize = (4 + MovieCommentCounter::DISCRIMINATOR.len()) + 1 + 8; +} + +impl MovieComment { + pub const DISCRIMINATOR: &'static str = "comment"; + + pub fn get_account_size(comment: String) -> usize { + return (4 + MovieComment::DISCRIMINATOR.len()) + 1 + 32 + 32 + (4 + comment.len()) + 8; + } +} +``` + +Now, we can use this implementation everywhere we need the discriminator or +account size and not risk unintentional typos. + +### 4. Create AddComment instruction + +Recall that the `instruction.rs` file defines the instructions our program will +accept and how to deserialize the data for each. We need to add a new +instruction variant for adding comments. Let's start by adding a new variant +`AddComment,` to the `MovieInstruction` enum. + +```rust +pub enum MovieInstruction { + AddMovieReview { + title: String, + rating: u8, + description: String, + }, + UpdateMovieReview { + title: String, + rating: u8, + description: String, + }, + AddComment { + comment: String, + }, +} +``` + +Next, let's create a `CommentPayload` struct to represent the instruction data +associated with this new instruction. Most of the data we'll include in the +account are public keys associated with accounts passed into the program, so the +only thing we need here is a single field to represent the comment text. + +```rust +#[derive(BorshDeserialize)] +struct CommentPayload { + comment: String, +} +``` + +Now, update the unpacking of the instruction data. Notice that we've moved the +deserialization of instruction data into each matching case using the associated +payload struct for each instruction. + +```rust +impl MovieInstruction { + pub fn unpack(input: &[u8]) -> Result { + let (&variant, rest) = input + .split_first() + .ok_or(ProgramError::InvalidInstructionData)?; + Ok(match variant { + 0 => { + let payload = MovieReviewPayload::try_from_slice(rest).unwrap(); + Self::AddMovieReview { + title: payload.title, + rating: payload.rating, + description: payload.description, + } + } + 1 => { + let payload = MovieReviewPayload::try_from_slice(rest).unwrap(); + Self::UpdateMovieReview { + title: payload.title, + rating: payload.rating, + description: payload.description, + } + } + 2 => { + let payload = CommentPayload::try_from_slice(rest).unwrap(); + Self::AddComment { + comment: payload.comment, + } + } + _ => return Err(ProgramError::InvalidInstructionData), + }) + } +} +``` + +Lastly, update the `process_instruction` function in `processor.rs` to use our +new instruction variant. + +In `processor.rs`, import the new structs from `state.rs` into scope. + +```rust +use crate::state::{MovieAccountState, MovieCommentCounter, MovieComment}; +``` + +Then in `process_instruction`, match our deserialized `AddComment` instruction +data to the `add_comment` function we will be implementing shortly. 
+ +```rust +pub fn process_instruction( + program_id: &Pubkey, + accounts: &[AccountInfo], + instruction_data: &[u8], +) -> ProgramResult { + let instruction = MovieInstruction::unpack(instruction_data)?; + match instruction { + MovieInstruction::AddMovieReview { + title, + rating, + description, + } => add_movie_review(program_id, accounts, title, rating, description), + MovieInstruction::UpdateMovieReview { + title, + rating, + description, + } => update_movie_review(program_id, accounts, title, rating, description), + + MovieInstruction::AddComment { comment } => add_comment(program_id, accounts, comment), + } +} +``` + +### 5. Update add_movie_review to create a comment counter account. + +Before implementing the `add_comment` function, we need to update the +`add_movie_review` function to create the review's comment counter account. + +Remember that this account will keep track of the total number of comments for +an associated review. Its address will be a PDA derived using the movie review +address and the word "comment" as seeds. Note that how we store the counter is +simply a design choice. We could add a "counter" field to the original movie +review account. + +Within the `add_movie_review` function, let's add a `pda_counter` to represent +the new counter account we'll be initializing along with the movie review +account. Now, expect four accounts passed into the `add_movie_review` function +through the `accounts` argument. + +```rust +let account_info_iter = &mut accounts.iter(); + +let initializer = next_account_info(account_info_iter)?; +let pda_account = next_account_info(account_info_iter)?; +let pda_counter = next_account_info(account_info_iter)?; +let system_program = next_account_info(account_info_iter)?; +``` + +Next, there's a check to ensure `total_len` is less than 1000 bytes, but +`total_len` is no longer accurate since we added the discriminator. Let's +replace `total_len` with a call to `MovieAccountState::get_account_size`: + +```rust +let account_len: usize = 1000; + +if MovieAccountState::get_account_size(title.clone(), description.clone()) > account_len { + msg!("Data length is larger than 1000 bytes"); + return Err(ReviewError::InvalidDataLength.into()); +} +``` + +Remember to update the code within the `update_movie_review` function for that +instruction to work correctly. + +Once we've initialized the review account, we'll also need to update the +`account_data` with the new fields we specified in the `MovieAccountState` +struct. + +```rust +account_data.discriminator = MovieAccountState::DISCRIMINATOR.to_string(); +account_data.reviewer = *initializer.key; +account_data.title = title; +account_data.rating = rating; +account_data.description = description; +account_data.is_initialized = true; +``` + +Finally, let's add the logic to initialize the counter account within the +`add_movie_review` function by: + +1. Calculating the rent exemption amount for the counter account +2. Deriving the counter PDA using the review address and the string "comment" as + seeds +3. Invoking the system program to create the account +4. Set the starting counter value +5. Serialize the account data and return from the function + +Add these steps to the end of the `add_movie_review` function before the +`Ok(())`. 
+ +```rust +msg!("create comment counter"); +let rent = Rent::get()?; +let counter_rent_lamports = rent.minimum_balance(MovieCommentCounter::SIZE); + +let (counter, counter_bump) = + Pubkey::find_program_address(&[pda.as_ref(), "comment".as_ref()], program_id); +if counter != *pda_counter.key { + msg!("Invalid seeds for PDA"); + return Err(ProgramError::InvalidArgument); +} + +invoke_signed( + &system_instruction::create_account( + initializer.key, + pda_counter.key, + counter_rent_lamports, + MovieCommentCounter::SIZE.try_into().unwrap(), + program_id, + ), + &[ + initializer.clone(), + pda_counter.clone(), + system_program.clone(), + ], + &[&[pda.as_ref(), "comment".as_ref(), &[counter_bump]]], +)?; +msg!("comment counter created"); + +let mut counter_data = + try_from_slice_unchecked::(&pda_counter.data.borrow()).unwrap(); + +msg!("checking if counter account is already initialized"); +if counter_data.is_initialized() { + msg!("Account already initialized"); + return Err(ProgramError::AccountAlreadyInitialized); +} + +counter_data.discriminator = MovieCommentCounter::DISCRIMINATOR.to_string(); +counter_data.counter = 0; +counter_data.is_initialized = true; +msg!("comment count: {}", counter_data.counter); +counter_data.serialize(&mut &mut pda_counter.data.borrow_mut()[..])?; +``` + +The function initializes two accounts whenever it creates a new review: + +1. The first is the review account, which stores the review's contents. This is + unchanged from the program's version we started with. +2. The second account stores the counter for comments + +### 6. Implement add_comment + +Finally, implement the `add_comment` function to create new comment accounts. + +When creating a new comment for a review, the counter will be incremented on the +comment counter PDA account, and the PDA for the comment account will be derived +using the review address and current count. + +Like other instruction processing functions, we'll start by iterating through +accounts passed into the program. Then, before we do anything else, we need to +deserialize the counter account, so we have access to the current comment count: + +```rust +pub fn add_comment( + program_id: &Pubkey, + accounts: &[AccountInfo], + comment: String, +) -> ProgramResult { + msg!("Adding Comment..."); + msg!("Comment: {}", comment); + + let account_info_iter = &mut accounts.iter(); + + let commenter = next_account_info(account_info_iter)?; + let pda_review = next_account_info(account_info_iter)?; + let pda_counter = next_account_info(account_info_iter)?; + let pda_comment = next_account_info(account_info_iter)?; + let system_program = next_account_info(account_info_iter)?; + + let mut counter_data = + try_from_slice_unchecked::(&pda_counter.data.borrow()).unwrap(); + + Ok(()) +} +``` + +Now that we have access to the counter data, we can continue with the remaining +steps: + +1. Calculate the rent-exempt amount for the new comment account +2. Derive the PDA for the comment account using the review address and the + current comment count as seeds +3. Invoke the System Program to create the new comment account +4. Set the appropriate values to the newly created account +5. 
Serialize the account data and return from the method + +```rust +pub fn add_comment( + program_id: &Pubkey, + accounts: &[AccountInfo], + comment: String, +) -> ProgramResult { + msg!("Adding Comment..."); + msg!("Comment: {}", comment); + + let account_info_iter = &mut accounts.iter(); + + let commenter = next_account_info(account_info_iter)?; + let pda_review = next_account_info(account_info_iter)?; + let pda_counter = next_account_info(account_info_iter)?; + let pda_comment = next_account_info(account_info_iter)?; + let system_program = next_account_info(account_info_iter)?; + + let mut counter_data = + try_from_slice_unchecked::(&pda_counter.data.borrow()).unwrap(); + + let account_len = MovieComment::get_account_size(comment.clone()); + + let rent = Rent::get()?; + let rent_lamports = rent.minimum_balance(account_len); + + let (pda, bump_seed) = Pubkey::find_program_address( + &[ + pda_review.key.as_ref(), + counter_data.counter.to_be_bytes().as_ref(), + ], + program_id, + ); + if pda != *pda_comment.key { + msg!("Invalid seeds for PDA"); + return Err(ReviewError::InvalidPDA.into()); + } + + invoke_signed( + &system_instruction::create_account( + commenter.key, + pda_comment.key, + rent_lamports, + account_len.try_into().unwrap(), + program_id, + ), + &[ + commenter.clone(), + pda_comment.clone(), + system_program.clone(), + ], + &[&[ + pda_review.key.as_ref(), + counter_data.counter.to_be_bytes().as_ref(), + &[bump_seed], + ]], + )?; + + msg!("Created Comment Account"); + + let mut comment_data = + try_from_slice_unchecked::(&pda_comment.data.borrow()).unwrap(); + + msg!("checking if comment account is already initialized"); + if comment_data.is_initialized() { + msg!("Account already initialized"); + return Err(ProgramError::AccountAlreadyInitialized); + } + + comment_data.discriminator = MovieComment::DISCRIMINATOR.to_string(); + comment_data.review = *pda_review.key; + comment_data.commenter = *commenter.key; + comment_data.comment = comment; + comment_data.is_initialized = true; + comment_data.serialize(&mut &mut pda_comment.data.borrow_mut()[..])?; + + msg!("Comment Count: {}", counter_data.counter); + counter_data.counter += 1; + counter_data.serialize(&mut &mut pda_counter.data.borrow_mut()[..])?; + + Ok(()) +} +``` + +### 7. Build and deploy + +We're ready to build and deploy our program! + +Build the updated program by running `cargo build-bpf`. Run the command +`solana program deploy ` to deploy the program. + +You can test your program by submitting a transaction with the correct +instruction data. You can create your script or use +[this frontend](https://github.com/solana-developers/movie-frontend/tree/solution-add-comments). +Be sure to use the `solution-add-comments` branch and replace the +`MOVIE_REVIEW_PROGRAM_ID` in `utils/constants.ts` with your program's ID, or the +frontend won't work with your program. + +Remember that we made breaking changes to the review accounts (i.e., adding a +discriminator). If you were to use the same program ID you've used before adding +the discriminator when deploying this program, none of the reviews you created +will show on this frontend due to a data mismatch. + +If you need more time with this project to feel comfortable with these concepts, +have a look at +the [solution code](https://github.com/solana-developers/movie-program/tree/solution-add-comments) +before continuing. Note that the solution code is on the `solution-add-comments` +branch of the linked repository. 
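If you'd rather test the new instruction with a small script instead of the
frontend, here is one possible sketch of a client that calls `AddComment`. The
program ID, review address, and payer keypair are placeholders, and the byte
offsets assume the Borsh layout defined earlier in this lesson:

```typescript
import {
  Connection,
  Keypair,
  PublicKey,
  SystemProgram,
  Transaction,
  TransactionInstruction,
  sendAndConfirmTransaction,
} from "@solana/web3.js";

const connection = new Connection("https://api.devnet.solana.com", "confirmed");
const PROGRAM_ID = new PublicKey("REPLACE_WITH_YOUR_PROGRAM_ID");
const REVIEW_PDA = new PublicKey("REPLACE_WITH_AN_EXISTING_REVIEW_PDA");
const payer = Keypair.generate(); // replace with a funded keypair

async function addComment(comment: string) {
  // Comment counter PDA: seeds = [review address, "comment"]
  const [counterPda] = PublicKey.findProgramAddressSync(
    [REVIEW_PDA.toBuffer(), Buffer.from("comment")],
    PROGRAM_ID,
  );

  // Read the current count, skipping the "counter" discriminator
  // (4-byte length + 7 bytes) and the 1-byte is_initialized flag
  const counterAccount = await connection.getAccountInfo(counterPda);
  if (!counterAccount) throw new Error("Comment counter account not found");
  const count = counterAccount.data.readBigUInt64LE(12);

  // Comment PDA: seeds = [review address, count.to_be_bytes()]
  const countBytes = Buffer.alloc(8);
  countBytes.writeBigUInt64BE(count);
  const [commentPda] = PublicKey.findProgramAddressSync(
    [REVIEW_PDA.toBuffer(), countBytes],
    PROGRAM_ID,
  );

  // Instruction data: variant 2, then the comment as a Borsh string
  const commentBytes = Buffer.from(comment, "utf8");
  const data = Buffer.alloc(1 + 4 + commentBytes.length);
  data.writeUInt8(2, 0);
  data.writeUInt32LE(commentBytes.length, 1);
  commentBytes.copy(data, 5);

  const instruction = new TransactionInstruction({
    programId: PROGRAM_ID,
    keys: [
      { pubkey: payer.publicKey, isSigner: true, isWritable: true },
      { pubkey: REVIEW_PDA, isSigner: false, isWritable: false },
      { pubkey: counterPda, isSigner: false, isWritable: true },
      { pubkey: commentPda, isSigner: false, isWritable: true },
      { pubkey: SystemProgram.programId, isSigner: false, isWritable: false },
    ],
    data,
  });

  const signature = await sendAndConfirmTransaction(
    connection,
    new Transaction().add(instruction),
    [payer],
  );
  console.log("Comment added:", signature);
}
```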
+ +## Challenge + +Now it's your turn to build something independently! Go ahead and work with the +Student Intro program that we've used in past lessons. The Student Intro program +is a Solana program that lets students introduce themselves. This program takes +a user's name and a short message as the `instruction_data` and creates an +account to store the data onchain. For this challenge, you should: + +1. Add an instruction allowing other users to reply to an intro +2. Build and deploy the program locally + +If you haven't been following along with past lessons or haven't saved your work +from before, feel free to use the starter code on the `starter` branch of +[solana-student-intro-program](https://github.com/solana-developers/student-intro-program/tree/starter). + +Try to do this independently! If you get stuck, though, you can reference the +[solution code](https://github.com/solana-developers/student-intro-program/tree/solution-add-replies). +Note that the solution code is on the `solution-add-replies` branch and that +your code may look slightly different. + + + +Push your code to GitHub and [tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=89d367b4-5102-4237-a7f4-4f96050fe57e)! + + diff --git a/content/courses/native-onchain-development/program-security.mdx b/content/courses/native-onchain-development/program-security.mdx new file mode 100644 index 000000000..8dffeaabb --- /dev/null +++ b/content/courses/native-onchain-development/program-security.mdx @@ -0,0 +1,956 @@ +--- +title: Create a Basic Program, Part 3 - Basic Security and Validation +objectives: + - Understand why "thinking like an attacker" is essential in securing Solana + programs. + - Learn and implement core security practices to protect your program. + - Perform owner and signer checks to verify account ownership and transaction + authenticity. + - Validate the accounts passed into your program to ensure they are what you + expect. + - Conduct basic data validation to prevent invalid or malicious input from + compromising your program. +description: + "Learn how to secure your Solana program with ownership, signer, and account + validation checks." +--- + +## Summary + +- **Thinking like an attacker** is about shifting your mindset to proactively + identify potential security gaps by asking, "How do I break this?" +- **Owner checks** ensure that an account is controlled by the expected public + key, such as verifying that a PDA (Program Derived Address) is owned by the + program. +- **Signer checks** confirm that the right parties have signed the transaction, + allowing for safe modifications to accounts. +- **Account validation** is used to ensure that the accounts passed into your + program match your expectations, like checking the correctness of a PDA's + derivation. +- **Data validation** verifies that the instruction data provided to your + program adheres to specific rules or constraints, ensuring it doesn't lead to + unintended behavior. + +## Lesson + +In the previous lessons +[deserialize instruction data](/developers/courses/native-onchain-development/deserialize-instruction-data) +and +[program state management](/developers/courses/native-onchain-development/program-state-management), +we built a Movie Review program, and while getting it to function was exciting, +secure development doesn't stop at "just working." 
It's critical to understand +potential failure points and take proactive steps to secure your program against +both accidental misuse and intentional exploitation. + +Remember, **you have no control over the transactions that will be sent to your +program once it's deployed**. You can only control how your program handles +them. While this lesson is far from a comprehensive overview of program +security, we'll cover some of the basic pitfalls to look out for. + +### Think Like an Attacker + +A fundamental principle in secure programming is adopting an "attacker's +mindset." This means considering every possible angle someone might use to break +or exploit your program. + +In their presentation at Breakpoint 2021, +[Neodyme](https://workshop.neodyme.io/) emphasized that secure program +development isn't just about identifying when something is broken; it's about +exploring how it can be broken. By asking, "How do I break this?" you shift from +simply testing expected functionality to uncovering potential weaknesses in the +implementation itself. + +All programs, regardless of complexity, can be exploited. The goal isn't to +achieve absolute security (which is impossible) but to make it as difficult as +possible for malicious actors to exploit weaknesses. By adopting this mindset, +you're better prepared to identify and close gaps in your program's security. + +#### All Programs Can Be Broken + +Every program has vulnerabilities. The question isn't whether it can be broken, +but how much effort it takes. As developers, our goal is to close as many +security gaps as possible and increase the effort required to break our code. +For example, while our Movie Review program creates accounts to store reviews, +there may be unintentional behaviors that could be caught by thinking like an +attacker. In this lesson, we'll explore these issues and how to address them. + +### Error handling + +Before we dive into some of the common security pitfalls and how to avoid them, +it's important to know how to use errors in your program. Security issues in a +Solana program often requires terminating the execution with a meaningful error. +Not all errors are catastrophic, but some should result in stopping the program +and returning an appropriate error code to prevent further processing. + +#### Creating Custom Errors + +Solana's +[`solana_program`](https://docs.rs/solana-program/latest/solana_program/) crate +provides a generic +[`ProgramError`](https://docs.rs/solana-program/latest/solana_program/program_error/enum.ProgramError.html) +enum for error handling. However, custom errors allow you to provide more +detailed, context-specific information that helps during debugging and testing. + +We can define our own errors by creating an enum type listing the errors we want +to use. For example, the `NoteError` contains variants `Forbidden` and +`InvalidLength`. The enum is made into a Rust `Error` type by using the `derive` +attribute macro to implement the `Error` trait from the +[`thiserror`](https://docs.rs/thiserror/latest/thiserror/) library. Each error +type also has its own `#[error("...")]` notation. This lets you provide an error +message for each particular error type. 
Here's an example of how you can define custom errors in your program:

```rust
use solana_program::program_error::ProgramError;
use thiserror::Error;

#[derive(Error, Debug)]
pub enum NoteError {
    #[error("Unauthorized access - You don't own this note.")]
    Forbidden,

    #[error("Invalid note length - The text exceeds the allowed limit.")]
    InvalidLength,
}
```

In this example, we create custom errors for unauthorized access and invalid
data input (such as note length). Defining custom errors gives us greater
flexibility when debugging or explaining what went wrong during execution.

#### Returning Errors

The compiler expects errors returned by the program to be of type
`ProgramError` from the `solana_program` crate. That means we won't be able to
return our custom error unless we have a way to convert it into this type. The
following implementation handles the conversion from our custom error to the
`ProgramError` type:

```rust
impl From<NoteError> for ProgramError {
    fn from(e: NoteError) -> Self {
        ProgramError::Custom(e as u32)
    }
}
```

To return the custom error from the program, simply use the `into()` method to
convert the error into an instance of `ProgramError`.

```rust
if pda != *note_pda.key {
    return Err(NoteError::Forbidden.into());
}
```

This ensures the program handles errors gracefully and provides meaningful
feedback when things go wrong.

### Basic Security Checks

To ensure your Solana program is resilient against common vulnerabilities, you
should incorporate key security checks. These are critical for detecting
invalid accounts or unauthorized transactions and preventing undesired
behavior.

#### Ownership Checks

An ownership check verifies that an account is owned by the expected program.
For instance, if your program relies on PDAs (Program Derived Addresses), you
want to ensure that those PDAs are controlled by your program and not by an
external party.

Let's use the note-taking app example that we've referenced in the
[deserialize instruction data](/developers/courses/native-onchain-development/deserialize-instruction-data)
and
[program state management](/developers/courses/native-onchain-development/program-state-management)
lessons. In this app, users can create, update, and delete notes that the
program stores in PDA accounts.

When a user invokes the `update` instruction handler, they also provide a
`pda_account`. We presume the provided `pda_account` is for the particular note
they want to update, but the user can pass in any account they want. They could
even pass an account whose data matches the format of a note account but that
was not created by the note-taking program. This is one way a security
vulnerability can be introduced.

The simplest way to avoid this problem is to always check that the owner of an
account is the public key you expect it to be. In this case, we expect the note
account to be a PDA owned by the program itself. When this is not the case, we
can report it as an error accordingly.

Here's how you can perform an ownership check to verify that an account is
owned by the program:

```rust
if note_pda.owner != program_id {
    return Err(ProgramError::IllegalOwner);
}
```

In this example, we check that `note_pda` is owned by the program itself
(denoted by `program_id`). Ownership checks like these prevent unauthorized
entities from tampering with critical accounts.
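If you want the failure reason to be more descriptive than the built-in variants, the same custom-error pattern from above applies here as well. Here's a minimal sketch, assuming the `NoteError` enum and its `From` conversion shown earlier, and reusing the existing `Forbidden` variant purely for illustration:

```rust
// Sketch: the same owner check, surfacing a custom error instead of a
// built-in ProgramError variant. Assumes `note_pda` is the account passed in
// by the client and `NoteError` is the enum defined above.
if note_pda.owner != program_id {
    msg!("Note account is not owned by this program");
    return Err(NoteError::Forbidden.into());
}
```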
+ + + +PDAs are often considered to be trusted stores of a program's state. Ensuring +the correct program owns the PDAs is a fundamental way to prevent malicious +behavior. + + + +#### Signer Checks + +Signer checks confirm that a transaction has been signed by the correct parties. +In the note-taking app, for example, we want to verify that only the note +creator can update the note. Without this check, anyone could attempt to modify +another user's note by passing in their public key. + +```rust +if !initializer.is_signer { + msg!("Missing required signature"); + return Err(ProgramError::MissingRequiredSignature) +} +``` + +By verifying that the initializer has signed the transaction, we ensure that +only the legitimate owner of the account can perform actions on it. + +#### Account Validation + +Account validation checks that the accounts passed into the program are correct +and valid. This is often done by deriving the expected account using known seeds +(for PDAs) and comparing it to the passed account. + +For instance, in the note-taking app, you can derive the expected PDA using the +creator's public key and note ID, and then validate that it matches the provided +account: + +```rust +let (expected_pda, bump_seed) = Pubkey::find_program_address( + &[ + note_creator.key.as_ref(), + id.as_bytes().as_ref(), + ], + program_id +); + +if expected_pda != *note_pda.key { + msg!("Invalid seeds for PDA"); + return Err(ProgramError::InvalidArgument) +} +``` + +This check prevents a user from accidentally (or maliciously) passing the wrong +PDA or one that belongs to someone else. By validating the PDA's derivation, you +ensure the program is acting on the correct account. + +### Data Validation + +Data validation ensures that the input provided to your program meets the +expected criteria. This is crucial for avoiding incorrect or malicious data that +could cause the program to behave unpredictably. + +For example, let's say your program allows users to allocate points to a +character's attributes, but each attribute has a maximum allowed value. Before +making any updates, you should check that the new allocation does not exceed the +defined limit: + +```rust +if character.agility + new_agility > 100 { + msg!("Attribute points cannot exceed 100"); + return Err(AttributeError::TooHigh.into()) +} +``` + +Similarly, you should check that the user is not exceeding their allowed number +of points: + +```rust +if attribute_allowance < new_agility { + msg!("Trying to allocate more points than allowed"); + return Err(AttributeError::ExceedsAllowance.into()) +} +``` + +Without these validations, the program could end up in an undefined state or be +exploited by malicious actors, potentially causing financial loss or +inconsistent behavior. + +For example, imagine that the character referenced in these examples is an NFT. +Further, imagine that the program allows the NFT to be staked to earn token +rewards proportional to the NFTs number of attribute points. Failure to +implement these data validation checks would allow a bad actor to assign an +obscenely high number of attribute points and quickly drain your treasury of all +the rewards that were meant to be spread more evenly amongst a larger pool of +stakers. + +#### Integer overflow and underflow + +One of the common pitfalls when working with integers in Rust (and in Solana +programs) is handling integer overflow and underflow. Rust integers have fixed +sizes and can only hold values within a certain range. 
When a value exceeds that +range, it wraps around, leading to unexpected results. + +For example, with a `u8` (which holds values between 0 and 255), adding 1 to 255 +results in a value of 0 (overflow). To avoid this, you should use checked math +functions like +[`checked_add()`](https://doc.rust-lang.org/std/primitive.u8.html#method.checked_add) +and +[`checked_sub()`](https://doc.rust-lang.org/std/primitive.u8.html#method.checked_sub): + +To avoid integer overflow and underflow, either: + +1. Have logic in place that ensures overflow or underflow _cannot_ happen or +2. Use checked math like `checked_add()` instead of `+` + + ```rust + let first_int: u8 = 5; + let second_int: u8 = 255; + let sum = first_int.checked_add(second_int) + .ok_or(ProgramError::ArithmeticOverflow)?; + ``` + +## Lab + +In this lab, we will build upon the Movie Review program that allows users to +store movie reviews in PDA accounts. If you haven't completed the previous +lessons +[deserialize instruction data](/developers/courses/native-onchain-development/deserialize-instruction-data) +and +[program state management](/developers/courses/native-onchain-development/program-state-management), +don't worry—this guide is self-contained. + +The Movie Review program lets users add and update reviews in PDA accounts. In +previous lessons, we implemented basic functionality for adding reviews. Now, +we'll add security checks and implement an update feature in a secure manner. +We'll use [Solana Playground](https://beta.solpg.io/) to write, build, and +deploy our program. + +### 1. Get the starter code + +To begin, you can find +[the movie review starter code](https://beta.solpg.io/62b552f3f6273245aca4f5c9). +If you've been following along with the Movie Review labs, you'll notice that +we've refactored our program. + +The refactored starter code is almost the same as what it was before. Since +`lib.rs` was getting rather large and unwieldy, we've separated its code into 3 +files: `lib.rs`, `entrypoint.rs`, and `processor.rs`. `lib.rs` now _only_ +registers the code's modules, `entrypoint.rs` _only_ defines and sets the +program's entrypoint, and `processor.rs` handles the program logic for +processing instructions. We've also added an `error.rs` file where we'll be +defining custom errors. The complete file structure is as follows: + +- **lib.rs** - register modules +- **entrypoint.rs -** entry point to the program +- **instruction.rs -** serialize and deserialize instruction data +- **processor.rs -** program logic to process instructions +- **state.rs -** serialize and deserialize state +- **error.rs -** custom program errors + +In addition to some changes to the file structure, we've updated a small amount +of code that will let this lab be more focused on security without having you +write unnecessary boilerplate. + +Since we'll be allowing updates to movie reviews, we also changed `account_len` +in the `add_movie_review()` function (now in `processor.rs`). Instead of +calculating the size of the review and setting the account length to only as +large as it needs to be, we're simply going to allocate 1000 bytes to each +review account. This way, we don't have to worry about reallocating size or +re-calculating rent when a user updates their movie review. 
+ +We went from this: + +```rust +let account_len: usize = 1 + 1 + (4 + title.len()) + (4 + description.len()); +``` + +To this: + +```rust +let account_len: usize = 1000; +``` + +The [realloc](https://docs.rs/solana-sdk/latest/solana_sdk/account_info/struct.AccountInfo.html#method.realloc) method +allows you to dynamically change the size of your accounts. We will not be using +this method for this lab, but it's something to be aware of. + +Finally, we've also implemented some additional functionality for our +`MovieAccountState` struct in `state.rs` using the `impl` keyword. + +For our movie reviews, we want the ability to check whether an account has +already been initialized. To do this, we create an `is_initialized` function +that checks the `is_initialized` field on the `MovieAccountState` struct. + +`Sealed` is Solana's version of Rust's `Sized` trait. This simply specifies that +`MovieAccountState` has a known size and provides for some compiler +optimizations. + +```rust title="state.rs" +impl Sealed for MovieAccountState {} + +impl IsInitialized for MovieAccountState { + fn is_initialized(&self) -> bool { + self.is_initialized + } +} +``` + +Before moving on, make sure you have a solid grasp of the current state of the +program. Look through the code and spend some time thinking through any spots +that are confusing to you. It may be helpful to compare the starter code to the +[solution code from the previous lesson](https://beta.solpg.io/62b23597f6273245aca4f5b4). + +### 2. Custom Errors + +We'll define custom errors to handle cases like uninitialized accounts, invalid +PDA matches, exceeding data limits, and invalid ratings (ratings must be between +1 and 5). These errors will be added to the `error.rs` file: + +The starter code includes an empty `error.rs` file. Open that file and add +errors for each of the above cases. + +```rust title="error.rs" +use solana_program::{program_error::ProgramError}; +use thiserror::Error; + +#[derive(Debug, Error)] +pub enum ReviewError{ + // Error 0 + #[error("Account not initialized yet")] + UninitializedAccount, + // Error 1 + #[error("PDA derived does not equal PDA passed in")] + InvalidPDA, + // Error 2 + #[error("Input data exceeds max length")] + InvalidDataLength, + // Error 3 + #[error("Rating greater than 5 or less than 1")] + InvalidRating, +} + +impl From for ProgramError { + fn from(e: ReviewError) -> Self { + ProgramError::Custom(e as u32) + } +} +``` + +Note that in addition to adding the error cases, we also added an implementation +that lets us convert our error into a `ProgramError` type as needed. + +After adding the errors, import `ReviewError` in `processor.rs` to use them. + +```rust title="processor.rs" +use crate::error::ReviewError; +``` + +### 3. Add Security Checks to add_movie_review + +Now that we have errors to use, let's implement some security checks to our +`add_movie_review` function. + +#### Signer check + +The first thing we should do is ensure that the `initializer` of a review is +also a signer on the transaction. This ensures that you can't submit movie +reviews impersonating somebody else. We'll put this check right after iterating +through the accounts. 
```rust title="processor.rs"
let account_info_iter = &mut accounts.iter();

let initializer = next_account_info(account_info_iter)?;
let pda_account = next_account_info(account_info_iter)?;
let system_program = next_account_info(account_info_iter)?;

if !initializer.is_signer {
    msg!("Missing required signature");
    return Err(ProgramError::MissingRequiredSignature)
}
```

#### Account validation

Next, let's make sure the `pda_account` passed in by the user is the `pda` we
expect. Recall that we derived the `pda` for a movie review using the
`initializer` and `title` as seeds. Within our instruction, we'll derive the
`pda` again and then check whether it matches the `pda_account`. If the
addresses do not match, we'll return our custom `InvalidPDA` error.

```rust title="processor.rs"
// Derive PDA and check that it matches client
let (pda, _bump_seed) = Pubkey::find_program_address(
    &[initializer.key.as_ref(), title.as_bytes().as_ref()],
    program_id,
);

if pda != *pda_account.key {
    msg!("Invalid seeds for PDA");
    return Err(ReviewError::InvalidPDA.into())
}
```

#### Data validation

Now let's perform some data validation.

We'll start by making sure `rating` falls within the 1 to 5 scale. If the
rating provided by the user is outside of this range, we'll return our custom
`InvalidRating` error.

```rust title="processor.rs"
if rating > 5 || rating < 1 {
    msg!("Rating cannot be higher than 5");
    return Err(ReviewError::InvalidRating.into())
}
```

Next, let's check that the content of the review does not exceed the 1000 bytes
we've allocated for the account. If the size exceeds 1000 bytes, we'll return
our custom `InvalidDataLength` error.

```rust title="processor.rs"
let total_len: usize = 1 + 1 + (4 + title.len()) + (4 + description.len());
if total_len > 1000 {
    msg!("Data length is larger than 1000 bytes");
    return Err(ReviewError::InvalidDataLength.into())
}
```

Lastly, let's check whether the account has already been initialized by calling
the `is_initialized` function we implemented for our `MovieAccountState`. If
the account already exists, we'll return an error.
+ +```rust title="processor.rs" +if account_data.is_initialized() { + msg!("Account already initialized"); + return Err(ProgramError::AccountAlreadyInitialized); +} +``` + +Altogether, the `add_movie_review()` function should look something like this: + +```rust title="processor.rs" +pub fn add_movie_review( + program_id: &Pubkey, + accounts: &[AccountInfo], + title: String, + rating: u8, + description: String +) -> ProgramResult { + msg!("Adding movie review..."); + msg!("Title: {}", title); + msg!("Rating: {}", rating); + msg!("Description: {}", description); + + let account_info_iter = &mut accounts.iter(); + + let initializer = next_account_info(account_info_iter)?; + let pda_account = next_account_info(account_info_iter)?; + let system_program = next_account_info(account_info_iter)?; + + if !initializer.is_signer { + msg!("Missing required signature"); + return Err(ProgramError::MissingRequiredSignature) + } + + let (pda, bump_seed) = Pubkey::find_program_address(&[initializer.key.as_ref(), title.as_bytes().as_ref(),], program_id); + if pda != *pda_account.key { + msg!("Invalid seeds for PDA"); + return Err(ProgramError::InvalidArgument) + } + + if rating > 5 || rating < 1 { + msg!("Rating cannot be higher than 5"); + return Err(ReviewError::InvalidRating.into()) + } + + let total_len: usize = 1 + 1 + (4 + title.len()) + (4 + description.len()); + if total_len > 1000 { + msg!("Data length is larger than 1000 bytes"); + return Err(ReviewError::InvalidDataLength.into()) + } + + let account_len: usize = 1000; + + let rent = Rent::get()?; + let rent_lamports = rent.minimum_balance(account_len); + + invoke_signed( + &system_instruction::create_account( + initializer.key, + pda_account.key, + rent_lamports, + account_len.try_into().unwrap(), + program_id, + ), + &[initializer.clone(), pda_account.clone(), system_program.clone()], + &[&[initializer.key.as_ref(), title.as_bytes().as_ref(), &[bump_seed]]], + )?; + + msg!("PDA created: {}", pda); + + msg!("unpacking state account"); + let mut account_data = try_from_slice_unchecked::(&pda_account.data.borrow()).unwrap(); + msg!("borrowed account data"); + + msg!("checking if movie account is already initialized"); + if account_data.is_initialized() { + msg!("Account already initialized"); + return Err(ProgramError::AccountAlreadyInitialized); + } + + account_data.title = title; + account_data.rating = rating; + account_data.description = description; + account_data.is_initialized = true; + + msg!("serializing account"); + account_data.serialize(&mut &mut pda_account.data.borrow_mut()[..])?; + msg!("state account serialized"); + + Ok(()) +} +``` + +### 4. Support Movie Review Updates in MovieInstruction + +Next, we'll modify `instruction.rs` to add support for updating movie reviews. +We'll introduce a new `UpdateMovieReview()` variant in `MovieInstruction`: + +```rust title="instruction.rs" +pub enum MovieInstruction { + AddMovieReview { + title: String, + rating: u8, + description: String + }, + UpdateMovieReview { + title: String, + rating: u8, + description: String + } +} +``` + +The payload struct can stay the same since aside from the variant type, the +instruction data is the same as what we used for `AddMovieReview()`. + +We'll also update the `unpack()` function to handle `UpdateMovieReview()`. 
```rust title="instruction.rs"
// Inside instruction.rs
impl MovieInstruction {
    pub fn unpack(input: &[u8]) -> Result<Self, ProgramError> {
        let (&variant, rest) = input.split_first().ok_or(ProgramError::InvalidInstructionData)?;
        let payload = MovieReviewPayload::try_from_slice(rest).unwrap();
        Ok(match variant {
            0 => Self::AddMovieReview {
                title: payload.title,
                rating: payload.rating,
                description: payload.description,
            },
            1 => Self::UpdateMovieReview {
                title: payload.title,
                rating: payload.rating,
                description: payload.description,
            },
            _ => return Err(ProgramError::InvalidInstructionData),
        })
    }
}
```

### 5. Define update_movie_review Function

Now that we can unpack our `instruction_data` and determine which instruction
of the program to run, we can add `UpdateMovieReview()` to the match statement
in the `process_instruction()` function in the `processor.rs` file.

```rust title="processor.rs"
// Inside processor.rs
pub fn process_instruction(
    program_id: &Pubkey,
    accounts: &[AccountInfo],
    instruction_data: &[u8]
) -> ProgramResult {
    // Unpack instruction data
    let instruction = MovieInstruction::unpack(instruction_data)?;
    match instruction {
        MovieInstruction::AddMovieReview { title, rating, description } => {
            add_movie_review(program_id, accounts, title, rating, description)
        },
        // Add UpdateMovieReview to match against our new data structure
        MovieInstruction::UpdateMovieReview { title, rating, description } => {
            // Make call to update function that we'll define next
            update_movie_review(program_id, accounts, title, rating, description)
        }
    }
}
```

Next, we can define the new `update_movie_review()` function. It should take
the same parameters as `add_movie_review()`.

```rust title="processor.rs"
pub fn update_movie_review(
    program_id: &Pubkey,
    accounts: &[AccountInfo],
    title: String,
    rating: u8,
    description: String
) -> ProgramResult {
    Ok(())
}
```

### 6. Implement update_movie_review Function

All that's left now is to fill in the logic for updating a movie review. This
time, let's make it secure from the start.

Just like the `add_movie_review()` function, let's start by iterating through
the accounts. The only accounts we'll need are the first two: `initializer` and
`pda_account`.

```rust title="processor.rs"
pub fn update_movie_review(
    program_id: &Pubkey,
    accounts: &[AccountInfo],
    title: String,
    rating: u8,
    description: String
) -> ProgramResult {
    msg!("Updating movie review...");

    // Get Account iterator
    let account_info_iter = &mut accounts.iter();

    // Get accounts
    let initializer = next_account_info(account_info_iter)?;
    let pda_account = next_account_info(account_info_iter)?;

    Ok(())
}
```

#### Ownership Check

Before we continue, let's implement some basic security checks. We'll start
with an ownership check on `pda_account` to verify that it is owned by our
program. If it isn't, we'll return an `IllegalOwner` error.

```rust title="processor.rs"
if pda_account.owner != program_id {
    return Err(ProgramError::IllegalOwner)
}
```

#### Signer Check

Next, let's perform a signer check to verify that the `initializer` of the
update instruction has also signed the transaction. Since we are updating the
data for a movie review, we want to ensure that the original `initializer` of
the review has approved the changes by signing the transaction. If the
`initializer` did not sign the transaction, we'll return an error.
+ +```rust title="processor.rs" +if !initializer.is_signer { + msg!("Missing required signature"); + return Err(ProgramError::MissingRequiredSignature) +} +``` + +#### Account Validation + +Next, let's check that the `pda_account` passed in by the user is the PDA we +expect by deriving the PDA using `initializer` and `title` as seeds. If the +addresses do not match, we'll return our custom `InvalidPDA` error. We'll +implement this the same way we did in the `add_movie_review()` function. + +```rust title="processor.rs" +// Derive PDA and check that it matches client +let (pda, _bump_seed) = Pubkey::find_program_address(&[initializer.key.as_ref(), account_data.title.as_bytes().as_ref(),], program_id); + +if pda != *pda_account.key { + msg!("Invalid seeds for PDA"); + return Err(ReviewError::InvalidPDA.into()) +} +``` + +#### Unpack pda_account and Perform Data Validation + +Now that our code ensures we can trust the passed in accounts, let's unpack the +`pda_account` and perform some data validation. We'll start by unpacking +`pda_account` and assigning it to a mutable variable `account_data`. + +```rust title="processor.rs" +msg!("unpacking state account"); +let mut account_data = try_from_slice_unchecked::(&pda_account.data.borrow()).unwrap(); +msg!("borrowed account data"); +``` + +Now that we have access to the account and its fields, the first thing we need +to do is verify that the account has already been initialized. An uninitialized +account can't be updated so the program should return our custom +`UninitializedAccount` error. + +```rust +if !account_data.is_initialized() { + msg!("Account is not initialized"); + return Err(ReviewError::UninitializedAccount.into()); +} +``` + +Next, we need to validate the `rating`, `title`, and `description` data just +like in the `add_movie_review()` function. We want to limit the `rating` to a +scale of 1 to 5 and limit the overall size of the review to be fewer than 1000 +bytes. If the rating provided by the user is outside of this range, then we'll +return our custom `InvalidRating` error. If the review is too long, then we'll +return our custom `InvalidDataLength` error. + +```rust title="processor.rs" +if rating > 5 || rating < 1 { + msg!("Rating cannot be higher than 5"); + return Err(ReviewError::InvalidRating.into()) +} + +let total_len: usize = 1 + 1 + (4 + account_data.title.len()) + (4 + description.len()); +if total_len > 1000 { + msg!("Data length is larger than 1000 bytes"); + return Err(ReviewError::InvalidDataLength.into()) +} +``` + +#### Update the movie review account + +Now that we've implemented all of the security checks, we can finally update the +movie review account by updating `account_data` and re-serializing it. At that +point, we can return `Ok` from our program. + +```rust title="processor.rs" +account_data.rating = rating; +account_data.description = description; + +account_data.serialize(&mut &mut pda_account.data.borrow_mut()[..])?; + +Ok(()) +``` + +All together, the `update_movie_review()` function should look something like +the code snippet below. We've included some additional logging for clarity in +debugging. 
+ +```rust title="processor.rs" +pub fn update_movie_review( + program_id: &Pubkey, + accounts: &[AccountInfo], + title: String, + rating: u8, + description: String +) -> ProgramResult { + msg!("Updating movie review..."); + + let account_info_iter = &mut accounts.iter(); + + let initializer = next_account_info(account_info_iter)?; + let pda_account = next_account_info(account_info_iter)?; + + if pda_account.owner != program_id { + return Err(ProgramError::IllegalOwner) + } + + if !initializer.is_signer { + msg!("Missing required signature"); + return Err(ProgramError::MissingRequiredSignature) + } + + msg!("unpacking state account"); + let mut account_data = try_from_slice_unchecked::(&pda_account.data.borrow()).unwrap(); + msg!("review title: {}", account_data.title); + + let (pda, _bump_seed) = Pubkey::find_program_address(&[initializer.key.as_ref(), account_data.title.as_bytes().as_ref(),], program_id); + if pda != *pda_account.key { + msg!("Invalid seeds for PDA"); + return Err(ReviewError::InvalidPDA.into()) + } + + msg!("checking if movie account is initialized"); + if !account_data.is_initialized() { + msg!("Account is not initialized"); + return Err(ReviewError::UninitializedAccount.into()); + } + + if rating > 5 || rating < 1 { + msg!("Invalid Rating"); + return Err(ReviewError::InvalidRating.into()) + } + + let update_len: usize = 1 + 1 + (4 + description.len()) + account_data.title.len(); + if update_len > 1000 { + msg!("Data length is larger than 1000 bytes"); + return Err(ReviewError::InvalidDataLength.into()) + } + + msg!("Review before update:"); + msg!("Title: {}", account_data.title); + msg!("Rating: {}", account_data.rating); + msg!("Description: {}", account_data.description); + + account_data.rating = rating; + account_data.description = description; + + msg!("Review after update:"); + msg!("Title: {}", account_data.title); + msg!("Rating: {}", account_data.rating); + msg!("Description: {}", account_data.description); + + msg!("serializing account"); + account_data.serialize(&mut &mut pda_account.data.borrow_mut()[..])?; + msg!("state account serialized"); + + Ok(()) +} +``` + +### 7. Build and upgrade + +We're ready to build and upgrade our program! You can test your program by +submitting a transaction with the right instruction data. For that, feel free to +use this +[frontend](https://github.com/solana-developers/movie-frontend/tree/solution-update-reviews). +Remember, to make sure you're testing the right program you'll need to replace +`MOVIE_REVIEW_PROGRAM_ID` with your program ID in `Form.tsx` and +`MovieCoordinator.ts`. + +If you need more time with this project to feel comfortable with these concepts, +have a look at the +[solution code](https://beta.solpg.io/62c8c6dbf6273245aca4f5e7) before +continuing. + +## Challenge + +Now it's your turn to build something independently by building on top of the +Student Intro program that you've used in previous lessons. If you haven't been +following along or haven't saved your code before, feel free to use +[this starter code](https://beta.solpg.io/62b11ce4f6273245aca4f5b2). + +The Student Intro program is a Solana Program that lets students introduce +themselves. The program takes a user's name and a short message as the +instruction_data and creates an account to store the data onchain. + +Using what you've learned in this lesson, try applying what you've learned to +the Student Intro Program. The program should: + +1. Add an instruction allowing students to update their message +2. 
Implement the basic security checks we've learned in this lesson + +Try to do this independently if you can! But if you get stuck, feel free to +reference the [solution code](https://beta.solpg.io/62c9120df6273245aca4f5e8). +Note that your code may look slightly different than the solution code depending +on the checks you implement and the errors you write. + + + +Push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=3dfb98cc-7ba9-463d-8065-7bdb1c841d43)! + + diff --git a/content/courses/native-onchain-development/program-state-management.mdx b/content/courses/native-onchain-development/program-state-management.mdx new file mode 100644 index 000000000..e54c46dbc --- /dev/null +++ b/content/courses/native-onchain-development/program-state-management.mdx @@ -0,0 +1,597 @@ +--- +title: Create a Basic Program, Part 2 - State Management +objectives: + - Describe the process of creating a new account using a Program Derived + Address (PDA) + - Demonstrate how to use seeds to derive a PDA + - Use the space required by an account to calculate the amount of rent (in + lamports) a user must allocate + - Use a Cross Program Invocation (CPI) to initialize an account with a PDA as + the address of the new account + - Explain how to update the data stored on a new account +description: + "Learn how programs store data using Solana's built-in key-value store." +--- + +## Summary + +- Program state is stored in other accounts, not in the program itself. +- State is stored in Program Derived Address (PDA) accounts, which are generated + from a program ID and optional seeds. The data within a PDA is defined by the + programmer. +- Creating an account requires calculating the necessary space and corresponding + rent in lamports. +- A Cross Program Invocation (CPI) to the `create_account` instruction handler + on the System Program is needed to create a new account. +- Updating the data field on an account involves serializing (converting to a + byte array) the data into the account. + +## Lesson + +Solana maintains speed, efficiency, and extensibility by making programs +stateless. Instead of storing state alongside the program's executable, programs +use Solana's account model to read and write state to separate PDA accounts. + +This model provides a simple, user-friendly key-value store for managing data +and allows programs to be upgraded without affecting their data. However, if +you're familiar with older blockchains, this might be challenging. In this +lesson, we'll begin with the basics and gradually introduce more complex onchain +programs. You'll learn the fundamentals of state management in a Solana program, +including representing state as a Rust type, creating accounts using PDAs, and +serializing account data. + +### Program State + +All Solana accounts have a data field that holds a byte array, making accounts +as flexible as files on a computer. You can store anything in an account, as +long as it has the necessary storage space. + +Just like files in a traditional filesystem conform to specific formats like PDF +or MP3, data stored in a Solana account must follow a pattern to be retrieved +and deserialized into something usable. + +#### Represent State as a Rust Type + +When writing a program in Rust, we typically create this "format" by defining a +Rust data type. 
This is similar to how we created an enum to represent discrete +instructions in the +[first part of deserialize instruction data lesson](/developers/courses/native-onchain-development/deserialize-instruction-data#enumerations). + +A simple `struct` is usually sufficient for most use cases. For example, a +note-taking program that stores notes in separate accounts might have fields for +a title, body, and an ID: + +```rust +struct NoteState { + title: String, + body: String, + id: u64 +} +``` + +#### Using Borsh for Serialization and Deserialization + +Just as with instruction data, we need to convert our Rust data type to a byte +array and vice versa. **Serialization** converts an object into a byte array, +while **deserialization** reconstructs an object from a byte array. + +We'll continue using Borsh for serialization and deserialization. In Rust, the +`borsh` crate provides the `BorshSerialize` and `BorshDeserialize` traits. We +apply these traits using the `derive` attribute macro: + +```rust +use borsh::{BorshSerialize, BorshDeserialize}; + +#[derive(BorshSerialize, BorshDeserialize)] +struct NoteState { + title: String, + body: String, + id: u64 +} +``` + +These traits provide methods on `NoteState` for serializing and deserializing +data. + +### Creating Accounts + +Before we can update the data field of an account, we must first create the +account. + +To create a new account in our program, we need to: + +1. Calculate the space and rent required for the account. +2. Determine an address for the new account. +3. Invoke the system program to create the new account. + +#### Space and rent + +Storing data on the Solana network requires users to allocate rent in the form +of lamports. The required rent depends on the amount of space allocated to the +account, so we must determine the space needed before creating the account. + +Note that rent is more like a deposit; all lamports allocated for rent can be +fully refunded when an account is closed. Additionally, all new accounts must be +[rent-exempt](https://twitter.com/jacobvcreech/status/1524790032938287105), +meaning lamports are not deducted over time. An account is rent-exempt if it +holds at least 2 years' worth of rent, ensuring accounts are stored onchain +permanently until the owner closes the account and withdraws the rent. + +In our note-taking app example, the `NoteState` struct has three fields: +`title`, `body`, and `id`. To calculate the required account size, we add up the +space needed for each field. + +For dynamic data like strings, Borsh adds an additional 4 bytes to store the +field's length. This means `title` and `body` each require 4 bytes plus their +respective sizes. The `id` field is a 64-bit integer or 8 bytes. + +We can add these lengths and calculate the required rent using the +`minimum_balance` function from the `rent` module of the `solana_program` crate: + +```rust +// Calculate account size required for struct NoteState +let account_len: usize = (4 + title.len()) + (4 + body.len()) + 8; + +// Calculate rent required +let rent = Rent::get()?; +let rent_lamports = rent.minimum_balance(account_len); +``` + +#### Program Derived Addresses (PDA) + +Before creating an account, we also need an address to assign the account. For +program-owned accounts, this will be a Program Derived Address (PDA) found using +the `find_program_address` function. + +PDAs are derived using the program ID (the address of the program creating the +account) and optional seeds. 
The `find_program_address` function returns the +same address every time with the same inputs, allowing us to deterministically +create and find any number of PDA accounts. + +The `find_program_address` function also provides a "bump seed" to ensure the +PDA doesn't have a corresponding secret key, making it secure for program +ownership. The function starts with a bump seed of 255, decreasing it until a +valid PDA is found. + +For our note-taking program, we'll use the note creator's public key and the ID +as seeds to derive the PDA. This allows us to deterministically find the account +for each note: + +```rust +let (note_pda_account, bump_seed) = Pubkey::find_program_address(&[note_creator.key.as_ref(), id.as_bytes().as_ref(),], program_id); +``` + +#### Cross Program Invocation (CPI) + +Once we've calculated the rent and derived a valid PDA, we can create the +account using a Cross Program Invocation (CPI). A CPI is when one program +invokes an instruction on another program. To create a new account, we'll invoke +the `create_account` instruction on the system program. + +CPIs can be done using either `invoke` or `invoke_signed`. + +```rust +pub fn invoke( + instruction: &Instruction, + account_infos: &[AccountInfo<'_>] +) -> ProgramResult +``` + +```rust +pub fn invoke_signed( + instruction: &Instruction, + account_infos: &[AccountInfo<'_>], + signers_seeds: &[&[&[u8]]] +) -> ProgramResult +``` + +In this lesson, we'll explore `invoke_signed`, a function that allows a program +to authorize actions for a Program Derived Address (PDA) without using a +traditional secret key. Here's how it operates: + +1. `invoke_signed` derives a PDA using seeds, a bump seed, and the program ID. +2. It compares this derived PDA against all accounts in the instruction. +3. If an account matches the derived PDA, that account's signer field becomes + true. + +This method ensures security because `invoke_signed` generates the PDA using the +invoking program's ID, preventing other programs from producing matching PDAs to +authorize accounts derived with a different program ID. It's crucial to +understand that while we describe the PDA as "authorizing," it doesn't use a +secret key like traditional signatures. Instead, this mechanism enables programs +to approve actions onchain for PDA accounts they control. + +```rust +invoke_signed( + // instruction + &system_instruction::create_account( + note_creator.key, + note_pda_account.key, + rent_lamports, + account_len.try_into().unwrap(), + program_id, + ), + // account_infos + &[note_creator.clone(), note_pda_account.clone(), system_program.clone()], + // signers_seeds + &[&[note_creator.key.as_ref(), note_id.as_bytes().as_ref(), &[bump_seed]]], +)?; +``` + +### Serializing and Deserializing Account Data + +After creating an account, we need to update its data field by deserializing its +byte array into the Rust type, updating the fields, and then serializing it +back. + +#### Deserialize Account Data + +To update an account's data, first, deserialize its data byte array into its +Rust type. Borrow the data field on the account to access it without taking +ownership. 
Then, use the `try_from_slice_unchecked()` function to deserialize the data
into the appropriate Rust type:

```rust
let mut account_data = try_from_slice_unchecked::<NoteState>(&note_pda_account.data.borrow()).unwrap();

account_data.title = title;
account_data.body = body;
account_data.id = id;
```

#### Serialize Account Data

Once the Rust instance representing the account's data has been updated with
the appropriate values, you can "save" the data on the account.

This is done with the `serialize` function on the instance of the Rust type you
created. You'll need to pass in a mutable reference to the account data. The
syntax here is tricky, so don't worry if you don't understand it completely.
Borrowing and references are two of the toughest concepts in Rust.

```rust
account_data.serialize(&mut &mut note_pda_account.data.borrow_mut()[..])?;
```

The above example converts the `account_data` object to a byte array and sets
it to the `data` property on `note_pda_account`. This saves the updated
`account_data` variable to the data field of the new account. Now when a user
fetches the `note_pda_account` and deserializes the data, it will display the
updated data we've serialized into the account.

### Iterators

You may have noticed in the previous examples that we referenced `note_creator`
and didn't show where that came from.

To get access to this and other accounts, we use an
[Iterator](https://doc.rust-lang.org/std/iter/trait.Iterator.html). An iterator
is a Rust trait used to give sequential access to each element in a collection
of values. Iterators are used in Solana programs to safely iterate over the
list of accounts passed into the program entry point through the `accounts`
argument.

#### Rust Iterator

The iterator pattern allows you to perform tasks on a sequence of items. The
`iter()` method creates an iterator object that references a collection. In
Rust, iterators are lazy and have no effect until methods that consume the
iterator are called. Use the `next()` function to get the next element in the
sequence, advancing the iterator each time.

```rust
let v1 = vec![1, 2, 3];

// Create the iterator over the vec (mutable, since calling next() advances it)
let mut v1_iter = v1.iter();

// Use the iterator to get the first item
let first_item = v1_iter.next();

// Use the iterator to get the second item
let second_item = v1_iter.next();
```

#### Solana Accounts Iterator

In Solana programs, the instruction handler receives an `accounts` argument
containing `AccountInfo` items for all required accounts. To use these accounts
within your instruction handler, create an iterator with a mutable reference to
`accounts`. This approach allows you to process the account information
sequentially and access the data you need for your instruction handler logic.

Instead of using the iterator directly, you can pass the iterator to the
`next_account_info` function from the `account_info` module provided by the
`solana_program` crate.

For example, consider an instruction to create a new note in a note-taking
program. This instruction would minimally require the following accounts:

- The account of the user creating the note.
- A PDA to store the note.
- The `system_program` account to initialize a new account.

All three accounts would be passed into the program entry point via the
`accounts` argument. An iterator over `accounts` is then used to separate the
`AccountInfo` associated with each account so the instruction can be processed.
+ +Note: The `&mut` keyword indicates a mutable reference to the `accounts` +argument. For more details, refer to +[references in Rust](https://doc.rust-lang.org/book/ch04-02-references-and-borrowing.html) +and [the `mut` keyword](https://doc.rust-lang.org/std/keyword.mut.html). + +```rust +// Get Account iterator +let account_info_iter = &mut accounts.iter(); + +// Get accounts +let note_creator = next_account_info(account_info_iter)?; +let note_pda_account = next_account_info(account_info_iter)?; +let system_program = next_account_info(account_info_iter)?; +``` + +## Lab + +This section introduces several new concepts. Let's practice them together by +continuing with the Movie Review program from the previous lesson. Even if +you're starting with this lesson, you should be able to follow along. We'll be +using the [Solana Playground](https://beta.solpg.io) to write, build, and deploy +our code. + +As a refresher, we are building a Solana program that lets users review movies. +In +[the previous lesson deserialize instruction data](/developers/courses/native-onchain-development/deserialize-instruction-data), +we deserialized the instruction data passed in by the user but did not store +this data in an account. Let's now update our program to create new accounts to +store the user's movie review. + +### 1. Get the starter code + +If you didn't complete the lab from the last lesson or just want to make sure +that you didn't miss anything, you can reference +[the starter code](https://beta.solpg.io/66d67d97cffcf4b13384d333). + +Our program currently includes an `instruction.rs` file used to deserialize the +`instruction_data` passed into the program entry point. We've also completed the +`lib.rs` file to the point where we can print our deserialized instruction data +to the program log using the `msg!` macro. + +### 2. Create struct to represent account data + +Let's begin by creating a new file named `state.rs`. + +This file will: + +1. Define the `struct` used to populate the data field of a new account. +2. Add `BorshSerialize` and `BorshDeserialize` traits to this struct + +First, import the necessary items from the `borsh` crate: + +```rust +use borsh::{BorshSerialize, BorshDeserialize}; +``` + +Next, create the `MovieAccountState` struct, which defines the parameters that +each new movie review account will store in its data field. The struct includes +the following fields: + +- `is_initialized` - indicates whether the account has been initialized. +- `rating` - the user's rating of the movie. +- `description` - the user's description of the movie. +- `title` - the title of the movie being reviewed. + +```rust +#[derive(BorshSerialize, BorshDeserialize, Default)] +pub struct MovieAccountState { + pub is_initialized: bool, + pub rating: u8, + pub title: String, + pub description: String, +} +``` + +### 3. Update lib.rs + +Next, update the `lib.rs` file. Start by importing everything needed to complete +the Movie Review program. For more details on each item, refer to +[the `solana_program` crate](https://docs.rs/solana-program/latest/solana_program/). + +```rust +use solana_program::{ + account_info::{next_account_info, AccountInfo}, + entrypoint, + entrypoint::ProgramResult, + msg, + program::invoke_signed, + pubkey::Pubkey, + rent::Rent, + system_instruction, + sysvar::Sysvar, +}; +use borsh::{BorshDeserialize, BorshSerialize}; + +pub mod instruction; +pub mod state; + +use instruction::MovieInstruction; +use state::MovieAccountState; +``` + +### 4. 
Iterate through accounts

Continue building out the `add_movie_review` function. Recall that an array of
accounts is passed into the `add_movie_review` function through a single
`accounts` argument. To process the instruction, iterate through `accounts` and
assign the `AccountInfo` for each account to a variable.

```rust
// Get Account iterator
let account_info_iter = &mut accounts.iter();

// Get accounts
let initializer = next_account_info(account_info_iter)?;
let pda_account = next_account_info(account_info_iter)?;
let system_program = next_account_info(account_info_iter)?;
```

### 5. Derive PDA

Within the `add_movie_review` function, derive the PDA you expect the user to
have passed in. Even though `pda_account` should reference the same account,
you still need to call `find_program_address()` because the bump seed is
required for the derivation.

The PDA for each new account is derived using the initializer's public key and
the movie title as seeds. This setup restricts each user to only one review per
movie title but allows different users to review the same movie and the same
user to review different movies.

```rust
// Derive PDA
let (pda, bump_seed) = Pubkey::find_program_address(
    &[initializer.key.as_ref(), title.as_bytes().as_ref()],
    program_id,
);
```

### 6. Calculate space and rent

Calculate the rent required for the new account. Rent is the amount of lamports
a user must allocate to an account for storing data on the Solana network. To
calculate rent, first determine the space required by the new account.

The `MovieAccountState` struct has four fields. We will allocate 1 byte each
for `rating` and `is_initialized`. For both `title` and `description`, we will
allocate space equal to 4 bytes plus the length of the string.

```rust
// Calculate account size required
let account_len: usize = 1 + 1 + (4 + title.len()) + (4 + description.len());

// Calculate rent required
let rent = Rent::get()?;
let rent_lamports = rent.minimum_balance(account_len);
```

### 7. Create new account

Once rent is calculated and the PDA is derived, create the new account. To do
this, call the `create_account` instruction from the system program using a
Cross Program Invocation (CPI) with the `invoke_signed` function. Use
`invoke_signed` because the account is being created at a PDA, and the Movie
Review program needs to "sign" the instruction for that address.

```rust
// Create the account
invoke_signed(
    &system_instruction::create_account(
        initializer.key,
        pda_account.key,
        rent_lamports,
        account_len.try_into().unwrap(),
        program_id,
    ),
    &[
        initializer.clone(),
        pda_account.clone(),
        system_program.clone(),
    ],
    &[&[
        initializer.key.as_ref(),
        title.as_bytes().as_ref(),
        &[bump_seed],
    ]],
)?;

msg!("PDA created: {}", pda);
```

### 8. Update account data

Now that we've created a new account, we are ready to update its data field
using the format of the `MovieAccountState` struct from our `state.rs` file. We
first deserialize the account data from `pda_account` using `try_from_slice`,
falling back to a default `MovieAccountState` for the brand-new account, then
set the values of each field.
+ +```rust +msg!("Unpacking state account"); +let mut account_data = + MovieAccountState::try_from_slice(&pda_account.data.borrow()) + .unwrap_or(MovieAccountState::default()); +msg!("Borrowed account data"); + +account_data.title = title; +account_data.rating = rating; +account_data.description = description; +account_data.is_initialized = true; +``` + +Finally, serialize the updated `account_data` into the data field of +`pda_account`. + +```rust +msg!("Serializing account"); +account_data.serialize(&mut &mut pda_account.data.borrow_mut()[..])?; +msg!("State account serialized"); +``` + +### 9. Build and deploy + +You're now ready to build and deploy your program! + +![Gif Build and Deploy Program](/assets/courses/unboxed/movie-review-pt2-build-deploy.gif) + +You can test your program by submitting a transaction with the right instruction +data. For that, feel free to use +[this script](https://github.com/solana-developers/movie-review-program-client) +or [the frontend](https://github.com/solana-developers/movie-review-frontend) we +built in the +[Deserialize Custom Instruction Data lesson](/developers/courses/native-onchain-development/deserialize-custom-data-frontend). +In both cases, set the program ID for your program in the appropriate file +`web/components/ui/review-form.ts` to make sure you're testing the right +program. + +- If you're using the script, simply replace the value assigned to + `movieProgramId` in the `index.ts` component with the public key of the + program you've deployed. +- If you use the frontend, simply replace the `MOVIE_REVIEW_PROGRAM_ID` in the + `review-form.tsx` components with the address of the program you've deployed. + +Then run the frontend, submit a view, and refresh the browser to see the review. +If you need more time with this project to feel comfortable with these concepts, +have a look at the +[solution code](https://beta.solpg.io/66d67f31cffcf4b13384d334) before +continuing. + +## Challenge + +Now it's your turn to build something independently. Equipped with the concepts +introduced in this lesson, you now know everything you'll need to recreate the +entirety of the Student Intro program from Module 1. + +The Student Intro program is a Solana Program that lets students introduce +themselves. The program takes a user's name and a short message as the +`instruction_data` and creates an account to store the data onchain. + +Using what you've learned in this lesson, build out this program. In addition to +taking a name a short message as instruction data, the program should: + +1. Create a separate account for each student +2. Store `is_initialized` as a boolean, `name` as a string, and `msg` as a + string in each account + +You can test your program by building the +[frontend](https://github.com/solana-developers/solana-student-intro-frontend) +we created in the +[Page, Order, and Filter Program Data lesson](/developers/courses/native-onchain-development/paging-ordering-filtering-data-frontend). +Remember to replace the program ID in the frontend code with the one you've +deployed. + +Try to do this independently if you can! But if you get stuck, feel free to +reference the [solution code](https://beta.solpg.io/62b11ce4f6273245aca4f5b2). + + + +Push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=8320fc87-2b6d-4b3a-8b1a-54b55afed781)! 
+ + diff --git a/content/courses/native-onchain-development/serialize-instruction-data-frontend.mdx b/content/courses/native-onchain-development/serialize-instruction-data-frontend.mdx new file mode 100644 index 000000000..d569cd37b --- /dev/null +++ b/content/courses/native-onchain-development/serialize-instruction-data-frontend.mdx @@ -0,0 +1,651 @@ +--- +title: Serialize Custom Instruction Data for Native Program Development +objectives: + - Explain the contents of a transaction + - Explain transaction instructions + - Explain the basics of Solana's runtime optimizations + - Explain Borsh + - Use Borsh to serialize program data for native programs +description: How to deserialize data fetched from Solana accounts. +--- + +## Summary + +- Native (non-Anchor) Solana development requires manual serialization and + deserialization of data. +- Transactions are made up of an array of instructions, a single transaction can + have any number of instructions in it, each targeting different programs. When + a transaction is submitted, the Solana runtime will process its instructions + in order and atomically, meaning that if any of the instructions fail for any + reason, the entire transaction will fail to be processed. +- Every _instruction_ is made up of 3 components: the intended program's ID, an + array of all accounts involved, and a byte buffer of instruction data. +- Every _transaction_ contains an array of all accounts it intends to read or + write, one or more instructions, a recent blockhash, and one or more + signatures. +- To pass instruction data from a client, it must be serialized into a byte + buffer. To facilitate this process of serialization, we will be using + [Borsh](https://borsh.io/). +- Transactions can fail to be processed by the blockchain for any number of + reasons, we'll discuss some of the most common ones here. + +## Lesson + +### Transactions + + + +This course requires completing +[Introduction to Solana](/developers/courses/intro-to-solana) or equivalent +knowledge. It's also aimed at advanced developers that prefer more control over +the ease of use and safe defaults Anchor provides. If you're new to developing +onchain programs you may prefer +[Anchor](/developers/courses/onchain-development) + + + +In [Introduction to Solana](/developers/courses/intro-to-solana) we learned how to +create transactions with instructions for common Solana programs. + +This lessons shows how to create instructions for our own native Solana +programs, which we will develop in a few lessons. Specifically, we're going to +learn about serialization and deserialization, which is required for native +(non-Anchor) program development. + +#### Transaction Contents + +Every transaction contains: + +- An array that includes every account it intends to read or write +- One or more instructions +- A recent blockhash +- One or more signatures + +`@solana/web3.js` simplifies this process for you so that all you need to focus +on is adding instructions and signatures. The library builds the array of +accounts based on that information and handles the logic for including a recent +blockhash. + +### Instructions + +Every instruction contains: + +- The program ID (public key) of the intended program +- An array listing every account that will be read from or written to during + execution +- A byte buffer of instruction data + +Identifying the program by its public key ensures that the instruction is +carried out by the correct program. 
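+
+We'll build a complete, real example later in this lesson. As a quick preview,
+here is a minimal sketch of what those three components look like in
+`@solana/web3.js` - the program ID and account below are placeholders for
+illustration only, not a real program:
+
+```typescript
+import { PublicKey, TransactionInstruction } from "@solana/web3.js";
+
+// Placeholder addresses for illustration only
+const programId = new PublicKey("11111111111111111111111111111111");
+const someAccount = new PublicKey("11111111111111111111111111111111");
+
+const instruction = new TransactionInstruction({
+  // 1. the program that should process this instruction
+  programId,
+  // 2. every account the instruction reads from or writes to
+  keys: [{ pubkey: someAccount, isSigner: true, isWritable: true }],
+  // 3. a byte buffer of instruction data (empty in this sketch)
+  data: Buffer.alloc(0),
+});
+```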
+ +Including an array of every account that will be read from or written to allows +the network to perform several optimizations which allow for high transaction +load and quicker execution. + +The byte buffer lets you pass external data to a program. + +You can include multiple instructions in a single transaction. The Solana +runtime will process these instructions in order and atomically. In other words, +if every instruction succeeds then the transaction as a whole will be +successful, but if a single instruction fails then the entire transaction will +fail immediately with no side-effects. + +The account array is not just an array of the accounts' public keys. Each object +in the array includes the account's public key, whether or not it is a signer on +the transaction, and whether or not it is writable. Including whether or not an +account is writable during the execution of an instruction allows the runtime to +facilitate parallel processing of smart contracts. Because you must define which +accounts are read-only and which you will write to, the runtime can determine +which transactions are non-overlapping or read-only and allow them to execute +concurrently. To learn more about Solana's runtime, check out this +[blog post on Sealevel](/news/sealevel---parallel-processing-thousands-of-smart-contracts). + +#### Instruction Data + +The ability to add arbitrary data to an instruction ensures that programs can be +dynamic and flexible enough for broad use cases in the same way that the body of +an HTTP request lets you build dynamic and flexible REST APIs. + +Just as the structure of the body of an HTTP request is dependent on the +endpoint you intend to call, the structure of the byte buffer used as +instruction data is entirely dependent on the recipient program. If you're +building a full-stack dApp on your own, then you'll need to copy the same +structure that you used when building the program over to the client-side code. +If you're working with another developer who is handling the program +development, you can coordinate to ensure matching buffer layouts. + +Let's think about a concrete example. Imagine working on a Web3 game and being +responsible for writing client-side code that interacts with a player inventory +program. The program was designed to allow the client to: + +- Add inventory based on a player's game-play results +- Transfer inventory from one player to another +- Equip a player with selected inventory items + +This program would have been structured such that each of these is encapsulated +in its own function. + +Each program, however, only has one entry point. You would instruct the program +on which of these functions to run through the instruction data. + +You would also include in the instruction data any information the function +needs to execute properly, e.g. an inventory item's ID, a player to transfer +inventory to, etc. + +Exactly _how_ this data would be structured would depend on how the program was +written, but it's common to have the first field in instruction data be a number +that the program can map to a function, after which additional fields act as +function arguments. + +### Serialization + +In addition to knowing what information to include in an instruction data +buffer, you also need to serialize it properly. The most common serializer used +in Solana is [Borsh](https://borsh.io). Per the website: + +> Borsh stands for Binary Object Representation Serializer for Hashing. 
It is +> meant to be used in security-critical projects as it prioritizes consistency, +> safety, speed; and comes with a strict specification. + +Borsh maintains a [JS library](https://github.com/near/borsh-js) that handles +serializing common types into a buffer. There are also other packages built on +top of Borsh that try to make this process even easier. We'll be using the +`@coral-xyz/borsh` library which can be installed using `npm`. + +Building off of the previous game inventory example, let's look at a +hypothetical scenario where we are instructing the program to equip a player +with a given item. Assume the program is designed to accept a buffer that +represents a struct with the following properties: + +1. `variant` is an unsigned, 8-bit integer that instructs the program which + instruction, or function, to execute. +2. `playerId` is an unsigned, 16-bit integer that represents the player ID of + the player who is to be equipped with the given item. +3. `itemId` is an unsigned, 256-bit integer that represents the item ID of the + item that will be equipped for the given player. + +All of this will be passed as a byte buffer that will be read in order, so +ensuring proper buffer layout order is crucial. You would create the buffer +layout schema or template for the above as follows: + +```typescript +import * as borsh from "@coral-xyz/borsh"; + +const equipPlayerSchema = borsh.struct([ + borsh.u8("variant"), + borsh.u16("playerId"), + borsh.u256("itemId"), +]); +``` + +You can then encode data using this schema with the `encode` method. This method +accepts as arguments an object representing the data to be serialized and a +buffer. In the below example, we allocate a new buffer that's much larger than +needed, then encode the data into that buffer and slice the original buffer down +into a new buffer that's only as large as needed. + +```typescript +import * as borsh from "@coral-xyz/borsh"; + +const equipPlayerSchema = borsh.struct([ + borsh.u8("variant"), + borsh.u16("playerId"), + borsh.u256("itemId"), +]); + +const buffer = Buffer.alloc(1000); +equipPlayerSchema.encode( + { variant: 2, playerId: 1435, itemId: 737498 }, + buffer, +); + +const instructionBuffer = buffer.subarray(0, equipPlayerSchema.getSpan(buffer)); +``` + +Once a buffer is properly created and the data serialized, all that's left is +building the transaction. This is similar to what you've done in previous +lessons. The example below assumes that: + +- `player`, `playerInfoAccount`, and `PROGRAM_ID` are already defined somewhere + outside the code snippet +- `player` is a user's public key +- `playerInfoAccount` is the public key of the account where inventory changes + will be written +- `SystemProgram` will be used in the process of executing the instruction. 
+ +```typescript +import * as borsh from "@coral-xyz/borsh"; +import { + clusterApiUrl, + Connection, + SystemProgram, + Transaction, + TransactionInstruction, + sendAndConfirmTransaction, +} from "@solana/web3.js"; + +const equipPlayerSchema = borsh.struct([ + borsh.u8("variant"), + borsh.u16("playerId"), + borsh.u256("itemId"), +]); + +const buffer = Buffer.alloc(1000); +equipPlayerSchema.encode( + { variant: 2, playerId: 1435, itemId: 737498 }, + buffer, +); + +const instructionBuffer = buffer.subarray(0, equipPlayerSchema.getSpan(buffer)); + +const endpoint = clusterApiUrl("devnet"); +const connection = new Connection(endpoint); + +const transaction = new Transaction(); +const instruction = new TransactionInstruction({ + keys: [ + { + pubkey: player.publicKey, + isSigner: true, + isWritable: false, + }, + { + pubkey: playerInfoAccount, + isSigner: false, + isWritable: true, + }, + { + pubkey: SystemProgram.programId, + isSigner: false, + isWritable: false, + }, + ], + data: instructionBuffer, + programId: PROGRAM_ID, +}); + +transaction.add(instruction); + +try { + const transactionId = await sendAndConfirmTransaction( + connection, + transaction, + [player], + ); + const explorerLink = getExplorerLink("transaction", transactionId, "devnet"); + console.log(`Transaction submitted: ${explorerLink}`); +} catch (error) { + alert(error); +} +``` + +## Lab + +Let's practice this together by building a Movie Review app that lets users +submit a movie review and have it stored on Solana's network. We'll build this +app a little bit at a time over the next few lessons, adding new functionality +each lesson. + +![Movie review frontend](/assets/courses/movie-review-dapp.png) + +Here's a quick diagram of the program we'll build: + +![Solana stores data items in PDAs, which can be found using their seeds](/assets/courses/unboxed/movie-review-program.svg) + +The public key of the Solana program we'll use for this application is +`CenYq6bDRB7p73EjsPEpiYN7uveyPUTdXkDkgUduboaN`. + +#### 1. Download the starter code + +Before we get started, go ahead and download the +[starter code](https://github.com/solana-developers/movie-review-frontend/tree/starter). + +The project is a fairly simple Next.js application. It includes the +`WalletContextProvider` we created in the Wallets lesson, a `Card` component for +displaying a movie review, a `MovieList` component that displays reviews in a +list, a `Form` component for submitting a new review, and a `Movie.ts` file that +contains a class definition for a `Movie` object. + +Note that for now, the movies displayed on the page when you run `npm run dev` +are mocks. In this lesson, we'll focus on adding a new review but we won't be +able to see that review displayed. Next lesson, we'll focus on deserializing +custom data from onchain accounts. + +#### 2. Create the buffer layout + +Remember that to properly interact with a Solana program, you need to know how +it expects data to be structured. Our Movie Review program expects instruction +data to contain: + +1. `variant` as an unsigned, 8-bit integer representing which instruction should + be executed (in other words which function on the program should be called). +2. `title` as a string representing the title of the movie that you are + reviewing. +3. `rating` as an unsigned, 8-bit integer representing the rating out of 5 that + you are giving to the movie you are reviewing. +4. `description` as a string representing the written portion of the review you + are leaving for the movie. 
+ +Let's configure a `borsh` layout in the `Movie` class. Start by importing +`@coral-xyz/borsh`. Next, create a `borshInstructionSchema` property and set it +to the appropriate `borsh` struct containing the properties listed above. + +```typescript +import * as borsh from '@coral-xyz/borsh' + +export class Movie { + title: string; + rating: number; + description: string; + + ... + + borshInstructionSchema = borsh.struct([ + borsh.u8('variant'), + borsh.str('title'), + borsh.u8('rating'), + borsh.str('description'), + ]) +} +``` + +Keep in mind that _order matters_. If the order of properties here differs from +how the program is structured, the transaction will fail. + +#### 3. Create a method to serialize data + +Now that we have the buffer layout set up, let's create a method in `Movie` +called `serialize()` that will return a `Buffer` with a `Movie` object's +properties encoded into the appropriate layout. + +Instead of allocating a fixed buffer size, we'll calculate the size dynamically +using known constants for the space required by each field in the `Movie` +object. Specifically, we'll use `INIT_SPACE` (to account for string length +metadata) and `ANCHOR_DISCRIMINATOR` (to account for the 8-byte discriminator +used by Anchor). + +```typescript +import * as borsh from "@coral-xyz/borsh"; + +// Constants for size calculations +const ANCHOR_DISCRIMINATOR = 8; // 8 bytes for the account discriminator used by Anchor +const STRING_LENGTH_SPACE = 4; // 4 bytes to store the length of each string + +// Specific sizes for 'title' and 'description' strings +const TITLE_SIZE = 100; // Allocate 100 bytes for the 'title' +const DESCRIPTION_SIZE = 500; // Allocate 500 bytes for the 'description' + +// Total space calculation for the Movie review structure +const MOVIE_REVIEW_SPACE = + ANCHOR_DISCRIMINATOR + // 8 bytes for the account discriminator + STRING_LENGTH_SPACE + + TITLE_SIZE + // 4 bytes for the title length + 100 bytes for the title + STRING_LENGTH_SPACE + + DESCRIPTION_SIZE + // 4 bytes for the description length + 500 bytes for the description + 1 + // 1 byte for 'variant' + 1; // 1 byte for 'rating' + +export class Movie { + title: string; + rating: number; + description: string; + + constructor(title: string, rating: number, description: string) { + // Enforce specific sizes for title and description + if (title.length > TITLE_SIZE) { + throw new Error(`Title cannot exceed ${TITLE_SIZE} characters.`); + } + if (description.length > DESCRIPTION_SIZE) { + throw new Error( + `Description cannot exceed ${DESCRIPTION_SIZE} characters.`, + ); + } + + this.title = title; + this.rating = rating; + this.description = description; + } + + borshInstructionSchema = borsh.struct([ + borsh.u8("variant"), + borsh.str("title"), + borsh.u8("rating"), + borsh.str("description"), + ]); + + serialize(): Buffer { + try { + // Allocate a buffer with the exact space needed + const buffer = Buffer.alloc(MOVIE_REVIEW_SPACE); + this.borshInstructionSchema.encode({ ...this, variant: 0 }, buffer); + return buffer.subarray(0, this.borshInstructionSchema.getSpan(buffer)); + } catch (error) { + console.error("Serialization error:", error); + return Buffer.alloc(0); + } + } +} +``` + +The method shown above first creates a large enough buffer for our object, then +encodes `{ ...this, variant: 0 }` into the buffer. 
Because the `Movie` class +definition contains 3 of the 4 properties required by the buffer layout and uses +the same naming, we can use it directly with the spread operator and just add +the `variant` property. Finally, the method returns a new buffer that leaves off +the unused portion of the original. + +#### 4. Send a transaction when the user submits the form + +Now that we have the building blocks for the instruction data, we can create and +send the transaction when a user submits the form. Open `Form.tsx` and locate +the `handleTransactionSubmit` function. This gets called by `handleSubmit` each +time a user submits the Movie Review form. + +Inside this function, we'll be creating and sending the transaction that +contains the data submitted through the form. + +Start by importing `@solana/web3.js` and importing `useConnection` and +`useWallet` from `@solana/wallet-adapter-react`. + +```tsx +import { FC } from "react"; +import { Movie } from "../models/Movie"; +import { useState } from "react"; +import { + Connection, + PublicKey, + SystemProgram, + Transaction, + TransactionInstruction, +} from "@solana/web3.js"; +import { useConnection, useWallet } from "@solana/wallet-adapter-react"; +``` + +Next, before the `handleSubmit` function, call `useConnection()` to get a +`connection` object and call `useWallet()` to get `publicKey` and +`sendTransaction`. + +```tsx +import { FC } from 'react' +import { Movie } from '../models/Movie' +import { useState } from 'react' +import { + Connection, + PublicKey, + SystemProgram, + Transaction, + TransactionInstruction, +} from "@solana/web3.js" +import { useConnection, useWallet } from '@solana/wallet-adapter-react' +import { getExplorerLink } from "@solana-developers/helpers"; + +const MOVIE_REVIEW_PROGRAM_ID = 'CenYq6bDRB7p73EjsPEpiYN7uveyPUTdXkDkgUduboaN' + +export const Form: FC = () => { + const [title, setTitle] = useState('') + const [rating, setRating] = useState(0) + const [message, setMessage] = useState('') + + const { connection } = useConnection(); + const { publicKey, sendTransaction } = useWallet(); + + const handleSubmit = (event: any) => { + event.preventDefault() + const movie = new Movie(title, rating, description) + handleTransactionSubmit(movie) + } + + ... +} +``` + +Before we implement `handleTransactionSubmit`, let's talk about what needs to be +done. We need to: + +1. Check that `publicKey` exists to ensure that the user has connected their + wallet. +2. Call `serialize()` on `movie` to get a buffer representing the instruction + data. +3. Create a new `Transaction` object. +4. Get all of the accounts that the transaction will read or write. +5. Create a new `Instruction` object that includes all of these accounts in the + `keys` argument, includes the buffer in the `data` argument, and includes the + program's public key in the `programId` argument. +6. Add the instruction from the last step to the transaction. +7. Call `sendTransaction`, passing in the assembled transaction. + +That's quite a lot to process! But don't worry, it gets easier the more you do +it. Let's start with the first 3 steps from above: + +```typescript +const handleTransactionSubmit = async (movie: Movie) => { + if (!publicKey) { + alert("Please connect your wallet!"); + return; + } + + const buffer = movie.serialize(); + const transaction = new Transaction(); +}; +``` + +The next step is to get all of the accounts that the transaction will read or +write. In past lessons, the account where data will be stored has been given to +you. 
This time, the account's address is more dynamic, so it needs to be +computed. We'll cover this in-depth in the next lesson, but for now, you can use +the following, where `pda` is the address to the account where data will be +stored: + +```typescript +const [pda] = await PublicKey.findProgramAddressSync( + [publicKey.toBuffer(), Buffer.from(movie.title)], + new PublicKey(MOVIE_REVIEW_PROGRAM_ID), +); +``` + +In addition to this account, the program will also need to read from +`SystemProgram`, so our array needs to include `SystemProgram.programId` as +well. + +With that, we can finish the remaining steps: + +```typescript +const handleTransactionSubmit = async (movie: Movie) => { + if (!publicKey) { + alert("Please connect your wallet!"); + return; + } + + const buffer = movie.serialize(); + const transaction = new Transaction(); + + const [pda] = await PublicKey.findProgramAddressSync( + [publicKey.toBuffer(), new TextEncoder().encode(movie.title)], + new PublicKey(MOVIE_REVIEW_PROGRAM_ID), + ); + + const instruction = new TransactionInstruction({ + keys: [ + { + pubkey: publicKey, + isSigner: true, + isWritable: false, + }, + { + pubkey: pda, + isSigner: false, + isWritable: true, + }, + { + pubkey: SystemProgram.programId, + isSigner: false, + isWritable: false, + }, + ], + data: buffer, + programId: new PublicKey(MOVIE_REVIEW_PROGRAM_ID), + }); + + transaction.add(instruction); + + try { + let transactionId = await sendTransaction(transaction, connection); + const explorerLink = getExplorerLink( + "transaction", + transactionId, + "devnet", + ); + console.log(`Transaction submitted: ${explorerLink}`); + } catch (error) { + alert(error); + } +}; +``` + +And that's it! You should now be able to use the form on the site to submit a +movie review. While you won't see the UI update to reflect the new review, you +can look at the transaction's program logs on Solana Explorer to see that it was +successful. + +If you need a bit more time with this project to feel comfortable, have a look +at the complete +[solution code](https://github.com/solana-developers/movie-review-frontend/tree/solution-serialize-instruction-data). + +## Challenge + +Now it's your turn to build something independently. Create an application that +lets students of this course introduce themselves! The Solana program that +supports this is at `HdE95RSVsdb315jfJtaykXhXY478h53X6okDupVfY9yf`. + +![Student Intros frontend](/assets/courses/student-intros-frontend.png) + +1. You can build this from scratch or you can + [download the starter code](https://github.com/solana-developers/solana-student-intro-frontend/tree/starter). +2. Create the instruction buffer layout in `StudentIntro.ts`. The program + expects instruction data to contain: + 1. `variant` as an unsigned, 8-bit integer representing the instruction to + run (should be 0). + 2. `name` as a string representing the student's name. + 3. `message` as a string representing the message the student is sharing + about their Solana journey. +3. Create a method in `StudentIntro.ts` that will use the buffer layout to + serialize a `StudentIntro` object. +4. In the `Form` component, implement the `handleTransactionSubmit` function so + that it serializes a `StudentIntro`, builds the appropriate transaction and + transaction instructions, and submits the transaction to the user's wallet. +5. You should now be able to submit introductions and have the information + stored onchain! Be sure to log the transaction ID and look at it in Solana + Explorer to verify that it worked. 
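+
+If you'd like a head start on steps 2 and 3, here is one possible sketch of the
+buffer layout and serialization, assuming the field names listed above
+(`variant`, `name`, `message`) and the same `@coral-xyz/borsh` approach we used
+for `Movie`. Treat it as a starting point rather than the exact solution:
+
+```typescript
+import * as borsh from "@coral-xyz/borsh";
+
+export class StudentIntro {
+  name: string;
+  message: string;
+
+  constructor(name: string, message: string) {
+    this.name = name;
+    this.message = message;
+  }
+
+  // Order matters: it must match the layout the program expects -
+  // variant first, then name, then message
+  borshInstructionSchema = borsh.struct([
+    borsh.u8("variant"),
+    borsh.str("name"),
+    borsh.str("message"),
+  ]);
+
+  serialize(): Buffer {
+    // Allocate a generously sized buffer, encode into it, then trim the
+    // result down to the bytes actually used
+    const buffer = Buffer.alloc(1000);
+    this.borshInstructionSchema.encode({ ...this, variant: 0 }, buffer);
+    return buffer.subarray(0, this.borshInstructionSchema.getSpan(buffer));
+  }
+}
+```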
+ +If you get stumped, you can +[check out the solution code](https://github.com/solana-developers/solana-student-intro-frontend/tree/solution-serialize-instruction-data). + +Feel free to get creative with these challenges and take them even further. The +instructions aren't here to hold you back! + + + +Push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=6cb40094-3def-4b66-8a72-dd5f00298f61)! + + diff --git a/content/courses/offline-transactions/durable-nonces.mdx b/content/courses/offline-transactions/durable-nonces.mdx new file mode 100644 index 000000000..be73b33b9 --- /dev/null +++ b/content/courses/offline-transactions/durable-nonces.mdx @@ -0,0 +1,1062 @@ +--- +title: Durable Nonces +objectives: + - Be able to explain the differences between durable transactions and regular + transactions. + - Create and submit durable transactions. + - Navigate edge cases that can happen when dealing with durable transactions. +description: "Use durable nonces to sign transactions ahead of time." +--- + +## Summary + +- Durable transactions have no expiration date unlike regular transactions that + have an expiration date of 150 blocks (~80-90 seconds). +- After signing a durable transaction you can store it in a database or a file + or send it to another device to submit it later. +- A durable transaction is created using a nonce account. A nonce account holds + the authority and the nonce value which replaces the recent blockhash to make + a durable transaction +- Durable transactions must start with an `advanceNonce` instruction, and the + nonce authority must be a signer of the transaction. +- If the transaction fails for any reason other than the `advanceNonce` + instruction, the nonce will still be advanced, even though all other + instructions will be reverted. + +## Overview + +Durable Nonces are a way to bypass the expiration date of regular transactions. +To understand this, we'll start by looking at the concepts behind regular +transactions. + +In Solana, transactions have three main parts: + +1. **Instructions**: Instructions are the operations you want to perform on the + blockchain, like transferring tokens, creating accounts, or calling a + program. These are executed in order. + +2. **Signatures**: Signatures are proof that the transaction was made by someone + with the signer's private key - which should usually be the signer + themselves. For instance, if you are transferring SOL from your wallet to + another, you'll need to sign the transaction so the network can verify that + the transaction is valid. + +3. **Recent Blockhash**: The recent blockhash is a unique identifier for each + transaction. It is used to prevent replay attacks, where an attacker records + a transaction and then tries to submit it again. The recent blockhash ensures + that each transaction is unique and can only be submitted once. A recent + blockhash is only valid for 150 blocks. + +In durable transactions, the first two concepts remain the same. Durable +transactions are possible by playing with recent blockhashes. + +Let's dive deep into the recent blockhash, to understand the blockhash better +let's look at the problem that it tries to solve, the +[double-spend](/developers/guides/advanced/introduction-to-durable-nonces#double-spend) +problem. + +Imagine you're buying an NFT on MagicEden or Tensor. You must sign a transaction +that allows the marketplace's program to extract some SOL from your wallet. 
+
+After signing the transaction, the marketplace will submit it to the network. If
+the marketplace submits it again, without checks, you could be charged twice.
+
+This is known as the double-spend problem and is one of the core issues that
+blockchains, like Solana, solve. A naive solution could be to cross-check all
+transactions made in the past and see if we find a duplicate transaction
+signature. This is not practically possible, as the size of the Solana ledger
+is >80 TB. So to solve this, Solana uses recent blockhashes.
+
+A recent blockhash is a 32-byte SHA-256 hash of a valid block's last
+[entry id](/docs/terminology#blockhash) within the last 150 blocks. Since this
+recent blockhash was part of the transaction before it was signed, we can
+guarantee the signer has signed it within the last 150 blocks. Checking 150
+blocks is much more reasonable than checking the entire ledger.
+
+When the transaction is submitted, the Solana validators will do the following:
+
+1. Check if the signature of the transaction has been submitted within the last
+   150 blocks - if there is a duplicate signature, it will reject the duplicate
+   transaction.
+2. If the transaction signature has not been found, it will check the recent
+   blockhash to see if it exists within the last 150 blocks - if it does not, it
+   will return a "Blockhash not found" error. If it does, the transaction goes
+   through its execution checks.
+
+While this solution is great for most use cases, it has some limitations.
+Mainly, the transaction needs to get signed and submitted to the network within
+150 blocks, or around 80-90 seconds. But there are some use cases where we need
+more than 90 seconds to submit a transaction.
+
+From the
+[Durable Nonce guide](/developers/guides/advanced/introduction-to-durable-nonces#durable-nonce-applications):
+
+> 1. **Scheduled Transactions**: One of the most apparent applications of
+>    Durable Nonces is the ability to schedule transactions. Users can pre-sign
+>    a transaction and then submit it at a later date, allowing for planned
+>    transfers, contract interactions, or even executing pre-determined
+>    investment strategies.
+> 2. **Multisig Wallets**: Durable Nonces are very useful for multi-signature
+>    wallets where one party signs a transaction, and others may confirm it at a
+>    later time. This feature enables the proposal, review, and later execution
+>    of a transaction within a trustless system.
+> 3. **Programs Requiring Future Interaction**: If a program on Solana requires
+>    interaction at a future point (such as a vesting contract or a timed
+>    release of funds), a transaction can be pre-signed using a Durable Nonce.
+>    This ensures the contract interaction happens at the correct time without
+>    necessitating the presence of the transaction creator.
+> 4. **Cross-chain Interactions**: When you need to interact with another
+>    blockchain and it requires waiting for confirmations, you can sign the
+>    transaction with a Durable Nonce and execute it once the required
+>    confirmations are received.
+> 5. **Decentralized Derivatives Platforms**: In a decentralized derivatives
+>    platform, complex transactions might need to be executed based on specific
+>    triggers. With Durable Nonces, these transactions can be pre-signed and
+>    executed when the trigger condition is met.
+
+### Considerations
+
+Durable transactions should be treated with care, which is why users should
+always trust the transactions they sign.
+ +As developers, it's important to inform users that their durable nonce +transactions may be flagged by wallets. Durable nonces are often used for +malicious transactions, and understanding the risks can help users make informed +decisions. + +For example, imagine a user blindly signed a malicious durable transaction. This +transaction could sign away 500 SOL to an attacker and change the nonce +authority to the attacker as well. Even if the user doesn't have this amount +yet, the attacker could wait to cash this check as soon as the user's balance +exceeds 500 SOL. The user would have no recollection of what they clicked on, +and the transaction could remain dormant for days, weeks, or years. + +To mitigate these risks, developers should educate users on the following +points: + +1. **Trust the Source**: Users should only sign transactions from trusted + sources. Encourage users to verify the origin of the transaction before + signing. +2. **Use Hot Wallets Cautiously**: Users should only keep in hot wallets what + they're willing to lose. Hot wallets are more susceptible to attacks, so it's + wise to limit the amount of funds stored in them. +3. **Protect Cold Wallets**: Users should avoid signing transactions with their + cold wallets unless absolutely necessary. Cold wallets are more secure and + should be used to store larger amounts of funds. +4. **Monitor Transactions**: Encourage users to regularly monitor their + transaction history and account balances. Promptly reporting any suspicious + activity can help mitigate potential losses. + +By providing this information, developers can help users understand the +potential dangers of durable nonce transactions and take appropriate +precautions. This is not meant to provoke hysteria but serves to show what's +possible and emphasize the importance of security in handling durable +transactions. + +### Using Durable Nonces to Overcome the Short Lifespan of Regular Transactions + +Durable nonces are a way to sign transactions offchain and keep them in storage +until they are ready to be submitted to the network. This allows us to create +durable transactions. + +Durable nonces, which are 32 bytes in length (usually represented as base58 +encoded strings), are used in place of recent blockhashes to make each +transaction unique (to avoid double-spending) while removing the expiration of +the unexecuted transactions. + +If nonces are used in place of recent blockhashes, the first instruction of the +transaction needs to be a `nonceAdvance` instruction, which changes or advances +the nonce. This ensures that every transaction which is signed using the nonce +as the recent blockhash will be unique. + +It is important to note that durable nonces require +[unique mechanisms within Solana](https://docs.anza.xyz/implemented-proposals/durable-tx-nonces) +to function, thus they have some special rules that don't apply normally. We'll +see this as we deep dive into the technicals. + +### Durable Nonces In-Depth + +Durable transactions differ from regular transactions in the following ways: + +1. Durable Nonces replace the recent blockhash with a nonce. This nonce is + stored in a nonce account and will be used only once in one transaction. The + nonce is a unique blockhash. +2. Each durable transaction must start with the `nonceAdvance` instruction, + which will change the nonce in the nonce account. This ensures that the nonce + is unique and cannot be reused in another transaction. 
+ +The nonce account is an account that holds the following values: + +1. nonce value: the nonce value that will be used in the transaction. +2. authority: the public key that can change the nonce value. +3. fee calculator: the fee calculator for the transaction. + +Again, every durable transaction must start with the `nonceAdvance` instruction, +and the `authority` must be a signer. + +Lastly, there is a special rule - if a durable transaction fails because of any +instruction other than the `nonceAdvance` instruction, the nonce will still +advance, while the rest of the transaction is rolled back. This behavior is +unique only to durable nonces. + +### Durable Nonce Operations + +Durable nonces have a few helpers and constants in the `@solana/web3.js` +package: + +1. `SystemProgram.nonceInitialize`: This instruction creates a new nonce + account. +2. `SystemProgram.nonceAdvance`: This instruction changes the Nonce in the nonce + account. +3. `SystemProgram.nonceWithdraw`: This instruction withdraws funds from the + nonce account. To delete the nonce account, withdraw all the funds from it. +4. `SystemProgram.nonceAuthorize`: This instruction changes the authority of the + nonce account. +5. `NONCE_ACCOUNT_LENGTH`: A constant that represents the length of the nonce + account data. +6. `NonceAccount`: A class that represents the nonce account. It contains a + static function `fromAccountData` that can take the nonce account data and + return a nonce account object. + +Let's look into each one of the helper functions in detail. + +#### `nonceInitialize` + +The `nonceInitialize` instruction is used to create a new nonce account. It +takes two parameters: + +1. `noncePubkey`: the public key of the nonce account. +2. `authorizedPubkey`: the public key of the authority of the nonce account. + +Here is a code example for it: + +```typescript +// 1. Generate/get a keypair for the nonce account, and the authority. +const [nonceKeypair, nonceAuthority] = makeKeypairs(2); // from '@solana-developers/helpers' +// Calculate the minimum balance required for rent exemption +const rentExemptBalance = + await connection.getMinimumBalanceForRentExemption(NONCE_ACCOUNT_LENGTH); + +const tx = new Transaction().add( + // 2. Allocate the account and transfer funds to it (the rent-exempt balance) + SystemProgram.createAccount({ + fromPubkey: payer.publicKey, + newAccountPubkey: nonceKeypair.publicKey, + lamports: rentExemptBalance, + space: NONCE_ACCOUNT_LENGTH, + programId: SystemProgram.programId, + }), + // 3. Initialize the nonce account using the `SystemProgram.nonceInitialize` instruction. + SystemProgram.nonceInitialize({ + noncePubkey: nonceKeypair.publicKey, + authorizedPubkey: nonceAuthority.publicKey, + }), +); + +// Send the transaction +await sendAndConfirmTransaction(connection, tx, [payer, nonceKeypair]); +``` + +The system program will take care of setting the nonce value for us inside the +nonce account. + +#### `nonceAdvance` + +This instruction is used to change the nonce value in the nonce account, it +takes two parameters: + +1. `noncePubkey`: the public key of the nonce account. +2. `authorizedPubkey`: the public key of the authority of the nonce account. + +Here is a code example for it: + +```typescript +const instruction = SystemProgram.nonceAdvance({ + authorizedPubkey: nonceAuthority.publicKey, + noncePubkey: nonceKeypair.publicKey, +}); +``` + +You will see this instruction as the first instruction in any durable +transaction. 
But that doesn't mean that you only have to use it as the first +instruction of a durable transaction. You can always call this function, and it +will automatically invalidate any durable transaction tied to its previous nonce +value. + +#### `nonceWithdraw` + +This instruction is used to withdraw the funds from the nonce account, it takes +four parameters: + +1. `noncePubkey`: the public key of the nonce account. +2. `toPubkey`: the public key of the account that will receive the funds. +3. `lamports`: the amount of lamports that will be withdrawn. +4. `authorizedPubkey`: the public key of the authority of the nonce account. + +Here is a code example for it: + +```typescript +const instruction = SystemProgram.nonceWithdraw({ + noncePubkey: nonceKeypair.publicKey, + toPubkey: payer.publicKey, + lamports: amount, + authorizedPubkey: nonceAuthority.publicKey, +}); +``` + +You can also use this instruction to close the nonce account by withdrawing all +the funds in it. + +#### `nonceAuthorize` + +This instruction is used to change the authority of the nonce account, it takes +three parameters: + +1. `noncePubkey`: the public key of the nonce account. +2. `authorizedPubkey`: the public key of the current authority of the nonce + account. +3. `newAuthorizedPubkey`: the public key of the new authority of the nonce + account. + +Here is a code example for it: + +```typescript +const instruction = SystemProgram.nonceAuthorize({ + noncePubkey: nonceKeypair.publicKey, + authorizedPubkey: nonceAuthority.publicKey, + newAuthorizedPubkey: newAuthority.publicKey, +}); +``` + +### How to use the durable nonces + +Now that we learned about the nonce account and its different operations, let's +talk about how to use it. + +We'll discuss: + +1. Fetching the nonce account +2. Using the nonce in the transaction to make a durable transaction. +3. Submitting a durable transaction. + +#### Fetching the nonce account + +We can fetch the nonce account to get the nonce value by fetching the account +and serializing it: + +```typescript +const nonceAccount = await connection.getAccountInfo(nonceKeypair.publicKey); + +const nonce = NonceAccount.fromAccountData(nonceAccount.data); +``` + +#### Using the nonce in the transaction to make a durable transaction + +To build a fully functioning durable transaction, we need the following: + +1. Use the nonce value in replacement of the recent blockhash. +2. Add the nonceAdvance instruction as the first instruction in the transaction. +3. Sign the transaction with the authority of the nonce account. + +After building and signing the transaction we can serialize it and encode it +into a base58 string, and we can save this string in some store to submit it +later. 
+
+```typescript
+// Assemble the durable transaction
+const durableTx = new Transaction();
+durableTx.feePayer = payer.publicKey;
+
+// use the nonceAccount's stored nonce as the recentBlockhash
+durableTx.recentBlockhash = nonceAccount.nonce;
+
+// make a nonce advance instruction
+durableTx.add(
+  SystemProgram.nonceAdvance({
+    authorizedPubkey: nonceAuthority.publicKey,
+    noncePubkey: nonceKeypair.publicKey,
+  }),
+);
+
+// Add any instructions you want to the transaction (in this case, just a transfer)
+durableTx.add(
+  SystemProgram.transfer({
+    fromPubkey: payer.publicKey,
+    toPubkey: recipient.publicKey,
+    lamports: 0.1 * LAMPORTS_PER_SOL,
+  }),
+);
+
+// sign the tx with the nonce authority's keypair
+durableTx.sign(payer, nonceAuthority);
+
+// once you have the signed tx, you can serialize it and store it in a database, or send it to another device.
+// You can submit it at a later point.
+const serializedTx = base58.encode(
+  durableTx.serialize({ requireAllSignatures: false }),
+);
+```
+
+#### Submitting a durable transaction
+
+Now that we have a base58-encoded transaction, we can decode it and submit it:
+
+```typescript
+const tx = base58.decode(serializedTx);
+const sig = await sendAndConfirmRawTransaction(connection, tx as Buffer);
+```
+
+### Some important edge cases
+
+There are a few things that you need to consider when dealing with durable
+transactions:
+
+1. If the transaction fails due to an instruction other than the nonce advance
+   instruction.
+2. If the transaction fails due to the nonce advance instruction.
+
+#### If the transaction fails due to an instruction other than the nonce advance instruction
+
+In the normal case of failing transactions, the known behavior is that all the
+instructions in the transaction will get reverted to the original state. But in
+the case of a durable transaction, if any instruction fails that is not the
+advance nonce instruction, the nonce will still get advanced and all other
+instructions will get reverted. This feature is designed for security purposes,
+ensuring that once a user signs a transaction, if it fails, it cannot be used
+again.
+
+Presigned, never-expiring durable transactions are like signed paychecks. They
+can be dangerous in the wrong hands. This extra safety feature effectively
+"voids" the paycheck if handled incorrectly.
+
+#### If the transaction fails due to the nonce advance instruction
+
+If a transaction fails because of the advance instruction, the entire
+transaction is reverted, meaning the nonce does not advance.
+
+## Lab
+
+In this lab, we'll learn how to create a durable transaction. We'll focus on
+what you can and can't do with it. Additionally, we'll discuss some edge cases
+and how to handle them.
+
+### 0. Getting started
+
+Let's go ahead and clone our starter code:
+
+```bash
+git clone https://github.com/Unboxed-Software/solana-lab-durable-nonces
+cd solana-lab-durable-nonces
+git checkout starter
+npm install
+```
+
+In the starter code, you will find a testing skeleton in `test/index.ts`; we'll
+write all of our code there.
+
+We're going to use the local validator for this lab. However, feel free to use
+devnet if you'd like.
( If you have issues airdropping on devnet, check out +[Solana's Faucet](https://faucet.solana.com/) ) + +To run the local validator, you'll need to have it installed, if you don't you +can refer to [installing the Solana CLI](/docs/intro/installation), once you +install the CLI you'll have access to the `solana-test-validator`. + +In a separate terminal run: + +```bash +solana-test-validator +``` + +In `test/index.ts` you'll see five tests, these will help us understand durable +nonces better. + +We'll discuss each test case in depth. + +### 1. Create the nonce account + +Before we write any tests, let's create a helper function above the `describe` +block called `createNonceAccount`. + +It will take the following parameters: + +- `Connection`: Connection to use +- `payer`: The payer +- `nonceKeypair`: The nonce keypair +- `authority`: Authority over the nonce + +It will: + +1. Assemble and submit a transaction that will: + 1. Allocate the account that will be the nonce account. + 2. Initialize the nonce account using the `SystemProgram.nonceInitialize` + instruction. +2. Fetch the nonce account. +3. Serialize the nonce account data and return it. + +Paste the following somewhere above the `describe` block. + +```typescript +async function createNonceAccount( + connection: Connection, + payer: Keypair, + nonceKeypair: Keypair, + authority: PublicKey, +) { + const rentExemptBalance = + await connection.getMinimumBalanceForRentExemption(NONCE_ACCOUNT_LENGTH); + // 2. Assemble and submit a transaction that will: + const tx = new Transaction().add( + // 2.1. Allocate the account that will be the nonce account. + SystemProgram.createAccount({ + fromPubkey: payer.publicKey, + newAccountPubkey: nonceKeypair.publicKey, + lamports: rentExemptBalance, + space: NONCE_ACCOUNT_LENGTH, + programId: SystemProgram.programId, + }), + // 2.2. Initialize the nonce account using the `SystemProgram.nonceInitialize` instruction. + SystemProgram.nonceInitialize({ + noncePubkey: nonceKeypair.publicKey, + authorizedPubkey: authority, + }), + ); + + const sig = await sendAndConfirmTransaction(connection, tx, [ + payer, + nonceKeypair, + ]); + console.log("Creating Nonce TX:", getExplorerLink("tx", sig, "localnet")); + + // 3. Fetch the nonce account. + const accountInfo = await connection.getAccountInfo(nonceKeypair.publicKey); + // 4. Serialize the nonce account data and return it. + return NonceAccount.fromAccountData(accountInfo!.data); +} +``` + +### 2. Test: Create and submit a durable transaction + +To create and submit a durable transaction we must follow these steps: + +1. Create a Durable Transaction. +1. Create the nonce account. +1. Create a new transaction. +1. Set the `recentBlockhash` to be the nonce value. +1. Add the `nonceAdvance` instruction as the first instruction in the + transaction. +1. Add the transfer instruction (you can add any instruction you want here). +1. Sign the transaction with the keypairs that need to sign it, and make sure to + add the nonce authority as a signer as well. +1. Serialize the transaction and encode it. +1. At this point you have a durable transaction, you can store it in a database + or a file or send it somewhere else, etc. +1. Submit the durable transaction. +1. Decode the serialized transaction. +1. Submit it using the `sendAndConfirmRawTransaction` function. 
+ +We can put all of this together in our first test: + +```typescript +it("Creates a durable transaction and submits it", async () => { + // Step 1: Initialize the payer + const payer = await initializeKeypair(connection, { + airdropAmount: AIRDROP_AMOUNT, + minimumBalance: MINIMUM_BALANCE, + }); + + // Step 1.1: Create keypairs for nonce account and recipient + const [nonceKeypair, recipient] = makeKeypairs(2); + + // Step 1.2: Create the nonce account + const nonceAccount = await createNonceAccount( + connection, + payer, + nonceKeypair, + payer.publicKey, + ); + + // Step 1.3: Create a new transaction + const durableTx = new Transaction(); + durableTx.feePayer = payer.publicKey; + + // Step 1.4: Set the recentBlockhash to the nonce value from the nonce account + durableTx.recentBlockhash = nonceAccount.nonce; + + // Step 1.5: Add the `nonceAdvance` instruction as the first instruction + durableTx.add( + SystemProgram.nonceAdvance({ + authorizedPubkey: payer.publicKey, + noncePubkey: nonceKeypair.publicKey, + }), + ); + + // Step 1.6: Add the transfer instruction + durableTx.add( + SystemProgram.transfer({ + fromPubkey: payer.publicKey, + toPubkey: recipient.publicKey, + lamports: TRANSFER_AMOUNT, + }), + ); + + // Step 1.7: Sign the transaction with the payer's keypair + await durableTx.partialSign(payer); + + // Step 1.8: Serialize the transaction (base64 encoding for easier handling) + const serializedTx = durableTx + .serialize({ requireAllSignatures: false }) + .toString("base64"); + + // Step 1.9: At this point, you can store the durable transaction for future use. + // ------------------------------------------------------------------ + + // Step 2: Submit the durable transaction + + // Step 2.1: Decode the serialized transaction + const tx = Buffer.from(serializedTx, "base64"); + + // Step 2.2: Submit the transaction using `sendAndConfirmRawTransaction` + const sig = await sendAndConfirmRawTransaction(connection, tx, { + skipPreflight: true, + }); + + // Step 2.3: Generate and log the explorer link using `getExplorerLink` + console.log("Transaction Signature:", getExplorerLink("tx", sig, "localnet")); +}); +``` + +### 3. Test: Transaction fails if the nonce has advanced + +Because we are using the nonce in place of the recent blockhash, the system will +check to ensure that the nonce we provided matches the nonce in the +`nonce_account`. Additionally with each transaction, we need to add the +`nonceAdvance` instruction is the first instruction. This ensures that if the +transaction goes through, the nonce will change, and no one will be able to +submit it twice. + +Here is what we'll test: + +1. Create a durable transaction just like in the previous step. +2. Advance the nonce. +3. Try to submit the transaction, and it should fail. 
+ +```typescript +it("Fails if the nonce has advanced", async () => { + try { + const payer = await initializeKeypair(connection, { + airdropAmount: AIRDROP_AMOUNT, + minimumBalance: MINIMUM_BALANCE, + }); + + const [nonceKeypair, nonceAuthority, recipient] = makeKeypairs(3); + + // Step 1: Create a Durable Transaction + const nonceAccount = await createNonceAccount( + connection, + payer, + nonceKeypair, + nonceAuthority.publicKey, + ); + + const durableTransaction = new Transaction(); + durableTransaction.feePayer = payer.publicKey; + durableTransaction.recentBlockhash = nonceAccount.nonce; + + // Add a nonce advance instruction + durableTransaction.add( + SystemProgram.nonceAdvance({ + authorizedPubkey: nonceAuthority.publicKey, + noncePubkey: nonceKeypair.publicKey, + }), + ); + + // Add a transfer instruction + durableTransaction.add( + SystemProgram.transfer({ + fromPubkey: payer.publicKey, + toPubkey: recipient.publicKey, + lamports: TRANSFER_AMOUNT, + }), + ); + + // Sign the transaction with both the payer and nonce authority's keypairs + await durableTransaction.partialSign(payer, nonceAuthority); + + // Serialize the transaction (in base64 format for simplicity) + const serializedTransaction = durableTransaction + .serialize({ requireAllSignatures: false }) + .toString("base64"); + + // Step 2: Advance the nonce + const nonceAdvanceSignature = await sendAndConfirmTransaction( + connection, + new Transaction().add( + SystemProgram.nonceAdvance({ + noncePubkey: nonceKeypair.publicKey, + authorizedPubkey: nonceAuthority.publicKey, + }), + ), + [payer, nonceAuthority], + ); + + // Using getExplorerLink from solana-helpers + console.log( + "Nonce Advance Signature:", + getExplorerLink("tx", nonceAdvanceSignature, "localnet"), + ); + + // Deserialize the transaction + const deserializedTransaction = Buffer.from( + serializedTransaction, + "base64", + ); + + // Step 3: Try to submit the transaction, expecting it to fail due to nonce advancement + await assert.rejects( + sendAndConfirmRawTransaction(connection, deserializedTransaction), + ); + } catch (error) { + console.error("Test failed:", error); + throw error; + } +}); +``` + +### 4. Test: Nonce account advances even if the transaction fails + +An important edge case to be aware of is that even if a transaction fails for +any reason other than the nonce advance instruction, the nonce will still +advance. This feature is designed for security purposes, ensuring that once a +user signs a transaction and it fails, that durable transaction cannot be used +again. + +The following code demonstrates this use case. We'll attempt to create a durable +transaction to transfer 50 SOL from the payer to the recipient. However, the +payer doesn't have enough SOL for the transfer, so the transaction will fail, +but the nonce will still advance. 
+ +```typescript +it("Advances the nonce account even if the transaction fails", async () => { + const TRANSFER_AMOUNT = 50; + const payer = await initializeKeypair(connection, { + airdropAmount: 3 * LAMPORTS_PER_SOL, + minimumBalance: 1 * LAMPORTS_PER_SOL, + }); + + // Generate keypairs for nonce account, nonce authority, and recipient + const [nonceKeypair, nonceAuthority, recipient] = makeKeypairs(3); + + // Step 1: Create the nonce account + const nonceAccount = await createNonceAccount( + connection, + payer, + nonceKeypair, + nonceAuthority.publicKey, + ); + const nonceBeforeAdvancing = nonceAccount.nonce; + + console.log("Nonce Before Advancing:", nonceBeforeAdvancing); + + // Step 2: Check payer's balance to ensure it doesn't have enough to transfer + const balance = await connection.getBalance(payer.publicKey); + + // Ensure the balance is less than the transfer amount (50 SOL) + assert( + balance < TRANSFER_AMOUNT * LAMPORTS_PER_SOL, + `Balance too high! Adjust 'TRANSFER_AMOUNT' to be higher than the current balance of ${balance / LAMPORTS_PER_SOL} SOL.`, + ); + + // Step 3: Create a durable transaction that will fail + const durableTx = new Transaction(); + durableTx.feePayer = payer.publicKey; + + // Set the recent blockhash to the nonce value from the nonce account + durableTx.recentBlockhash = nonceAccount.nonce; + + // Step 4: Add the nonce advance instruction as the first instruction + durableTx.add( + SystemProgram.nonceAdvance({ + authorizedPubkey: nonceAuthority.publicKey, + noncePubkey: nonceKeypair.publicKey, + }), + ); + + // Step 5: Add a transfer instruction that will fail (since the payer has insufficient funds) + durableTx.add( + SystemProgram.transfer({ + fromPubkey: payer.publicKey, + toPubkey: recipient.publicKey, + lamports: TRANSFER_AMOUNT * LAMPORTS_PER_SOL, + }), + ); + + // Step 6: Sign the transaction with both the payer and nonce authority + durableTx.sign(payer, nonceAuthority); + + // Serialize the transaction and store or send it (if needed) + const serializedTx = base58.encode( + durableTx.serialize({ requireAllSignatures: false }), + ); + + const tx = base58.decode(serializedTx); + + // Step 7: Send the transaction and expect it to fail (due to insufficient funds) + await assert.rejects( + sendAndConfirmRawTransaction(connection, tx as Buffer, { + skipPreflight: true, // Ensure the transaction reaches the network despite the expected failure + }), + ); + + // Step 8: Fetch the nonce account again after the failed transaction + const nonceAccountAfterAdvancing = await connection.getAccountInfo( + nonceKeypair.publicKey, + ); + const nonceAfterAdvancing = NonceAccount.fromAccountData( + nonceAccountAfterAdvancing!.data, + ).nonce; + + // Step 9: Verify that the nonce has advanced even though the transaction failed + assert.notEqual(nonceBeforeAdvancing, nonceAfterAdvancing); +}); +``` + +Notice that we are setting `skipPreflight: true` in the +`sendAndConfirmRawTransaction` function. This step is crucial because, without +it, the transaction would never reach the network. Instead, the library would +reject it and throw an error, leading to a failure where the nonce does not +advance. + +However, this is not the whole story. In the upcoming test case, we'll discover +a scenario where even if the transaction fails, the nonce will not advance. + +### 5. Test: Nonce account will not advance if the transaction fails because of the nonce advance instruction + +For the nonce to advance, the `advanceNonce` instruction must succeed. 
Thus, if +the transaction fails for any reason related to this instruction, the nonce will +not advance. + +A well-formatted `nonceAdvance` instruction will only ever fail if the nonce +authority did not sign the transaction. + +Let's see this in action. + +```typescript +it("The nonce account will not advance if the transaction fails because the nonce authority did not sign the transaction", async () => { + // Step 1: Initialize payer with SOL airdrop + const payer = await initializeKeypair(connection, { + airdropAmount: 3 * LAMPORTS_PER_SOL, + minimumBalance: 1 * LAMPORTS_PER_SOL, + }); + + // Step 2: Generate keypairs for nonce account, nonce authority, and recipient + const [nonceKeypair, nonceAuthority, recipient] = makeKeypairs(3); + + // Step 3: Create the nonce account + const nonceAccount = await createNonceAccount( + connection, + payer, + nonceKeypair, + nonceAuthority.publicKey, + ); + const nonceBeforeAdvancing = nonceAccount.nonce; + + console.log("Nonce before submitting:", nonceBeforeAdvancing); + + // Step 4: Create a durable transaction that will fail (due to missing nonce authority signature) + const durableTx = new Transaction(); + durableTx.feePayer = payer.publicKey; + + // Use the nonce account's stored nonce as the recent blockhash + durableTx.recentBlockhash = nonceAccount.nonce; + + // Add nonce advance instruction + durableTx.add( + SystemProgram.nonceAdvance({ + authorizedPubkey: nonceAuthority.publicKey, + noncePubkey: nonceKeypair.publicKey, + }), + ); + + // Add transfer instruction + durableTx.add( + SystemProgram.transfer({ + fromPubkey: payer.publicKey, + toPubkey: recipient.publicKey, + lamports: 0.1 * LAMPORTS_PER_SOL, + }), + ); + + // Sign the transaction only with the payer, omitting nonce authority signature (this will cause the failure) + durableTx.sign(payer); + + // Step 5: Serialize the transaction + const serializedTx = base58.encode( + durableTx.serialize({ requireAllSignatures: false }), + ); + + // Decode the serialized transaction + const tx = base58.decode(serializedTx); + + // Step 6: Send the transaction and expect it to fail (due to missing nonce authority signature) + await assert.rejects( + sendAndConfirmRawTransaction(connection, tx as Buffer, { + skipPreflight: true, // Ensure the transaction reaches the network despite the expected failure + }), + ); + + // Step 7: Fetch the nonce account again after the failed transaction + const nonceAccountAfterAdvancing = await connection.getAccountInfo( + nonceKeypair.publicKey, + ); + const nonceAfterAdvancing = NonceAccount.fromAccountData( + nonceAccountAfterAdvancing!.data, + ).nonce; + + // Step 8: Verify that the nonce has not advanced, as the failure was due to the nonce advance instruction + assert.equal(nonceBeforeAdvancing, nonceAfterAdvancing); +}); +``` + +### 6. Test sign transaction and then change nonce authority + +The last test case we'll go over is creating a durable transaction. Try to send +it with the wrong nonce authority (it will fail). Change the nonce authority and +send it with the correct one this time and it will succeed. 
+ +```typescript +it("Submits after changing the nonce authority to an already signed address", async () => { + try { + // Step 1: Initialize payer with an airdrop + const payer = await initializeKeypair(connection, { + airdropAmount: AIRDROP_AMOUNT, + minimumBalance: MINIMUM_BALANCE, + }); + + // Step 2: Generate keypairs for nonce account, nonce authority, and recipient + const [nonceKeypair, nonceAuthority, recipient] = makeKeypairs(3); + + // Step 3: Create the nonce account + const nonceAccount = await createNonceAccount( + connection, + payer, + nonceKeypair, + nonceAuthority.publicKey, + ); + const nonceBeforeAdvancing = nonceAccount.nonce; + + console.log("Nonce before submitting:", nonceBeforeAdvancing); + + // Step 4: Create a durable transaction that will initially fail + const durableTransaction = new Transaction(); + durableTransaction.feePayer = payer.publicKey; + + // Use the nonceAccount's stored nonce as the recent blockhash + durableTransaction.recentBlockhash = nonceAccount.nonce; + + // Add nonce advance instruction + durableTransaction.add( + SystemProgram.nonceAdvance({ + authorizedPubkey: payer.publicKey, // should be nonce authority, will fail + noncePubkey: nonceKeypair.publicKey, + }), + ); + + // Add a transfer instruction + durableTransaction.add( + SystemProgram.transfer({ + fromPubkey: payer.publicKey, + toPubkey: recipient.publicKey, + lamports: TRANSACTION_LAMPORTS, + }), + ); + + // Sign the transaction with the payer + durableTransaction.sign(payer); + + // Step 5: Serialize and store the transaction + const serializedTransaction = base58.encode( + durableTransaction.serialize({ requireAllSignatures: false }), + ); + + const deserializedTx = base58.decode(serializedTransaction); + + // Step 6: Attempt to send the transaction, expect it to fail (due to incorrect authority) + await assert.rejects( + sendAndConfirmRawTransaction(connection, deserializedTx as Buffer, { + skipPreflight: true, // Ensures the transaction hits the network despite failure + }), + ); + + // Step 7: Verify that the nonce did not advance after the failed transaction + const nonceAccountAfterAdvancing = await connection.getAccountInfo( + nonceKeypair.publicKey, + ); + const nonceAfterAdvancing = NonceAccount.fromAccountData( + nonceAccountAfterAdvancing!.data, + ).nonce; + assert.equal(nonceBeforeAdvancing, nonceAfterAdvancing); + + // Step 8: Change the nonce authority to the payer + const nonceAuthSignature = await sendAndConfirmTransaction( + connection, + new Transaction().add( + SystemProgram.nonceAuthorize({ + noncePubkey: nonceKeypair.publicKey, + authorizedPubkey: nonceAuthority.publicKey, + newAuthorizedPubkey: payer.publicKey, // changing authority to payer + }), + ), + [payer, nonceAuthority], + ); + + console.log( + "Nonce Auth Signature:", + getExplorerLink("tx", nonceAuthSignature, "localnet"), + ); + + // Step 9: Submit the transaction again, which should now succeed + const transactionSignature = await sendAndConfirmRawTransaction( + connection, + deserializedTx as Buffer, + { + skipPreflight: true, // Ensures submission without preflight checks + }, + ); + + console.log( + "Transaction Signature:", + getExplorerLink("tx", transactionSignature, "localnet"), + ); + } catch (error) { + console.error("Test failed:", error); + throw error; + } +}); +``` + +### 8. Run the tests + +Finally, let's run the tests: + +```bash +npm start +``` + +Ensure that all tests pass successfully. 
+ +For your reference, here is a screenshot showing the successful execution of the +tests: + +![image](https://github.com/user-attachments/assets/03b2396a-f146-49e2-872b-6a657a209cd4) + +If you see this result, it means your durable nonce implementation is correct! + +Congratulations! You now know how durable nonces work! + +## Challenge + +Write a program that creates a durable transaction and saves it to a file, then +create a separate program that reads the durable transaction file and sends it +to the network. diff --git a/content/courses/offline-transactions/index.mdx b/content/courses/offline-transactions/index.mdx new file mode 100644 index 000000000..86b94bf8d --- /dev/null +++ b/content/courses/offline-transactions/index.mdx @@ -0,0 +1,5 @@ +--- +author: unboxed +title: Offline transactions +description: Use durable nonces to sign transactions ahead of time. +--- diff --git a/content/courses/offline-transactions/meta.json b/content/courses/offline-transactions/meta.json new file mode 100644 index 000000000..d304a57aa --- /dev/null +++ b/content/courses/offline-transactions/meta.json @@ -0,0 +1,3 @@ +{ + "pages": ["durable-nonces"] +} diff --git a/content/courses/onchain-development/anchor-cpi.mdx b/content/courses/onchain-development/anchor-cpi.mdx new file mode 100644 index 000000000..126977e65 --- /dev/null +++ b/content/courses/onchain-development/anchor-cpi.mdx @@ -0,0 +1,774 @@ +--- +title: Anchor CPIs and Errors +objectives: + - Make Cross Program Invocations (CPIs) from an Anchor program + - Use the `cpi` feature to generate helper functions for invoking instructions + on existing Anchor programs + - Use `invoke` and `invoke_signed` to make CPIs where CPI helper functions are + unavailable + - Create and return custom Anchor errors +description: "Invoke other Solana programs from your Anchor app. " +--- + +## Summary + +- Anchor provides a simplified way to create CPIs using a **`CpiContext`** +- Anchor's **`cpi`** feature generates CPI helper functions for invoking + instructions on existing Anchor programs +- If you do not have access to CPI helper functions, you can still use `invoke` + and `invoke_signed` directly +- The **`error_code`** attribute macro is used to create custom Anchor Errors + +## Lesson + +Anchor makes invoking other Solana programs easier, especially if the program +you're invoking is also an Anchor program whose crate you can access. + +In this lesson, you'll learn how to construct an Anchor CPI. You'll also learn +how to throw custom errors from an Anchor program so that you can start to write +more sophisticated Anchor programs. + +### Cross Program Invocations (CPIs) with Anchor + +CPIs allow programs to invoke instructions on other programs using the `invoke` +or `invoke_signed` functions. This allows new programs to build on top of +existing programs (we call that composability). + +While making CPIs directly using `invoke` or `invoke_signed` is still an option, +Anchor also provides a simplified way to make CPIs by using a `CpiContext`. + +In this lesson, you'll use the `anchor_spl` crate to make CPIs to the SPL Token +Program. You can +[explore what's available in the `anchor_spl` crate](https://docs.rs/anchor-spl/latest/anchor_spl/#). + +#### `CpiContext` + +The first step in making a CPI is to create an instance of `CpiContext`. +`CpiContext` is very similar to `Context`, the first argument type required by +Anchor instruction functions. They are both declared in the same module and +share similar functionality. 
+ +The `CpiContext` type specifies non-argument inputs for cross program +invocations: + +- `accounts` - the list of accounts required for the instruction being invoked +- `remaining_accounts` - accounts that are not part of this instruction but may + be used elsewhere (for example, by inner instructions) +- `program` - the program ID of the program being invoked +- `signer_seeds` - if one or more PDAs are signing the transaction, the seeds + required to derive the PDAs + +```rust +pub struct CpiContext<'a, 'b, 'c, 'info, T> +where + T: ToAccountMetas + ToAccountInfos<'info>, +{ + pub accounts: T, + pub remaining_accounts: Vec>, + pub program: AccountInfo<'info>, + pub signer_seeds: &'a [&'b [&'c [u8]]], +} +``` + +You use `CpiContext::new` to construct a new instance when passing along the +original transaction signature. + +```rust +CpiContext::new(cpi_program, cpi_accounts) +``` + +```rust +pub fn new( + program: AccountInfo<'info>, + accounts: T + ) -> Self { + Self { + accounts, + program, + remaining_accounts: Vec::new(), + signer_seeds: &[], + } +} +``` + +You use `CpiContext::new_with_signer` to construct a new instance when signing +on behalf of a PDA for the CPI. + +```rust +CpiContext::new_with_signer(cpi_program, cpi_accounts, seeds) +``` + +```rust +pub fn new_with_signer( + program: AccountInfo<'info>, + accounts: T, + signer_seeds: &'a [&'b [&'c [u8]]], +) -> Self { + Self { + accounts, + program, + signer_seeds, + remaining_accounts: Vec::new(), + } +} +``` + +#### CPI accounts + +One of the key features of `CpiContext` is that the `accounts` argument is +generic, allowing you to pass in any object that implements the `ToAccountMetas` +and `ToAccountInfos<'info>` traits. + +These traits are added by the `#[derive(Accounts)]` attribute macro you've used +before, to specify the accounts required by your instruction handlers. You can +use also use `#[derive(Accounts)]` structs with `CpiContext`. + +This helps with code organization and type safety. + +#### Invoke an instruction handler on another Anchor program + +When calling another Anchor program with a published crate, Anchor can generate +instruction builders and CPI helper functions for you. + +Simply declare your program's dependency on the program you're calling in your +program's `Cargo.toml` file as follows: + +``` +[dependencies] +callee = { path = "../callee", features = ["cpi"]} +``` + +By adding `features = ["cpi"]`, you enable the `cpi` feature and your program +gains access to the `callee::cpi` module. + +The `cpi` module turns `callee`'s instruction handlers into Rust functions. +These functions take a `CpiContext` and any extra data needed for the +instruction. They work just like the instruction handlers in your Anchor +programs, but use `CpiContext` instead of `Context`. The `cpi` module also +provides the account structs needed for these instruction handlers. + +For example, if `callee` has the instruction `do_something` that requires the +accounts defined in the `DoSomething` struct, you could invoke `do_something` as +follows: + +```rust +use anchor_lang::prelude::*; +use callee; +... + +#[program] +pub mod lootbox_program { + use super::*; + + pub fn call_another_program(ctx: Context, params: InitUserParams) -> Result<()> { + callee::cpi::do_something( + CpiContext::new( + ctx.accounts.callee.to_account_info(), + callee::DoSomething { + user: ctx.accounts.user.to_account_info() + } + ) + ) + Ok(()) + } +} +... 
+``` + +#### Invoke an instruction on a non-Anchor program + +When the program you're calling is _not_ an Anchor program, there are two +possible options: + +1. The program maintainers may have published a crate with their own helper + functions for calling into their program. For example, the `anchor_spl` crate + provides virtually identical helper functions from a call-site perspective to + what you would get with the `cpi` module of an Anchor program. E.g. you can + mint using the + [`mint_to` helper function](https://docs.rs/anchor-spl/latest/src/anchor_spl/token.rs.html#36-58) + and use the + [`MintTo` accounts struct](https://docs.rs/anchor-spl/latest/anchor_spl/token/struct.MintTo.html). + + ```rust + token::mint_to( + CpiContext::new_with_signer( + ctx.accounts.token_program.to_account_info(), + token::MintTo { + mint: ctx.accounts.mint_account.to_account_info(), + to: ctx.accounts.token_account.to_account_info(), + authority: ctx.accounts.mint_authority.to_account_info(), + }, + &[&[ + "mint".as_bytes(), + &[*ctx.bumps.get("mint_authority").unwrap()], + ]] + ), + amount, + )?; + ``` + +2. If there is no helper module for the program whose instruction(s) you need to + invoke, you can fall back to using `invoke` and `invoke_signed`. In fact, the + source code of the `mint_to` helper function referenced above shows an + example using `invoke_signed` when given a `CpiContext`. You can follow a + similar pattern if you use an accounts struct and `CpiContext` to organize + and prepare your CPI. + + ```rust + pub fn mint_to<'info>( + ctx: CpiContext<'_foo, '_bar, '_baz, 'info, MintTo<'info>>, + amount: u64, + ) -> Result<()> { + let instruction_handler = spl_token::instruction::mint_to( + &spl_token::ID, + ctx.accounts.mint.key, + ctx.accounts.to.key, + ctx.accounts.authority.key, + &[], + amount, + )?; + anchor_lang::solana_program::program::invoke_signed( + &instruction_handler, + &[ + ctx.accounts.to, + ctx.accounts.mint, + ctx.accounts.authority + ], + ctx.signer_seeds, + ) + .map_err(Into::into) + } + ``` + +### Throw errors in Anchor + +We're deep enough into Anchor at this point that it's essential to know how to +create custom errors. + +Ultimately, all programs return the same error type:  +[`ProgramError`](https://docs.rs/solana-program/latest/solana_program/program_error/enum.ProgramError.html). +However, when writing a program using Anchor, you can use `AnchorError` as an +abstraction on top of `ProgramError`. This abstraction provides additional +information when a program fails, including: + +- The error name and number +- Location in the code where the error was thrown +- The account that violated a constraint + +```rust +pub struct AnchorError { + pub error_name: String, + pub error_code_number: u32, + pub error_msg: String, + pub error_origin: Option, + pub compared_values: Option, +} +``` + +Anchor Errors can be divided into: + +- Anchor Internal Errors that the framework returns from inside its own code +- Custom errors that you, the developer, can create + +You can add errors unique to your program by using the `error_code` attribute. +Simply add this attribute to a custom `enum` type. You can then use the `enum` +variants as errors in your program. Additionally, you can add an error message +to each variant using the `msg` attribute. Clients can then display this error +message if the error occurs. 
+ +```rust +#[error_code] +pub enum MyError { + #[msg("MyAccount may only hold data below 100")] + DataTooLarge +} +``` + +To return a custom error from an instruction handler you can use +the [err](https://docs.rs/anchor-lang/latest/anchor_lang/macro.err.html) or [error](https://docs.rs/anchor-lang/latest/anchor_lang/prelude/macro.error.html) +macro. These add helpful file and line information to the error that Anchor logs +to help with debugging: + +```rust +#[program] +mod hello_anchor { + use super::*; + pub fn set_data(ctx: Context, data: MyAccount) -> Result<()> { + if data.data >= 100 { + return err!(MyError::DataTooLarge); + } + ctx.accounts.my_account.set_inner(data); + Ok(()) + } +} + +#[error_code] +pub enum MyError { + #[msg("MyAccount may only hold data below 100")] + DataTooLarge +} +``` + +Alternatively, you can use +the [require](https://docs.rs/anchor-lang/latest/anchor_lang/macro.require.html) macro +to simplify returning errors. The code above can be refactored to the following: + +```rust +#[program] +mod hello_anchor { + use super::*; + pub fn set_data(ctx: Context, data: MyAccount) -> Result<()> { + require!(data.data < 100, MyError::DataTooLarge); + ctx.accounts.my_account.set_inner(data); + Ok(()) + } +} + +#[error_code] +pub enum MyError { + #[msg("MyAccount may only hold data below 100")] + DataTooLarge +} +``` + +## Lab + +Let's practice the concepts we've gone over in this lesson by building on top of +the Movie Review program from previous lessons. + +In this lab we'll update the program to mint tokens to users when they submit a +new movie review. + + + + + +### Starter + +To get started, we will be using the final state of the Anchor Movie Review +program from the previous lesson. So, if you just completed that lesson, then +you're all set and ready to go. If you are just jumping in here, no worries, you +can [download the starter code](https://github.com/Unboxed-Software/anchor-movie-review-program/tree/solution-pdas). +We'll be using the `solution-pdas` branch as our starting point. + + + + +### Add dependencies to Cargo.toml + +Before we get started we need enable the `init-if-needed` feature and add the +`anchor-spl` crate to the dependencies in `Cargo.toml`. If you need to brush up +on the `init-if-needed` feature take a look at the +[Anchor PDAs and Accounts lesson](/developers/courses/onchain-development/anchor-pdas)). + +```rust +[dependencies] +anchor-lang = { version = "0.30.1", features = ["init-if-needed"] } +anchor-spl = "0.30.1" +``` + +Since we are adding `anchor-spl` as a dependency we also need to add the +`idl-build` for it in the features section of `Cargo.toml`. This is because all +types that will be used in the `Accounts` structures that we are adding in this +lesson require the `IdlBuild` trait implementation to generate an IDL. + +```rust +[features] +# All lines remain unchanged, except for this idl-build line +idl-build = ["anchor-lang/idl-build", "anchor-spl/idl-build"] +``` + + + + +### Initialize reward token + +Next, navigate to `lib.rs` and implement the `InitializeMint` context type and +list the accounts and constraints the instruction requires. Here we initialize a +new `Mint` account using a PDA with the string "mint" as a seed. Note that we +can use the same PDA for both the address of the `Mint` account and the mint +authority. Using a PDA as the mint authority enables our program to sign for the +minting of the tokens. 
+
+To initialize the `Mint` account, we'll need to include the `token_program`,
+`rent`, and `system_program` in the list of accounts.
+
+```rust
+#[derive(Accounts)]
+pub struct InitializeMint<'info> {
+    #[account(
+        init,
+        seeds = ["mint".as_bytes()],
+        bump,
+        payer = user,
+        mint::decimals = 6,
+        mint::authority = mint,
+    )]
+    pub mint: Account<'info, Mint>,
+    #[account(mut)]
+    pub user: Signer<'info>,
+    pub token_program: Program<'info, Token>,
+    pub rent: Sysvar<'info, Rent>,
+    pub system_program: Program<'info, System>,
+}
+```
+
+There may be some constraints above that you haven't seen yet. Adding
+`mint::decimals` and `mint::authority` along with `init` ensures that the
+account is initialized as a new token mint with the appropriate decimals and
+mint authority set. Here the mint PDA is set as its own mint authority, so the
+program can sign for minting with the PDA's seeds.
+
+Now, create an instruction to initialize a new token mint. This will be the
+token that is minted each time a user leaves a review. Note that we don't need
+to include any custom instruction logic since the initialization can be handled
+entirely through Anchor constraints.
+
+```rust
+pub fn initialize_token_mint(_ctx: Context<InitializeMint>) -> Result<()> {
+    msg!("Token mint initialized");
+    Ok(())
+}
+```
+
+
+
+### Anchor Error
+
+Next, let's create an Anchor Error that we'll use to validate the following:
+
+- The `rating` passed to either the `add_movie_review` or `update_movie_review`
+  instruction.
+- The `title` passed to the `add_movie_review` instruction.
+- The `description` passed to either the `add_movie_review` or
+  `update_movie_review` instruction.
+
+```rust
+#[error_code]
+enum MovieReviewError {
+    #[msg("Rating must be between 1 and 5")]
+    InvalidRating,
+    #[msg("Movie Title too long")]
+    TitleTooLong,
+    #[msg("Movie Description too long")]
+    DescriptionTooLong,
+}
+```
+
+
+
+### Update add_movie_review instruction
+
+Now that we've done some setup, let's update the `add_movie_review` instruction
+and `AddMovieReview` context type to mint tokens to the reviewer.
+
+Next, update the `AddMovieReview` context type to add the following accounts:
+
+- `token_program` - we'll be using the Token Program to mint tokens
+- `mint` - the mint account for the tokens that we'll mint to users when they
+  add a movie review
+- `token_account` - the associated token account for the aforementioned `mint`
+  and reviewer
+- `associated_token_program` - required because we'll be using the
+  `associated_token` constraint on the `token_account`
+
+```rust
+#[derive(Accounts)]
+#[instruction(title: String, description: String)]
+pub struct AddMovieReview<'info> {
+    #[account(
+        init,
+        seeds=[title.as_bytes(), initializer.key().as_ref()],
+        bump,
+        payer = initializer,
+        space = DISCRIMINATOR + MovieAccountState::INIT_SPACE
+    )]
+    pub movie_review: Account<'info, MovieAccountState>,
+    #[account(mut)]
+    pub initializer: Signer<'info>,
+    pub system_program: Program<'info, System>,
+    pub token_program: Program<'info, Token>,
+    #[account(
+        seeds = ["mint".as_bytes()],
+        bump,
+        mut
+    )]
+    pub mint: Account<'info, Mint>,
+    #[account(
+        init_if_needed,
+        payer = initializer,
+        associated_token::mint = mint,
+        associated_token::authority = initializer
+    )]
+    pub token_account: Account<'info, TokenAccount>,
+    pub associated_token_program: Program<'info, AssociatedToken>,
+}
+```
+
+Again, some of the above constraints may be unfamiliar to you.
+The `associated_token::mint` and `associated_token::authority` constraints,
+along with the `init_if_needed` constraint, ensure that if the account has not
+already been initialized, it will be initialized as an associated token account
+for the specified mint and authority. Also, the `payer` constraint sets which
+account pays the costs related to the account initialization.
+
+Next, let's update the `add_movie_review` instruction to do the following:
+
+- Check that `rating` is valid. If it is not a valid rating, return the
+  `InvalidRating` error.
+- Check that `title` length is valid. If it is not a valid length, return the
+  `TitleTooLong` error.
+- Check that `description` length is valid. If it is not a valid length, return
+  the `DescriptionTooLong` error.
+- Make a CPI to the token program's `mint_to` instruction using the mint
+  authority PDA as a signer. Note that we'll mint 10 tokens to the user but
+  need to adjust for the mint decimals by making it `10*10^6`.
+
+Fortunately, we can use the `anchor_spl` crate to access helper functions and
+types like `mint_to` and `MintTo` for constructing our CPI to the Token Program.
+`mint_to` takes a `CpiContext` and integer as arguments, where the integer
+represents the number of tokens to mint. `MintTo` can be used for the list of
+accounts that the mint instruction needs.
+
+Update your `use` statements to include:
+
+```rust
+use anchor_spl::token::{mint_to, MintTo, Mint, TokenAccount, Token};
+use anchor_spl::associated_token::AssociatedToken;
+```
+
+Next, update the `add_movie_review` function to:
+
+```rust
+pub fn add_movie_review(
+    ctx: Context<AddMovieReview>,
+    title: String,
+    description: String,
+    rating: u8
+) -> Result<()> {
+    // We require that the rating is between 1 and 5
+    require!(
+        rating >= MIN_RATING && rating <= MAX_RATING,
+        MovieReviewError::InvalidRating
+    );
+
+    // We require that the title is not longer than 20 characters
+    require!(
+        title.len() <= MAX_TITLE_LENGTH,
+        MovieReviewError::TitleTooLong
+    );
+
+    // We require that the description is not longer than 50 characters
+    require!(
+        description.len() <= MAX_DESCRIPTION_LENGTH,
+        MovieReviewError::DescriptionTooLong
+    );
+
+    msg!("Movie review account created");
+    msg!("Title: {}", title);
+    msg!("Description: {}", description);
+    msg!("Rating: {}", rating);
+
+    let movie_review = &mut ctx.accounts.movie_review;
+    movie_review.reviewer = ctx.accounts.initializer.key();
+    movie_review.title = title;
+    movie_review.description = description;
+    movie_review.rating = rating;
+
+    mint_to(
+        CpiContext::new_with_signer(
+            ctx.accounts.token_program.to_account_info(),
+            MintTo {
+                // the mint PDA is the mint authority, so the program signs
+                // for it with the PDA seeds passed below
+                authority: ctx.accounts.mint.to_account_info(),
+                to: ctx.accounts.token_account.to_account_info(),
+                mint: ctx.accounts.mint.to_account_info()
+            },
+            &[&[
+                "mint".as_bytes(),
+                &[ctx.bumps.mint]
+            ]]
+        ),
+        10 * 10u64.pow(6)
+    )?;
+
+    msg!("Minted tokens");
+
+    Ok(())
+}
+```
+
+
+
+### Update `update_movie_review` instruction
+
+Here we are only adding the check that `rating` and `description` are valid.
+ +```rust +pub fn update_movie_review( + ctx: Context, + title: String, + description: String, + rating: u8 +) -> Result<()> { + // We require that the rating is between 1 and 5 + require!( + rating >= MIN_RATING && rating <= MAX_RATING, + MovieReviewError::InvalidRating + ); + + // We require that the description is not longer than 50 characters + require!( + description.len() <= MAX_DESCRIPTION_LENGTH, + MovieReviewError::DescriptionTooLong + ); + + msg!("Movie review account space reallocated"); + msg!("Title: {}", title); + msg!("Description: {}", description); + msg!("Rating: {}", rating); + + let movie_review = &mut ctx.accounts.movie_review; + movie_review.description = description; + movie_review.rating = rating; + + Ok(()) +} +``` + + + + +### Test + +Those are all of the changes we need to make to the program! Now, let's update +our tests. + +Start by making sure your imports and `describe` function look like this: + +```typescript +import * as anchor from "@coral-xyz/anchor" +import { Program } from "@coral-xyz/anchor" +import { expect } from "chai" +import { getAssociatedTokenAddress, getAccount } from "@solana/spl-token" +import { AnchorMovieReviewProgram } from "../target/types/anchor_movie_review_program" + +describe("anchor-movie-review-program", () => { + // Configure the client to use the local cluster. + const provider = anchor.AnchorProvider.env() + anchor.setProvider(provider) + + const program = anchor.workspace + .AnchorMovieReviewProgram as Program + + const movie = { + title: "Just a test movie", + description: "Wow what a good movie it was real great", + rating: 5, + } + + const [movie_pda] = anchor.web3.PublicKey.findProgramAddressSync( + [Buffer.from(movie.title), provider.wallet.publicKey.toBuffer()], + program.programId + ) + + const [mint] = anchor.web3.PublicKey.findProgramAddressSync( + [Buffer.from("mint")], + program.programId + ) +... +} +``` + +You can run `npm install @solana/spl-token --save-dev` if you don't have it +installed. + +With that done, add a test for the `initializeTokenMint` instruction: + +```typescript +it("Initializes the reward token", async () => { + const tx = await program.methods.initializeTokenMint().rpc(); +}); +``` + +Notice that we didn't have to add `.accounts` because they can be inferred, +including the `mint` account (assuming you have seed inference enabled). + +Next, update the test for the `addMovieReview` instruction. The primary +additions are: + +1. To get the associated token address that needs to be passed into the + instruction as an account that cannot be inferred +2. 
Check at the end of the test that the associated token account has 10 tokens + +```typescript +it("Movie review is added`", async () => { + const tokenAccount = await getAssociatedTokenAddress( + mint, + provider.wallet.publicKey, + ); + + const tx = await program.methods + .addMovieReview(movie.title, movie.description, movie.rating) + .accounts({ + tokenAccount: tokenAccount, + }) + .rpc(); + + const account = await program.account.movieAccountState.fetch(movie_pda); + expect(account.title).to.equal(movie.title); + expect(account.rating).to.equal(movie.rating); + expect(account.description).to.equal(movie.description); + expect(account.reviewer.toBase58()).to.equal( + provider.wallet.publicKey.toBase58(), + ); + + const userAta = await getAccount(provider.connection, tokenAccount); + expect(Number(userAta.amount)).to.equal(10 * Math.pow(10, 6)); +}); +``` + +After that, neither the test for `updateMovieReview` nor the test for +`deleteMovieReview` need any changes. + +At this point, run `anchor test` and you should see the following output + +```bash +anchor-movie-review-program + ✔ Initializes the reward token (458ms) + ✔ Movie review is added (410ms) + ✔ Movie review is updated (402ms) + ✔ Deletes a movie review (405ms) + + 5 passing (2s) +``` + + + + + +If you need more time with the concepts from this lesson or got stuck along the +way, feel free to take a look at the +[solution code](https://github.com/Unboxed-Software/anchor-movie-review-program/tree/solution-add-tokens). +Note that the solution to this lab is on the `solution-add-tokens` branch. + +## Challenge + +To apply what you've learned about CPIs in this lesson, think about how you +could incorporate them into the Student Intro program. You could do something +similar to what we did in the lab here and add some functionality to mint tokens +to users when they introduce themselves. + +Try to do this independently if you can! But if you get stuck, feel free to +reference +this [solution code](https://github.com/Unboxed-Software/anchor-student-intro-program/tree/cpi-challenge). +Note that your code may look slightly different than the solution code depending +on your implementation. + + + +Push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=21375c76-b6f1-4fb6-8cc1-9ef151bc5b0a)! + + diff --git a/content/courses/onchain-development/anchor-pdas.mdx b/content/courses/onchain-development/anchor-pdas.mdx new file mode 100644 index 000000000..2e66fc825 --- /dev/null +++ b/content/courses/onchain-development/anchor-pdas.mdx @@ -0,0 +1,933 @@ +--- +title: Anchor PDAs and Accounts +objectives: + - Use the `seeds` and `bump` constraints to work with PDA accounts in Anchor + - Enable and use the `init_if_needed` constraint + - Use the `realloc` constraint to reallocate space on an existing account + - Use the `close` constraint to close an existing account +description: + "Store arbitrary data on Solana, using PDAs, an inbuilt key-value store." +--- + +## Summary + +- The `seeds` and `bump` constraints are used to initialize and validate PDA + accounts in Anchor +- The `init_if_needed` constraint is used to conditionally initialize a new + account +- The `realloc` constraint is used to reallocate space on an existing account +- The `close` constraint is used to close an account and refund its rent + +## Lesson + +In this lesson you'll learn how to work with PDAs, reallocate accounts, and +close accounts in Anchor. 
+ +Recall that Anchor programs separate instruction logic from account validation. +Account validation happens in structs that list the accounts needed for an +instruction. Each field in the struct represents an account, and you can +customize the validation using the `#[account(...)]` attribute macro. + +In addition to validating accounts, some constraints can automate tasks that +would otherwise require repetitive code in our instructions. This lesson will +cover the `seeds`, `bump`, `realloc`, and `close` constraints to help you easily +handle PDAs, reallocate space, and close accounts. + +### PDAs with Anchor + +PDAs store data, at addresses specified by the onchain programmer, using a list +of seeds, a bump seed, and a program ID. + +Anchor provides a convenient way to validate a PDA with the `seeds` and `bump` +constraints. + +```rust +#[derive(Accounts)] +struct ExampleAccounts { + #[account( + seeds = [b"example_seed"], + bump + )] + pub pda_account: Account<'info, AccountType>, +} +``` + +During account validation, Anchor will use the specified seeds to derive a PDA +and check if the provided account matches the derived PDA. + +When the `bump` constraint is included without specifying a specific bump, +Anchor will use the canonical bump (the first bump that results in a valid PDA, +with a value of 255). Typically, you should use the canonical bump. + +You can also use other fields from within the struct as seeds, such as the +signer's public key. + +You can also reference the deserialized instruction data if you add the +`#[instruction(...)]` attribute macro to the struct. + +For example, the following example shows a list of accounts that include: + +- `pda_account` +- `user` + +The `pda_account` is constrained such that the seeds must be the string +"example_seed," the public key of `user`, and the string passed into the +instruction as `instruction_data`. + +```rust +#[derive(Accounts)] +#[instruction(instruction_data: String)] +pub struct Example<'info> { + #[account( + seeds = [ + b"example_seed", + user.key().as_ref(), + instruction_data.as_ref() + ], + bump + )] + pub pda_account: Account<'info, AccountType>, + #[account(mut)] + pub user: Signer<'info> +} +``` + +If the `pda_account` address provided by the client doesn't match the PDA +derived using the specified seeds and the canonical bump, then the account +validation will fail. + +#### Use PDAs with the `init` constraint + +You can combine the `seeds` and `bump` constraints with the `init` constraint to +initialize an account using a PDA. + +Recall that the `init` constraint must be used with the `payer` and `space` +constraints to specify who pays for the account initialization and how much +space to allocate. + +Additionally, you need to include `system_program` to handle the creation and +funding of the new account. + +```rust +#[derive(Accounts)] +pub struct InitializePda<'info> { + #[account( + init, + seeds = [b"example_seed", user.key().as_ref()], + bump, + payer = user, + space = DISCRIMINATOR + Accountype::INIT_SPACE + )] + pub pda_account: Account<'info, AccountType>, + #[account(mut)] + pub user: Signer<'info>, + pub system_program: Program<'info, System>, +} + +#[account] +#[derive(InitSpace)] +pub struct AccountType { + pub data: u64, +} + +const DISCRIMINATOR: usize = 8; +``` + +When using `init` for non-PDA accounts, Anchor defaults to setting the owner of +the initialized account to be the program currently executing the instruction. 
+ +However, when using `init` in combination with `seeds` and `bump`, the owner +_must_ be the executing program. This is because initializing an account for the +PDA requires a signature that only the executing program can provide. In other +words, the signature verification for the initialization of the PDA account +would fail if the program ID used to derive the PDA did not match the program ID +of the executing program. + +#### Seed inference + +The account list for an instruction can get really long for some programs. To +simplify the client-side experience when invoking an Anchor program instruction, +we can turn on **seed inference**. + +Seed inference adds information about PDA seeds to the IDL so that Anchor can +infer PDA seeds from existing call-site information. In the previous example, +the seeds are `b"example_seed"` and `user.key()`. The first is static and +therefore known, and the second is known because `user` is the transaction +signer. + +If you use seed inference when building your program, then as long as you're +calling the program using Anchor, you don't need to explicitly derive and pass +in the PDA. Instead, the Anchor library will do it for you. + +You can turn on seed inference in the `Anchor.toml` file with `seeds = true` +under `[features]`. + +``` +[features] +seeds = true +``` + +#### Use the `#[instruction(...)]` attribute macro + +Let's briefly look at the `#[instruction(...)]` attribute macro before moving +on. When using `#[instruction(...)]`, the instruction data you provide in the +list of arguments must match and be in the same order as the instruction +arguments. You can omit unused arguments at the end of the list, but you must +include all arguments up until the last one you will be using. + +For example, imagine an instruction has arguments `input_one`, `input_two`, and +`input_three`. If your account constraints need to reference `input_one` and +`input_three`, you need to list all three arguments in the `#[instruction(...)]` +attribute macro. + +However, if your constraints only reference `input_one` and `input_two`, you can +omit `input_three`. + +```rust +pub fn example_instruction( + ctx: Context, + input_one: String, + input_two: String, + input_three: String, +) -> Result<()> { + ... + Ok(()) +} + +#[derive(Accounts)] +#[instruction(input_one:String, input_two:String)] +pub struct Example<'info> { + ... +} +``` + +Additionally, you will get an error if you list the inputs in the incorrect +order: + +```rust +#[derive(Accounts)] +#[instruction(input_two:String, input_one:String)] +pub struct Example<'info> { + ... +} +``` + +### Init-if-needed + +Anchor provides an `init_if_needed` constraint that can be used to initialize an +account if the account has not already been initialized. + +This feature is gated behind a feature flag to make sure you are intentional +about using it. For security reasons, it's smart to avoid having one instruction +branch into multiple logic paths. And as the name suggests, `init_if_needed` +executes one of two possible code paths depending on the state of the account in +question. + +When using `init_if_needed`, you need to make sure to properly protect your +program against re-initialization attacks. You need to include checks in your +code that check that the initialized account cannot be reset to its initial +settings after the first time it was initialized. + +To use `init_if_needed`, you must first enable the feature in `Cargo.toml`. 
+ +```rust +[dependencies] +anchor-lang = { version = "0.30.1", features = ["init-if-needed"] } +``` + +Once you've enabled the feature, you can include the constraint in the +`#[account(...)]` attribute macro. The example below demonstrates using the +`init_if_needed` constraint to initialize a new associated token account if one +does not already exist. + +```rust +#[program] +mod example { + use super::*; + pub fn initialize(ctx: Context) -> Result<()> { + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Initialize<'info> { + #[account( + init_if_needed, + payer = payer, + associated_token::mint = mint, + associated_token::authority = payer + )] + pub token_account: Account<'info, TokenAccount>, + pub mint: Account<'info, Mint>, + #[account(mut)] + pub payer: Signer<'info>, + pub system_program: Program<'info, System>, + pub token_program: Program<'info, Token>, + pub associated_token_program: Program<'info, AssociatedToken>, + pub rent: Sysvar<'info, Rent>, +} +``` + +When the `initialize` instruction is invoked in the previous example, Anchor +will check if the `token_account` exists and initialize it if it does not. If it +already exists, then the instruction will continue without initializing the +account. Just as with the `init` constraint, you can use `init_if_needed` in +conjunction with `seeds` and `bump` if the account is a PDA. + +### Realloc + +The `realloc` constraint provides a simple way to reallocate space for existing +accounts. + +The `realloc` constraint must be used in combination with the following +constraints: + +- `mut` - the account must be set as mutable +- `realloc::payer` - the account to subtract or add lamports to depending on + whether the reallocation is decreasing or increasing account space +- `realloc::zero` - boolean to specify if new memory should be zero initialized + +As with `init`, you must include `system_program` as one of the accounts in the +account validation struct when using `realloc`. + +Below is an example of reallocating space for an account that stores a `data` +field of type `String`. + +```rust +#[derive(Accounts)] +#[instruction(instruction_data: String)] +pub struct ReallocExample<'info> { + #[account( + mut, + seeds = [b"example_seed", user.key().as_ref()], + bump, + realloc = DISCRIMINATOR + STRING_SIZE_SPACE + instruction_data.len(), + realloc::payer = user, + realloc::zero = false, + )] + pub pda_account: Account<'info, AccountType>, + #[account(mut)] + pub user: Signer<'info>, + pub system_program: Program<'info, System>, +} + +#[account] +#[derive(InitSpace)] +pub struct AccountType { + pub data: String, +} + +const DISCRIMINATOR: usize = 8; +const STRING_SIZE_SPACE: usize = 4; +``` + +The `realloc` constraint from the above example can be broken down as follows: + +- the `DISCRIMINATOR` is `8` +- the `STRING_SIZE_SPACE` is `4` for the space required to store the length of + the string. As required by BORSH serialization +- `instruction_data.len()` is the length of the string itself + +> [BORSH](https://solanacookbook.com/guides/serialization.html) stands for +> _Binary Object Representation Serializer for Hashing_ and is used to +> efficiently and compactly serialize and deserialize data structures. + +If the change in account data length is additive, lamports will be transferred +from the `realloc::payer` to the account to maintain rent exemption. Likewise, +if the change is subtractive, lamports will be transferred from the account back +to the `realloc::payer`. 
+ +The `realloc::zero` constraint ensures that any new memory allocated during +reallocation is set to zero. This should be set to true if you expect the memory +of an account to change size frequently. This way, you clear out any old data +that might otherwise remain. + +### Close + +The `close` constraint provides a simple and secure way to close an existing +account. + +The `close` constraint marks the account as closed at the end of the +instruction's execution by setting its discriminator to a _special value_ called +`CLOSED_ACCOUNT_DISCRIMINATOR` and sends its lamports to a specified account. +This _special value_ prevents the account from being reopened because any +attempt to reinitialize the account will fail the discriminator check. + +The example below uses the `close` constraint to close the `data_account` and +sends the lamports allocated for rent to the `receiver` account. + +```rust +pub fn close(ctx: Context) -> Result<()> { + Ok(()) +} + +#[derive(Accounts)] +pub struct Close<'info> { + #[account(mut, close = receiver)] + pub data_account: Account<'info, AccountType>, + #[account(mut)] + pub receiver: Signer<'info> +} +``` + +## Lab + +Let's practice the concepts we've gone over in this lesson by creating a Movie +Review program using the Anchor framework. + +This program will allow users to: + +- Use a PDA to initialize a new movie review account to store the review +- Update the content of an existing movie review account +- Close an existing movie review account + + + + + +### Create a new Anchor project + +To begin, let's create a new project using `anchor init`. + +```bash +anchor init anchor-movie-review-program +``` + +Next, navigate to the `lib.rs` file within the `programs` folder and you should +see the following starter code. + +```rust +use anchor_lang::prelude::*; + +declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); + +#[program] +pub mod anchor_movie_review_program { + use super::*; + + pub fn initialize(ctx: Context) -> Result<()> { + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Initialize {} +``` + +Go ahead and remove the `initialize` instruction and `Initialize` type. + +```rust +use anchor_lang::prelude::*; + +declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); + +#[program] +pub mod anchor_movie_review_program { + use super::*; + +} +``` + + + + +### MovieAccountState + +First, let's use the `#[account]` attribute macro to define the +`MovieAccountState` that will represent the data structure of the movie review +accounts. As a reminder, the `#[account]` attribute macro implements various +traits that help with serialization and deserialization of the account, set the +discriminator for the account, and set the owner of a new account as the program +ID defined in the `declare_id!` macro. 
+ +Within each movie review account, we'll store the: + +- `reviewer` - user creating the review +- `rating` - rating for the movie +- `title` - title of the movie +- `description` - content of the review + +```rust +use anchor_lang::prelude::*; + +declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); + +#[program] +pub mod anchor_movie_review_program { + use super::*; + +} + +#[account] +#[derive(InitSpace)] +pub struct MovieAccountState { + pub reviewer: Pubkey, // 32 + pub rating: u8, // 1 + #[max_len(20)] + pub title: String, // 4 + len() + #[max_len(50)] + pub description: String, // 4 + len() +} + +const DISCRIMINATOR: usize = 8; +``` + +Using the `#[derive(InitSpace)]` macro on the `AccountStruct` automatically +calculates the `INIT_SPACE` constant which represents the space required for the +account fields, including fixed-size fields and the length-prefixed strings. + +In cases of dynamic fields like strings, we can use the `#[max_len]` macro to +specify the maximum length of these fields to determining the space needed for +the account during initialization. Here, we have chosen the `title` string to be +of length 20 (max), and the `description` string to be of length 50 (max). + + + + +### Custom error codes + +During our implementation, we will be doing some checks and throwing some custom +errors in case those checks are not successful. + +For, that let's go ahead and create an enum that will contain the different type +of errors as well as the error messages associated: + +```rust +#[error_code] +enum MovieReviewError { + #[msg("Rating must be between 1 and 5")] + InvalidRating, + #[msg("Movie Title too long")] + TitleTooLong, + #[msg("Movie Description too long")] + DescriptionTooLong, +} +``` + +The `#[error_code]` macro will generate error types to be used as return types +from our instruction handlers. + +Don't worry too much about custom errors for now, as they will be covered with +more detail in the next chapter. + + + + +### Add Movie Review + +Next, let's implement the `add_movie_review` instruction. The `add_movie_review` +instruction will require a `Context` of type `AddMovieReview` that we'll +implement shortly. + +The instruction will require three additional arguments as instruction data +provided by a reviewer: + +- `title` - title of the movie as a `String` +- `description` - details of the review as a `String` +- `rating` - rating for the movie as a `u8` + +Within the instruction logic, we'll populate the data of the new `movie_review` +account with the instruction data. We'll also set the `reviewer` field as the +`initializer` account from the instruction context. + +We will also perform some checks, using the `require!` macro, to make sure that: + +- The rating is between 1 and 5 +- The title is no longer than 20 characters +- The description is no longer than 50 characters + +The `require!` macro will perform a check and throw a custom error in case that +check is not successful. 
+ +```rust +const MIN_RATING: u8 = 1; +const MAX_RATING: u8 = 5; +const MAX_TITLE_LENGTH: usize = 20; +const MAX_DESCRIPTION_LENGTH: usize = 50; + +#[program] +pub mod anchor_movie_review_program{ + use super::*; + + pub fn add_movie_review( + ctx: Context, + title: String, + description: String, + rating: u8, + ) -> Result<()> { + // We require that the rating is between 1 and 5 + require!(rating >= MIN_RATING && rating <= MAX_RATING, MovieReviewError::InvalidRating); + + // We require that the title is not longer than 20 characters + require!(title.len() <= MAX_TITLE_LENGTH, MovieReviewError::TitleTooLong); + + // We require that the description is not longer than 50 characters + require!(description.len() <= MAX_DESCRIPTION_LENGTH, MovieReviewError::DescriptionTooLong); + + msg!("Movie Review Account Created"); + msg!("Title: {}", title); + msg!("Description: {}", description); + msg!("Rating: {}", rating); + + let movie_review = &mut ctx.accounts.movie_review; + movie_review.reviewer = ctx.accounts.initializer.key(); + movie_review.title = title; + movie_review.rating = rating; + movie_review.description = description; + Ok(()) + } +} +``` + +Next, let's create the `AddMovieReview` struct that we used as the generic in +the instruction's context. This struct will list the accounts the +`add_movie_review` instruction requires. + +Remember, you'll need the following macros: + +- The `#[derive(Accounts)]` macro is used to deserialize and validate the list + of accounts specified within the struct +- The `#[instruction(...)]` attribute macro is used to access the instruction + data passed into the instruction +- The `#[account(...)]` attribute macro then specifies additional constraints on + the accounts + +The `movie_review` account is a PDA that needs to be initialized, so we'll add +the `seeds` and `bump` constraints as well as the `init` constraint with its +required `payer` and `space` constraints. + +For the PDA seeds, we'll use the movie title and the reviewer's public key. The +payer for the initialization should be the reviewer, and the space allocated on +the account should be enough for the account discriminator, the reviewer's +public key, and the movie review's rating, title, and description. + +```rust +#[derive(Accounts)] +#[instruction(title:String)] +pub struct AddMovieReview<'info> { + #[account( + init, + seeds = [title.as_bytes(), initializer.key().as_ref()], + bump, + payer = initializer, + space = DISCRIMINATOR + MovieAccountState::INIT_SPACE + )] + pub movie_review: Account<'info, MovieAccountState>, + #[account(mut)] + pub initializer: Signer<'info>, + pub system_program: Program<'info, System>, +} +``` + + + + +### Update Movie Review + +Next, let's implement the `update_movie_review` instruction with a context whose +generic type is `UpdateMovieReview`. + +Just as before, the instruction will require three additional arguments as +instruction data provided by a reviewer: + +- `title` - title of the movie +- `description` - details of the review +- `rating` - rating for the movie + +Within the instruction logic we'll update the `rating` and `description` stored +on the `movie_review` account. + +While the `title` doesn't get used in the instruction function itself, we'll +need it for account validation of `movie_review` in the next step. + +```rust +#[program] +pub mod anchor_movie_review_program { + use super::*; + + ... 
+ + pub fn update_movie_review( + ctx: Context, + title: String, + description: String, + rating: u8, + ) -> Result<()> { + + // We require that the rating is between 1 and 5 + require!(rating >= MIN_RATING && rating <= MAX_RATING, MovieReviewError::InvalidRating); + + // We require that the title is not longer than 20 characters + require!(title.len() <= MAX_TITLE_LENGTH, MovieReviewError::TitleTooLong); + + // We require that the description is not longer than 50 characters + require!(description.len() <= MAX_DESCRIPTION_LENGTH, MovieReviewError::DescriptionTooLong); + + msg!("Movie review account space reallocated"); + msg!("Title: {}", title); + msg!("Description: {}", description); + msg!("Rating: {}", rating); + + let movie_review = &mut ctx.accounts.movie_review; + movie_review.rating = rating; + movie_review.description = description; + + Ok(()) + } + +} +``` + +Next, let's create the `UpdateMovieReview` struct to define the accounts that +the `update_movie_review` instruction needs. + +Since the `movie_review` account will have already been initialized by this +point, we no longer need the `init` constraint. However, since the value of +`description` may now be different, we need to use the `realloc` constraint to +reallocate the space on the account. Accompanying this, we need the `mut`, +`realloc::payer`, and `realloc::zero` constraints. + +We'll also still need the `seeds` and `bump` constraints as we had them in +`AddMovieReview`. + +```rust +#[derive(Accounts)] +#[instruction(title:String)] +pub struct UpdateMovieReview<'info> { + #[account( + mut, + seeds = [title.as_bytes(), initializer.key().as_ref()], + bump, + realloc = DISCRIMINATOR + MovieAccountState::INIT_SPACE, + realloc::payer = initializer, + realloc::zero = true, + )] + pub movie_review: Account<'info, MovieAccountState>, + #[account(mut)] + pub initializer: Signer<'info>, + pub system_program: Program<'info, System>, +} +``` + +Note that the `realloc` constraint is set to the new space required by the +`movie_review` account based on the updated value of `description`. + +Additionally, the `realloc::payer` constraint specifies that any additional +lamports required or refunded will come from or be send to the `initializer` +account. + +Finally, we set the `realloc::zero` constraint to `true` because the +`movie_review` account may be updated multiple times either shrinking or +expanding the space allocated to the account. + + + + +### Delete Movie Review + +Lastly, let's implement the `delete_movie_review` instruction to close an +existing `movie_review` account. + +We'll use a context whose generic type is `DeleteMovieReview` and won't include +any additional instruction data. Since we are only closing an account, we +actually don't need any instruction logic inside the body of the function. The +closing itself will be handled by the Anchor constraint in the +`DeleteMovieReview` type. + +```rust +#[program] +pub mod anchor_movie_review_program { + use super::*; + + ... + + pub fn delete_movie_review(_ctx: Context, title: String) -> Result<()> { + msg!("Movie review for {} deleted", title); + Ok(()) + } + +} +``` + +Next, let's implement the `DeleteMovieReview` struct. 
+ +```rust +#[derive(Accounts)] +#[instruction(title: String)] +pub struct DeleteMovieReview<'info> { + #[account( + mut, + seeds=[title.as_bytes(), initializer.key().as_ref()], + bump, + close=initializer + )] + pub movie_review: Account<'info, MovieAccountState>, + #[account(mut)] + pub initializer: Signer<'info>, + pub system_program: Program<'info, System> +} +``` + +Here we use the `close` constraint to specify we are closing the `movie_review` +account and that the rent should be refunded to the `initializer` account. We +also include the `seeds` and `bump` constraints for the `movie_review` account +for validation. Anchor then handles the additional logic required to securely +close the account. + + + + +### Testing + +The program should be good to go! Now let's test it out. Navigate to +`anchor-movie-review-program.ts` and replace the default test code with the +following. + +Here we: + +- Create default values for the movie review instruction data +- Derive the movie review account PDA +- Create placeholders for tests + +```typescript +import * as anchor from "@coral-xyz/anchor"; +import { Program } from "@coral-xyz/anchor"; +import { expect } from "chai"; +import { AnchorMovieReviewProgram } from "../target/types/anchor_movie_review_program"; + +describe("anchor-movie-review-program", () => { + // Configure the client to use the local cluster. + const provider = anchor.AnchorProvider.env(); + anchor.setProvider(provider); + + const program = anchor.workspace + .AnchorMovieReviewProgram as Program; + + const movie = { + title: "Just a test movie", + description: "Wow what a good movie it was real great", + rating: 5, + }; + + const [moviePda] = anchor.web3.PublicKey.findProgramAddressSync( + [Buffer.from(movie.title), provider.wallet.publicKey.toBuffer()], + program.programId, + ); + + it("Movie review is added`", async () => {}); + + it("Movie review is updated`", async () => {}); + + it("Deletes a movie review", async () => {}); +}); +``` + +Next, let's create the first test for the `addMovieReview` instruction. Note +that we don't explicitly add `.accounts`. This is because the `Wallet` from +`AnchorProvider` is automatically included as a signer, Anchor can infer certain +accounts like `SystemProgram`, and Anchor can also infer the `movieReview` PDA +from the `title` instruction argument and the signer's public key. + + + +Don't forget to turn on seed inference with `seeds = true` +in the `Anchor.toml` file. + + + +Once the instruction runs, we then fetch the `movieReview` account and check +that the data stored on the account match the expected values. + +```typescript +it("Movie review is added`", async () => { + // Add your test here. + const tx = await program.methods + .addMovieReview(movie.title, movie.description, movie.rating) + .rpc(); + + const account = await program.account.movieAccountState.fetch(moviePda); + expect(movie.title === account.title); + expect(movie.rating === account.rating); + expect(movie.description === account.description); + expect(account.reviewer === provider.wallet.publicKey); +}); +``` + +Next, let's create the test for the `updateMovieReview` instruction following +the same process as before. 
+ +```typescript +it("Movie review is updated`", async () => { + const newDescription = "Wow this is new"; + const newRating = 4; + + const tx = await program.methods + .updateMovieReview(movie.title, newDescription, newRating) + .rpc(); + + const account = await program.account.movieAccountState.fetch(moviePda); + expect(movie.title === account.title); + expect(newRating === account.rating); + expect(newDescription === account.description); + expect(account.reviewer === provider.wallet.publicKey); +}); +``` + +Next, create the test for the `deleteMovieReview` instruction + +```typescript +it("Deletes a movie review", async () => { + const tx = await program.methods.deleteMovieReview(movie.title).rpc(); +}); +``` + +Lastly, run `anchor test` and you should see the following output in the +console. + +```bash + anchor-movie-review-program + ✔ Movie review is added` (139ms) + ✔ Movie review is updated` (404ms) + ✔ Deletes a movie review (403ms) + + + 3 passing (950ms) +``` + +If you need more time with this project to feel comfortable with these concepts, +feel free to have a look at +the [solution code](https://github.com/Unboxed-Software/anchor-movie-review-program/tree/solution-pdas) before +continuing. + + + + + +## Challenge + +Now it's your turn to build something independently. Equipped with the concepts +introduced in this lesson, try to recreate the Student Intro program that we've +used before using the Anchor framework. + +The Student Intro program is a Solana Program that lets students introduce +themselves. The program takes a user's name and a short message as the +instruction data and creates an account to store the data onchain. + +Using what you've learned in this lesson, build out this program. The program +should include instructions to: + +1. Initialize a PDA account for each student that stores the student's name and + their short message +2. Update the message on an existing account +3. Close an existing account + +Try to do this independently if you can! But if you get stuck, feel free to +reference +the [solution code](https://github.com/Unboxed-Software/anchor-student-intro-program). + + + +Push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=f58108e9-94a0-45b2-b0d5-44ada1909105)! + + diff --git a/content/courses/onchain-development/index.mdx b/content/courses/onchain-development/index.mdx new file mode 100644 index 000000000..5d72567e2 --- /dev/null +++ b/content/courses/onchain-development/index.mdx @@ -0,0 +1,5 @@ +--- +author: unboxed +title: Onchain program development +description: Build onchain programs (sometimes called 'smart contracts') with Anchor. +--- diff --git a/content/courses/onchain-development/intro-to-anchor-frontend.mdx b/content/courses/onchain-development/intro-to-anchor-frontend.mdx new file mode 100644 index 000000000..98e23602d --- /dev/null +++ b/content/courses/onchain-development/intro-to-anchor-frontend.mdx @@ -0,0 +1,757 @@ +--- +title: Intro to client-side Anchor development +objectives: + - Use an IDL to interact with a Solana program from the client + - Explain an Anchor `Provider` object + - Explain an Anchor `Program` object + - Use the Anchor `MethodsBuilder` to build instructions and transactions + - Use Anchor to fetch accounts + - Set up a frontend to invoke instructions using Anchor and an IDL +description: + "Use Anchor's automatic JS/TS clients to send instructions to your program." 
+--- + +## Summary + +- An **IDL** is a file representing the structure of a Solana program. Programs + written and built using Anchor automatically generate a corresponding IDL. IDL + stands for Interface Description Language. +- `@coral-xyz/anchor` is a Typescript client that includes everything you'll + need to interact with Anchor programs +- An **Anchor `Provider`** object combines a `connection` to a cluster and a + specified `wallet` to enable transaction signing +- An **Anchor `Program`** object provides a custom API to interact with a + specific program. You create a `Program` instance using a program's IDL and + `Provider`. +- The **Anchor `MethodsBuilder`** provides a simple interface through `Program` + for building instructions and transactions + +## Lesson + +Anchor simplifies the process of interacting with Solana programs from the +client by providing an Interface Description Language (IDL) file that reflects +the structure of a program. Using the IDL in conjunction with Anchor's +Typescript library (`@coral-xyz/anchor`) provides a simplified format for +building instructions and transactions. + +```typescript +// sends transaction +await program.methods + .instructionName(instructionDataInputs) + .accounts({}) + .signers([]) + .rpc(); +``` + +This works from any Typescript client, whether it's a frontend or integration +tests. In this lesson we'll go over how to use `@coral-xyz/anchor` to simplify +your client-side program interaction. + +### Anchor client-side structure + +Let's start by going over the basic structure of Anchor's Typescript library. +The primary object you'll be using is the `Program` object. A `Program` instance +represents a specific Solana program and provides a custom API for reading and +writing to the program. + +To create an instance of `Program`, you'll need the following: + +- `IDL` - file representing the structure of a program +- `Connection` - the cluster connection +- `Wallet` - default keypair used to pay for and sign transactions +- `Provider` - encapsulates the `Connection` to a Solana cluster and a `Wallet` + +![Anchor structure](/assets/courses/unboxed/anchor-client-structure.png) + +The above image shows how each of these pieces are combined to create a +`Program` instance. We'll go over each of them individually to get a better idea +of how everything ties together. + +#### Interface Description Language (IDL) + +When you build an Anchor program, Anchor generates both a JSON and Typescript +file representing your program's IDL. The IDL represents the structure of the +program and can be used by a client to infer how to interact with a specific +program. + +While it isn't automatic, you can also generate an IDL from a native Solana +program using tools like [shank](https://github.com/metaplex-foundation/shank) +by Metaplex. 
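
Anchor can also publish a program's IDL onchain (for example, with
`anchor idl init`). If the program you're targeting has done this, you may be
able to fetch the IDL at runtime instead of copying the JSON file around. Here
is a minimal sketch, assuming the IDL has been published and that a provider is
configured the way the tests later in this course configure one; the address
used is simply the counter program from the example below:

```typescript
import * as anchor from "@coral-xyz/anchor";
import { PublicKey } from "@solana/web3.js";

// Assumes ANCHOR_PROVIDER_URL and ANCHOR_WALLET are set, as they are when
// running `anchor test`
const provider = anchor.AnchorProvider.env();
anchor.setProvider(provider);

// Example address: the counter program whose IDL is shown below
const programId = new PublicKey("9sMy4hnC9MML6mioESFZmzpntt3focqwUq1ymPgbMf64");

// Resolves to the IDL JSON if the author published it onchain, otherwise null
const idl = await anchor.Program.fetchIdl(programId, provider);
console.log(idl?.instructions.map(instruction => instruction.name));
```

If the IDL hasn't been published onchain, you'll need to copy it from the
program's repository or build output instead.
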
+ +To get an idea of the information an IDL provides, here is the IDL for the +counter program you built previously: + +```json +{ + "address": "9sMy4hnC9MML6mioESFZmzpntt3focqwUq1ymPgbMf64", + "metadata": { + "name": "anchor_counter", + "version": "0.1.0", + "spec": "0.1.0", + "description": "Created with Anchor" + }, + "instructions": [ + { + "name": "increment", + "discriminator": [11, 18, 104, 9, 104, 174, 59, 33], + "accounts": [ + { + "name": "counter", + "writable": true + }, + { + "name": "user", + "signer": true + } + ], + "args": [] + }, + { + "name": "initialize", + "discriminator": [175, 175, 109, 31, 13, 152, 155, 237], + "accounts": [ + { + "name": "counter", + "writable": true, + "signer": true + }, + { + "name": "user", + "writable": true, + "signer": true + }, + { + "name": "system_program", + "address": "11111111111111111111111111111111" + } + ], + "args": [] + } + ], + "accounts": [ + { + "name": "Counter", + "discriminator": [255, 176, 4, 245, 188, 253, 124, 25] + } + ], + "types": [ + { + "name": "Counter", + "type": { + "kind": "struct", + "fields": [ + { + "name": "count", + "type": "u64" + } + ] + } + } + ] +} +``` + +Inspecting the IDL, you can see the `programId` and the `metadata` object which +have been added in anchor 0.30.0 + +This program contains two instruction handlers, `initialize` and `increment`. + +Notice that in addition to specifying the instruction handlers, it specifies the +accounts and inputs for each instruction. The `initialize` instruction requires +three accounts: + +1. `counter` - the new account being initialized in the instruction +2. `user` - the payer for the transaction and initialization +3. `systemProgram` - the system program is invoked to initialize a new account + +And the `increment` instruction requires two accounts: + +1. `counter` - an existing account to increment the count field +2. `user` - the payer from the transaction + +Looking at the IDL, you can see that in both instructions the `user` is required +as a signer because the `isSigner` flag is marked as `true`. Additionally, +neither instructions require any additional instruction data since the `args` +section is blank for both. + +Looking further down at the `accounts` section, you can see that the program +contains one account type named `Counter` with a the `discriminator` field, +which is used to distinguish between various type of accounts present. + +Last, we have the `types` section, which contains types of account in the +`accounts` section , in this case, for account type `Counter` it contains a +single field named `count` of type `u64` + +Although the IDL does not provide the implementation details for each +instruction, we can get a basic idea of how the onchain program expects +instructions to be constructed and see the structure of the program accounts. + +Regardless of how you get it, you _need_ an IDL file to interact with a program +using the `@coral-xyz/anchor` package. To use the IDL, you'll need to include +the IDL file along with the types in your project and then import the file. + +```typescript +import idl from "./idl.json"; +``` + +You would _ideally_ also require types for the IDL which would make it easier to +interact with the program. The types can be found at `/target/types` folder +after you have built your program. Here are the types for the above IDL which +when you notice has the exact same structure as the IDL but are just as type +helper. + +```typescript +/** + * Program IDL in camelCase format in order to be used in JS/TS. 
+ * + * Note that this is only a type helper and is not the actual IDL. The original + * IDL can be found at `target/idl/anchor_counter.json`. + */ +export type AnchorCounter = { + address: "9sMy4hnC9MML6mioESFZmzpntt3focqwUq1ymPgbMf64"; + metadata: { + name: "anchorCounter"; + version: "0.1.0"; + spec: "0.1.0"; + description: "Created with Anchor"; + }; + instructions: [ + { + name: "increment"; + discriminator: [11, 18, 104, 9, 104, 174, 59, 33]; + accounts: [ + { + name: "counter"; + writable: true; + }, + { + name: "user"; + signer: true; + }, + ]; + args: []; + }, + { + name: "initialize"; + discriminator: [175, 175, 109, 31, 13, 152, 155, 237]; + accounts: [ + { + name: "counter"; + writable: true; + signer: true; + }, + { + name: "user"; + writable: true; + signer: true; + }, + { + name: "systemProgram"; + address: "11111111111111111111111111111111"; + }, + ]; + args: []; + }, + ]; + accounts: [ + { + name: "counter"; + discriminator: [255, 176, 4, 245, 188, 253, 124, 25]; + }, + ]; + types: [ + { + name: "counter"; + type: { + kind: "struct"; + fields: [ + { + name: "count"; + type: "u64"; + }, + ]; + }; + }, + ]; +}; +``` + +#### Provider + +Before you can create a `Program` object using the IDL, you first need to create +an Anchor `Provider` object. + +The `Provider` object combines two things: + +- `Connection` - the connection to a Solana cluster (i.e. localhost, devnet, + mainnet) +- `Wallet` - a specified address used to pay for and sign transactions + +The `Provider` is then able to send transactions to the Solana blockchain on +behalf of a `Wallet` by including the wallet's signature to outgoing +transactions. When using a frontend with a Solana wallet provider, all outgoing +transactions must still be approved by the user via their wallet browser +extension. + +Setting up the `Wallet` and `Connection` would look something like this: + +```typescript +import { useAnchorWallet, useConnection } from "@solana/wallet-adapter-react"; + +const { connection } = useConnection(); +const wallet = useAnchorWallet(); +``` + +To set up the connection, you can use the `useConnection` hook from +`@solana/wallet-adapter-react` to get the `Connection` to a Solana cluster. + +Note that the `Wallet` object provided by the `useWallet` hook from +`@solana/wallet-adapter-react` is not compatible with the `Wallet` object that +the Anchor `Provider` expects. However, `@solana/wallet-adapter-react` also +provides a `useAnchorWallet` hook. 
+ +For comparison, here is the `AnchorWallet` from `useAnchorWallet`: + +```typescript +export interface AnchorWallet { + publicKey: PublicKey; + signTransaction(transaction: Transaction): Promise; + signAllTransactions(transactions: Transaction[]): Promise; +} +``` + +And the `WalletContextState` from `useWallet`: + +```typescript +export interface WalletContextState { + autoConnect: boolean; + wallets: Wallet[]; + wallet: Wallet | null; + publicKey: PublicKey | null; + connecting: boolean; + connected: boolean; + disconnecting: boolean; + select(walletName: WalletName): void; + connect(): Promise; + disconnect(): Promise; + sendTransaction( + transaction: Transaction, + connection: Connection, + options?: SendTransactionOptions, + ): Promise; + signTransaction: SignerWalletAdapterProps["signTransaction"] | undefined; + signAllTransactions: + | SignerWalletAdapterProps["signAllTransactions"] + | undefined; + signMessage: MessageSignerWalletAdapterProps["signMessage"] | undefined; +} +``` + +The `WalletContextState` provides much more functionality compared to the +`AnchorWallet`, but the `AnchorWallet` is required to set up the `Provider` +object. + +To create the `Provider` object you use `AnchorProvider` from +`@coral-xyz/anchor`. + +The `AnchorProvider` constructor takes three parameters: + +- `connection` - the `Connection` to the Solana cluster +- `wallet` - the `Wallet` object +- `opts` - optional parameter that specifies the confirmation options, using a + default setting if one is not provided + +Once you've created the `Provider` object, you then set it as the default +provider using `setProvider`. + +```typescript +import { useAnchorWallet, useConnection } from "@solana/wallet-adapter-react"; +import { AnchorProvider, setProvider } from "@coral-xyz/anchor"; + +const { connection } = useConnection(); +const wallet = useAnchorWallet(); +const provider = new AnchorProvider(connection, wallet, { + commitment: "confirmed", +}); +setProvider(provider); +``` + +#### Program + +Once you have the IDL and a provider, you can create an instance of `Program`. +The constructor requires three parameters: + +- `idl` - the IDL as type `Idl` +- `Provider` - the provider discussed in the previous section + +The `Program` object creates a custom API you can use to interact with a Solana +program. This API is the one stop shop for all things related to communicating +with onchain programs. Among other things, you can send transactions, fetch +deserialized accounts, decode instruction data, subscribe to account changes, +and listen to events. You can also +[learn more about the `Program` class](https://coral-xyz.github.io/anchor/ts/classes/Program.html#constructor). + +To create the `Program` object, first import `Program` and `Idl` from +`@coral-xyz/anchor`. `Idl` is a type you can use when working with Typescript. + +When creating the `Program` object, the default `Provider` is used if one is not +explicitly specified. + +To enable type support, import the types to your project from `/target/types` +present in your anchor project, and declare the type for the program object. 
+ +All together, the final setup looks something like this: + +```typescript +import idl from "./idl.json"; +import type { CounterProgram } from "@/types"; +import { useAnchorWallet, useConnection } from "@solana/wallet-adapter-react"; +import { Program, Idl, AnchorProvider, setProvider } from "@coral-xyz/anchor"; + +const { connection } = useConnection(); +const wallet = useAnchorWallet(); + +const provider = new AnchorProvider(connection, wallet, {}); +setProvider(provider); + +const program = new Program(idl as CounterProgram); + +// we can also explicitly mention the provider +const program = new Program(idl as CounterProgram, provider); +``` + +### Anchor `MethodsBuilder` + +Once the `Program` object is set up, you can use the Anchor Methods Builder to +build instructions and transactions related to the program. The `MethodsBuilder` +uses the IDL to provide a simplified format for building transactions that +invoke program instructions. + +Note that the camel case naming convention is used when interacting with a +program from the client, compared to the snake case naming convention used when +the writing the program in rust. + +The basic `MethodsBuilder` format looks like this: + +```typescript +// sends transaction +await program.methods + .instructionName(instructionDataInputs) + .accounts({}) + .signers([]) + .rpc(); +``` + +Going step by step, you: + +1. Call `methods` on `program` - this is the builder API for creating + instruction calls related to the program's IDL +2. Call the instruction name as `.instructionName(instructionDataInputs)` - + simply call the instruction using dot syntax and the instruction's name, + passing in any instruction arguments as comma-separated values +3. Call `accounts` - using dot syntax, call `.accounts`, passing in an object + with each account the instruction expects based on the IDL +4. Optionally call `signers` - using dot syntax, call `.signers`, passing in an + array of additional signers required by the instruction +5. Call `rpc` - this method creates and sends a signed transaction with the + specified instruction and returns a `TransactionSignature`. When using + `.rpc`, the `Wallet` from the `Provider` is automatically included as a + signer and does not have to be listed explicitly. + +Note that if no additional signers are required by the instruction other than +the `Wallet` specified with the `Provider`, the `.signer([])` line can be +excluded. + +You can also build the transaction directly by changing `.rpc()` to +`.transaction()`. This builds a `Transaction` object using the instruction +specified. + +```typescript +// creates transaction +const transaction = await program.methods + .instructionName(instructionDataInputs) + .accounts({}) + .transaction(); + +await sendTransaction(transaction, connection); +``` + +Similarly, you can use the same format to build an instruction using +`.instruction()` and then manually add the instructions to a new transaction. +This builds a `TransactionInstruction` object using the instruction specified. 
+ +```typescript +// creates first instruction +const instructionOne = await program.methods + .instructionOneName(instructionOneDataInputs) + .accounts({}) + .instruction(); + +// creates second instruction +const instructionTwo = await program.methods + .instructionTwoName(instructionTwoDataInputs) + .accounts({}) + .instruction(); + +// add both instruction to one transaction +const transaction = new Transaction().add(instructionOne, instructionTwo); + +// send transaction +await sendTransaction(transaction, connection); +``` + +In summary, the Anchor `MethodsBuilder` provides a simplified and more flexible +way to interact with onchain programs. You can build an instruction, a +transaction, or build and send a transaction using basically the same format +without having to manually serialize or deserialize the accounts or instruction +data. + +### Fetch program accounts + +The `Program` object also allows you to easily fetch and filter program +accounts. Simply call `account` on `program` and then specify the name of the +account type as reflected on the IDL. Anchor then deserializes and returns all +accounts as specified. + +The example below shows how you can fetch all existing `counter` accounts for +the Counter program. + +```typescript +const accounts = await program.account.counter.all(); +``` + +You can also apply a filter by using `memcmp` and then specifying an `offset` +and the `bytes` to filter for. + +The example below fetches all `counter` accounts with a `count` of 0. Note that +the `offset` of 8 is for the 8 byte discriminator Anchor uses to identify +account types. The 9th byte is where the `count` field begins. You can refer to +the IDL to see that the next byte stores the `count` field of type `u64`. Anchor +then filters for and returns all accounts with matching bytes in the same +position. + +```typescript +const accounts = await program.account.counter.all([ + { + memcmp: { + offset: 8, + bytes: bs58.encode(new BN(0, "le").toArray()), + }, + }, +]); +``` + +Alternatively, you can also get the deserialized account data for a specific +account using `fetch` if you know the address of the account you're looking for. + +```typescript +const account = await program.account.counter.fetch(ACCOUNT_ADDRESS); +``` + +Similarly, you can fetch multiple accounts using `fetchMultiple`. + +```typescript +const accounts = await program.account.counter.fetchMultiple([ + ACCOUNT_ADDRESS_ONE, + ACCOUNT_ADDRESS_TWO, +]); +``` + +## Lab + +Let's practice this together by building a frontend for the Counter program from +last lesson. As a reminder, the Counter program has two instructions: + +- `initialize` - initializes a new `Counter` account and sets the `count` to `0` +- `increment` - increments the `count` on an existing `Counter` account + +#### 1. Download the starter code + +Download +[the starter code for this project](https://github.com/solana-developers/anchor-ping-frontend/tree/starter). +Once you have the starter code, take a look around. Install the dependencies +with `npm install` and then run the app with `npm run dev`. + +This project is a simple Next.js application, created using +`npx create-next-dapp` + +The `idl.json` file for the Counter program, and the `Initialize` and +`Increment` components we'll be building throughout this lab. + +#### 2. `Initialize` + +To begin, let's complete the setup to create the `useCounterProgram` hook in +`components/counter/counter-data-access.tsx` component. 

Remember, we'll need an instance of `Program` to use the Anchor `MethodsBuilder`
to invoke the instructions on our program. `create-solana-dapp` already creates
a `getCounterProgram` helper for us, which returns the `Program` instance.

```typescript
// This is a helper function to get the Counter Anchor program.
export function getCounterProgram(provider: AnchorProvider) {
  return new Program(CounterIDL as AnchorCounter, provider);
}
```

Now, in the `useCounterProgram` hook, we'll create a program instance:

```typescript
const provider = useAnchorProvider();
const program = getCounterProgram(provider);
```

- `useAnchorProvider` is a helper function in
  `components/solana/solana-provider` that returns the provider.

Now that we have the program instance, we can invoke the program's `initialize`
instruction. We'll do this using `useMutation`.

Remember, we'll need to generate a new `Keypair` for the new `Counter` account
since we are initializing an account for the first time.

```typescript
const initialize = useMutation({
  mutationKey: ["counter", "initialize", { cluster }],

  mutationFn: (keypair: Keypair) =>
    program.methods
      .initialize()
      .accounts({ counter: keypair.publicKey })
      .signers([keypair])
      .rpc(),

  onSuccess: signature => {
    transactionToast(signature);
    return accounts.refetch();
  },
  onError: () => toast.error("Failed to initialize account"),
});
```

Focus on the `mutationFn`, which accepts the `keypair` we pass in. We are using
the Anchor `MethodsBuilder` to create and send a new transaction. Remember,
Anchor can infer some of the required accounts, like the `user` and
`systemProgram` accounts. However, it can't infer the `counter` account because
we generate that dynamically, so you'll need to add it with `.accounts`. You'll
also need to add the keypair as a signer with `.signers`. Lastly, calling
`.rpc()` prompts the user's wallet to sign and submits the transaction.

Once the transaction goes through, `onSuccess` is called with the transaction
signature, and we then refetch `accounts`.

#### 3. `Accounts`

In the `initialize` mutation above, we call `accounts.refetch()`. This refreshes
the accounts we have stored every time a new account is initialized.

```typescript
const accounts = useQuery({
  queryKey: ["counter", "all", { cluster }],
  queryFn: () => program.account.counter.all(),
});
```

We use `account` on the `program` instance to get all the `counter` accounts
that have been created. Internally, this method calls `getProgramAccounts`.

#### 4. `Increment`

Next, let's move on to the `useCounterProgramAccount` hook. Since we already
created the `program` instance and the `accounts` query in the previous hook,
we'll reuse them through that hook rather than redefining them.

Add the following code for the initial setup:

```typescript
export function useCounterProgramAccount({ account }: { account: PublicKey }) {
  ...

  const { program, accounts } = useCounterProgram();
}

```

Next, let's use the Anchor `MethodsBuilder` to build a new instruction to invoke
the `increment` instruction. Again, Anchor can infer the `user` account from the
wallet so we only need to include the `counter` account.

```typescript
const incrementMutation = useMutation({
  mutationKey: ["counter", "increment", { cluster, account }],

  mutationFn: () =>
    program.methods.increment().accounts({ counter: account }).rpc(),

  onSuccess: tx => {
    transactionToast(tx);
    return accountQuery.refetch();
  },
});
```

Because the counter is being updated, we refresh the displayed count by calling
`accountQuery.refetch()` once the transaction succeeds.

```typescript
const accountQuery = useQuery({
  queryKey: ["counter", "fetch", { cluster, account }],
  queryFn: () => program.account.counter.fetch(account),
});
```

#### 5. Test the frontend

At this point, everything should work! You can test the frontend by running
`npm run dev`.

1. Connect your wallet and head to the `Counter Program` tab
2. Click the `Create` button, and then approve the transaction
3. You should then see a link at the bottom right of the screen to Solana
   Explorer for the `initialize` transaction. The `Increment` button and the
   count will appear.
4. Click the `Increment` button, and then approve the transaction
5. Wait a few seconds. The count should increment on the screen.

![Anchor Frontend Demo](/assets/courses/unboxed/anchor-frontend-demo.gif)

Feel free to click the links to inspect the program logs from each transaction!

![Initialize Program Log](/assets/courses/unboxed/anchor-frontend-initialize.png)

![Increment Program Log](/assets/courses/unboxed/anchor-frontend-increment.png)

Congratulations, you now know how to set up a frontend to invoke a Solana
program using an Anchor IDL.

If you need more time with this project to feel comfortable with these concepts,
feel free to have a look at
the [solution code on the `solution-increment` branch](https://github.com/Unboxed-Software/anchor-ping-frontend/tree/solution-increment) before
continuing.

## Challenge

Now it's your turn to build something independently. Building on top of what
we've done in the lab, try to create a new component in the frontend that
implements a button to decrement the counter.

Before building the component in the frontend, you'll first need to:

1. Build and deploy a new program that implements a `decrement` instruction
2. Update the IDL file in the frontend with the one from your new program
3. Update the `programId` with the one from your new program

If you need some help, feel free to
[reference this program](https://github.com/solana-developers/anchor-ping-frontend/tree/solution-increment).

Try to do this independently if you can! But if you get stuck, feel free to
reference
the [solution code](https://github.com/solana-developers/anchor-ping-frontend/tree/solution-decrement).

Push your code to GitHub and
[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=774a4023-646d-4394-af6d-19724a6db3db)!

diff --git a/content/courses/onchain-development/intro-to-anchor.mdx b/content/courses/onchain-development/intro-to-anchor.mdx new file mode 100644 index 000000000..7bb66c14c --- /dev/null +++ b/content/courses/onchain-development/intro-to-anchor.mdx @@ -0,0 +1,826 @@
---
title: Intro to Anchor development
objectives:
  - Use the Anchor framework to build a basic Solana program
  - Describe the basic structure of an Anchor program
  - Explain how to implement basic account validation and security checks with
    Anchor
description: "Create your first Solana onchain program in Anchor."
+--- + +## Summary + +- **Programs** on Solana have **instruction handlers**, which are functions that + take arguments from incoming instructions. They are the entry point for any + operation in a program. +- **Rust** is the most common language for building Solana programs. The + **Anchor** framework takes care of common grunt work - like reading data from + incoming instructions, and checking the right accounts are provided - so you + can focus on building your Solana program. + +## Lesson + +Before we begin, make sure you have Anchor installed. You can follow this lesson +on [local-setup](/developers/courses/onchain-development/local-setup). + +Solana's capacity to execute arbitrary code is a key part of its power. Solana +programs, (sometimes called "smart contracts"), are the very foundation of the +Solana ecosystem. And as developers and creators continuously conceive and +deploy new programs, the collection of Solana programs continues to expand +daily. + +Every popular Solana exchange, borrow-lend app, digital art auction house, perps +platform, and prediction market is a program. + +This lesson will give you a basic introduction to writing and deploying a Solana +program using the Rust programming language and the Anchor framework. + +> This and the further lessons in this course will give a good base to start +> building Solana programs with Anchor, however if you want to get more into +> Anchor, we would recommend checking out the +> [The Anchor Book](https://book.anchor-lang.com/). + +### What is Anchor? + +Anchor makes writing Solana programs easier, faster, and more secure, making it +the "go-to" framework for Solana development. It makes it easier to organize and +reason about your code, implements common security checks automatically, and +removes a significant amount of boilerplate code that is otherwise associated +with writing a Solana program. + +### Anchor program structure + +Anchor uses macros and traits to simplify Rust code for you. These provide a +clear structure to your program so you can focus more on its functionality. + +Some important macros provided by Anchor are: + +> From here on out, you'll see a lot of Rust. We assume that you are familiar +> with Rust, if not, we recommend you to check out +> [The Rust Book](https://doc.rust-lang.org/book/). + +- `declare_id!` - a macro for declaring the program’s onchain address +- `#[program]` - an attribute macro used to denote the module containing the + program’s instruction handlers. +- `Accounts` - a trait applied to structs representing the list of accounts + required for an instruction. +- `#[account]` - an attribute macro used to define custom account types for the + program. + +Let's talk about each of them before putting all the pieces together. + +### Declare your program ID + +The `declare_id` macro sets the onchain address of the Anchor program (i.e. the +`programId`). When you create a new Anchor program, the framework generates a +default keypair. This keypair is used to deploy the program unless specified +otherwise. The public key of this keypair is used as the `programId` in the +`declare_id!` macro. + +```rust +declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); +``` + +### Define instruction logic + +The `#[program]` attribute macro defines the module containing all of your +program's instruction handlers. This is where you implement the business logic +for each operation in your program. 
+ +Each public function in the module with the `#[program]` attribute will be +treated as a separate instruction handler. + +Each instruction handler (function) requires a parameter of type `Context` and +can include more parameters as needed. Anchor will automatically handle +instruction data deserialization so that you can work with instruction data as +Rust types. + +```rust +#[program] +mod program_module_name { + use super::*; + + pub fn instruction_one(ctx: Context, instruction_data: u64) -> Result<()> { + ctx.accounts.account_name.data = instruction_data; + Ok(()) + } +} +``` + +- The `#[program]` attribute macro is used to denote the module containing the + program’s instruction logic. +- `use super::*;` is used to bring all the items from the parent module into + scope, which are needed to define the instruction logic. +- Next, there is the instruction handler function. This function just writes + some data (`instruction_data` in this case) to an account. + +### Instruction `Context` + +The `Context` type exposes instruction metadata and accounts to your instruction +logic. + +```rust +pub struct Context<'a, 'b, 'c, 'info, T: Bumps> { + /// Currently executing program id. + pub program_id: &'a Pubkey, + /// Deserialized accounts. + pub accounts: &'b mut T, + /// Remaining accounts given but not deserialized or validated. + /// Be very careful when using this directly. + pub remaining_accounts: &'c [UncheckedAccount<'info>], + /// Bump seeds found during constraint validation. This is provided as a + /// convenience so that handlers don't have to recalculate bump seeds or + /// pass them in as arguments. + /// Type is the bumps struct generated by #[derive(Accounts)] + pub bumps: T::Bumps, +} +``` + +`Context` is a generic type where `T` defines the list of accounts an +instruction handler requires. When you use `Context`, you specify the concrete +type of `T` as a struct that adopts the `Accounts` trait. + +The first argument of every instruction handler must be `Context`. `Context` +takes a generic of your `Accounts` struct, eg, if `AddMovieReview` was the +struct holding the accounts, the context for the `add_movie_review()` function +would be `Context`. + + + Yes, the Accounts struct is typically named the same thing as the instruction handler, just in TitleCase. Eg, the struct with the accounts for add_movie_review() is called AddMovieReview! + + +Through this context argument the instruction can then access: + +- The accounts passed into the instruction (`ctx.accounts`) +- The program ID (`ctx.program_id`) of the executing program +- The remaining accounts (`ctx.remaining_accounts`). The `remaining_accounts` is + a vector that contains all accounts that were passed into the instruction + handler but are not declared in the `Accounts` struct. +- The bumps for any PDA accounts in the `Accounts` struct (`ctx.bumps`) +- The seeds for any PDA accounts in the `Accounts` struct (`ctx.seeds`) + +> The design of Contexts can be different across different programs to serve +> their purpose; and the name of the context could be anything (not limited to +> Context) to better reflect it's usage. This example is to help understand how +> contexts work in Anchor. 
+ +### Define instruction accounts + +The `Accounts` trait: + +- Defines a structure of validated accounts for an instruction handler +- Makes accounts accessible through an instruction handler's `Context` +- Is typically applied using `#[derive(Accounts)]` +- Implements an `Accounts` deserializer on the struct +- Performs constraint checks for secure program execution + +Example: + +- `instruction_one` requires a `Context` +- `InstructionAccounts` struct is implemented with `#[derive(Accounts)]` +- It includes accounts like `account_name`, `user`, and `system_program` +- Constraints are specified using the `#account(..)` attribute + +```rust +#[program] +mod program_module_name { + use super::*; + pub fn instruction_one(ctx: Context, instruction_data: u64) -> Result<()> { + ... + Ok(()) + } +} + +#[derive(Accounts)] +pub struct InstructionAccounts<'info> { + #[account( + init, + payer = user, + space = DISCRIMINATOR + AccountStruct::INIT_SPACE + )] + pub account_name: Account<'info, AccountStruct>, + + #[account(mut)] + pub user: Signer<'info>, + + pub system_program: Program<'info, System>, +} +``` + +When `instruction_one` is invoked, the program: + +- Checks that the accounts passed into the instruction handler match the account + types specified in the `InstructionAccounts` struct +- Checks the accounts against any additional constraints specified + +> If any accounts passed into `instruction_one` fail the account validation or +> security checks specified in the `InstructionAccounts` struct, then the +> instruction fails before even reaching the program logic. + +### Account validation + +You may have noticed in the previous example that one of the accounts in +`InstructionAccounts` was of type `Account`, one was of type `Signer`, and one +was of type `Program`. + +Anchor provides a number of account types that can be used to represent +accounts. Each type implements different account validation. We'll go over a few +of the common types you may encounter, but be sure to look through the +[full list of account types](https://docs.rs/anchor-lang/latest/anchor_lang/accounts/index.html). + +#### `Account` + +`Account` is a wrapper around `UncheckedAccount` that verifies program ownership +and deserializes the underlying data into a Rust type. + +```rust +// Deserializes this info +pub struct UncheckedAccount<'a> { + pub key: &'a Pubkey, + pub is_signer: bool, + pub is_writable: bool, + pub lamports: Rc>, + pub data: Rc>, // <---- deserializes account data + pub owner: &'a Pubkey, // <---- checks owner program + pub executable: bool, + pub rent_epoch: u64, +} +``` + +Recall the previous example where `InstructionAccounts` had a field +`account_name`: + +```rust +pub account_name: Account<'info, AccountStruct> +``` + +The `Account` wrapper here does the following: + +- Deserializes the account `data` in the format of type `AccountStruct` +- Checks that the program owner of the account matches the program owner + specified for the `AccountStruct` type. + +When the account type specified in the `Account` wrapper is defined within the +same crate using the `#[account]` attribute macro, the program ownership check +is against the `programId` defined in the `declare_id!` macro. + +The following are the checks performed: + +```rust +// Checks +Account.info.owner == T::owner() +!(Account.info.owner == SystemProgram && Account.info.lamports() == 0) +``` + +#### `Signer` + +The `Signer` type validates that the given account signed the transaction. No +other ownership or type checks are done. 
You should only use the `Signer` when +the underlying account data is not required in the instruction. + +For the `user` account in the previous example, the `Signer` type specifies that +the `user` account must be a signer of the instruction. + +The following check is performed for you: + +```rust +// Checks +Signer.info.is_signer == true +``` + +#### `Program` + +The `Program` type validates that the account is a certain program. + +For the `system_program` account in the previous example, the `Program` type is +used to specify the program should be the system program. Anchor provides a +`System` type which includes the `programId` of the system program to check +against. + +The following checks are performed for you: + +```rust +//Checks +account_info.key == expected_program +account_info.executable == true +``` + +### Add constraints with Account + +The `#[account(..)]` attribute macro is used to apply constraints to accounts. +We'll go over a few constraint examples in this and future lessons, but at some +point be sure to look at the full +[list of possible constraints](https://docs.rs/anchor-lang/latest/anchor_lang/derive.Accounts.html). + +Recall again the `account_name` field from the `InstructionAccounts` example. + +```rust +#[account( + init, + payer = user, + space = DISCRIMINATOR + AccountStruct::INIT_SPACE +)] +pub account_name: Account<'info, AccountStruct>, +#[account(mut)] +pub user: Signer<'info>, +``` + +Notice that the `#[account(..)]` attribute contains three comma-separated +values: + +- `init` - creates the account via a CPI to the system program and initializes + it (sets its account discriminator) +- `payer` - specifies the payer for the account initialization to be the `user` + account defined in the struct +- `space`- the space allocated on the blockchain to store the account. + - `DISCRIMINATOR` is the first 8 bytes of an account, which Anchor uses to + save the type of the account. + - `AccountStruct::INIT_SPACE` is the total size of space required for all the + items in the `AccountStruct`. + - The very need of using this `space` constraint can be eliminated by using + `#[derive(InitSpace)]` macro. We'll see how to use that further in this + lesson. + +For `user` we use the `#[account(..)]` attribute to specify that the given +account is mutable. The `user` account must be marked as mutable because +lamports will be deducted from the account to pay for the initialization of +`account_name`. + +```rust +#[account(mut)] +pub user: Signer<'info>, +``` + +Note that the `init` constraint placed on `account_name` automatically includes +a `mut` constraint so that both `account_name` and `user` are mutable accounts. + +### Account + +The `#[account]` attribute is applied to structs representing the data structure +of a Solana account. It implements the following traits: + +- `AccountSerialize` +- `AccountDeserialize` +- `AnchorSerialize` +- `AnchorDeserialize` +- `Clone` +- `Discriminator` +- `Owner` + +You can read more about the +[details of each trait](https://docs.rs/anchor-lang/latest/anchor_lang/attr.account.html). +However, mostly what you need to know is that the `#[account]` attribute enables +serialization and deserialization, and implements the discriminator and owner +traits for an account. + +The discriminator is an 8-byte unique identifier for an account type derived +from the first 8 bytes of the SHA256 hash of the account type's name. 
The first +8 bytes are reserved for the account discriminator when implementing account +serialization traits (which is almost always in an Anchor program). + +As a result, any calls to `AccountDeserialize`'s `try_deserialize` will check +this discriminator. If it doesn't match, an invalid account was given, and the +account deserialization will exit with an error. + +The `#[account]` attribute also implements the `Owner` trait for a struct using +the `programId` declared by `declareId` of the crate `#[account]` is used in. In +other words, all accounts initialized using an account type defined using the +`#[account]` attribute within the program are also owned by the program. + +As an example, let's look at `AccountStruct` used by the `account_name` of +`InstructionAccounts` + +```rust +#[derive(Accounts)] +pub struct InstructionAccounts { + #[account(init, + payer = user, + space = DISCRIMINATOR + AnchorStruct::INIT_SPACE + )] + pub account_name: Account<'info, AccountStruct>, + ... +} + +#[account] +#[derive(InitSpace)] +pub struct AccountStruct { + data: u64 +} + +const DISCRIMINATOR: usize = 8; +``` + +The `#[account]` attribute ensures that it can be used as an account in +`InstructionAccounts`. + +When the `account_name` account is initialized: + +- The first 8 bytes is set as the `AccountStruct` discriminator using the + `DISCRIMINATOR` constant. +- The data field of the account will match `AccountStruct` +- The account owner is set as the `programId` from `declare_id` + +> It is considered a good practice to use the `#[derive(InitSpace)]` macro which +> makes the code more readable and maintainable. + +### Bring it all together + +When you combine all of these Anchor types you end up with a complete program. +Below is an example of a basic Anchor program with a single instruction that: + +- Initializes a new account +- Updates the data field on the account with the instruction data passed into + the instruction + +```rust +// Use this import to gain access to common anchor features +use anchor_lang::prelude::*; + +// Program onchain address +declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); + +// Instruction logic +#[program] +mod program_module_name { + use super::*; + pub fn instruction_one(ctx: Context, instruction_data: u64) -> Result<()> { + ctx.accounts.account_name.data = instruction_data; + Ok(()) + } +} + +// Validate incoming accounts for instructions +#[derive(Accounts)] +pub struct InstructionAccounts<'info> { + #[account(init, + payer = user, + space = DISCRIMINATOR + AccountStruct::INIT_SPACE + )] + pub account_name: Account<'info, AccountStruct>, + #[account(mut)] + pub user: Signer<'info>, + pub system_program: Program<'info, System>, +} + +// Define custom program account type +#[account] +#[derive(InitSpace)] +pub struct AccountStruct { + data: u64 +} + +const DISCRIMINATOR: usize = 8; +``` + +#### Key takeaways: + +- The whole program structure can be broadly divided into three parts: + 1. Account constraints: define the accounts required for the instructions, as + well as rules to apply to them - e.g., whether they need to sign the + transaction, if they should be created on demand, how addresses for PDAs, + etc. + 2. Instruction handlers: implement program logic, as functions inside + the`#[program]` module. + 3. Accounts: define the format used for data accounts. + +You are now ready to build your own Solana program using the Anchor framework! 
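
To connect this structure to what the client sees, here is a rough sketch of
how the example program above could be invoked from a Typescript test using the
types Anchor generates under `target/types`. The `ProgramModuleName`,
`instructionOne`, and `accountName` identifiers simply mirror the placeholder
names used above; the same pattern is walked through for a real program in the
lab's testing section below.

```typescript
import * as anchor from "@coral-xyz/anchor";
import { BN, Program } from "@coral-xyz/anchor";
// Generated by `anchor build` for the placeholder program above
import { ProgramModuleName } from "../target/types/program_module_name";

const provider = anchor.AnchorProvider.env();
anchor.setProvider(provider);

const program = anchor.workspace
  .ProgramModuleName as Program<ProgramModuleName>;

// `init` creates this account, so the client supplies a fresh keypair and
// includes it as a signer
const accountName = anchor.web3.Keypair.generate();

// Snake case on the Rust side becomes camelCase on the client, and the u64
// instruction data is passed as a BN
await program.methods
  .instructionOne(new BN(42))
  .accounts({ accountName: accountName.publicKey })
  .signers([accountName])
  .rpc();
```
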
+ +## Lab + +Before we begin, install Anchor by +[following the steps from the Anchor docs](https://www.anchor-lang.com/docs/installation). + +For this lab we'll create a simple counter program with two instructions: + +- The first instruction will initialize an account to store our counter +- The second instruction will increment the count stored in the counter + +#### 1. Setup + +Create a new project called `anchor-counter` by running `anchor init`: + +```shell +anchor init anchor-counter +``` + +Change into the new directory, then run `anchor build` + +```shell +cd anchor-counter +anchor build +``` + +Anchor build will also generate a keypair for your new program - the keys are +saved in the `target/deploy` directory. + +Open the file `lib.rs` and look at `declare_id!`: + +```rust +declare_id!("BouTUP7a3MZLtXqMAm1NrkJSKwAjmid8abqiNjUyBJSr"); +``` + +and then run... + +```shell +anchor keys sync +``` + +You'll see the Anchor updates both: + +- The key used in `declare_id!()` in `lib.rs` +- The key in `Anchor.toml` + +To match the key generated during `anchor build`: + +```shell +Found incorrect program id declaration in "anchor-counter/programs/anchor-counter/src/lib.rs" +Updated to BouTUP7a3MZLtXqMAm1NrkJSKwAjmid8abqiNjUyBJSr + +Found incorrect program id declaration in Anchor.toml for the program `anchor_counter` +Updated to BouTUP7a3MZLtXqMAm1NrkJSKwAjmid8abqiNjUyBJSr + +All program id declarations are synced. +``` + +Finally, delete the default code in `lib.rs` until all that is left is the +following: + +```rust +use anchor_lang::prelude::*; + +declare_id!("onchain-program-address"); + +#[program] +pub mod anchor_counter { + use super::*; +} +``` + +#### 2. Implement `Counter` + +First, let's use the `#[account]` attribute to define a new `Counter` account +type. The `Counter` struct defines one `count` field of type `u64`. This means +that we can expect any new accounts initialized as a `Counter` type to have a +matching data structure. The `#[account]` attribute also automatically sets the +discriminator for a new account and sets the owner of the account as the +`programId` from the `declare_id!` macro. We also use the `#[derive(InitSpace)]` +macro for convenient space allocation. + +```rust +#[account] +#[derive(InitSpace)] +pub struct Counter { + pub count: u64, +} + +const DISCRIMINATOR: usize = 8; +``` + +#### 3. Implement `Context` type `Initialize` + +Next, using the `#[derive(Accounts)]` macro, let's implement the `Initialize` +type that lists and validates the accounts used by the `initialize` instruction. +It'll need the following accounts: + +- `counter` - the counter account initialized in the instruction +- `user` - payer for the initialization +- `system_program` - the system program is required for the initialization of + any new accounts + +```rust +#[derive(Accounts)] +pub struct Initialize<'info> { + #[account(init, + payer = user, + space = DISCRIMINATOR + Counter::INIT_SPACE + )] + pub counter: Account<'info, Counter>, + #[account(mut)] + pub user: Signer<'info>, + pub system_program: Program<'info, System>, +} +``` + +#### 4. Add the `initialize` instruction handler + +Now that we have our `Counter` account and `Initialize` type , let's implement +the `initialize` instruction handler within `#[program]`. This instruction +handler requires a `Context` of type `Initialize` and takes no additional +instruction data. In the instruction logic, we are simply setting the `counter` +account's `count` field to `0`. 
+ +```rust +pub fn initialize(ctx: Context) -> Result<()> { + let counter = &mut ctx.accounts.counter; + counter.count = 0; + msg!("Counter Account Created"); + msg!("Current Count: { }", counter.count); + Ok(()) +} +``` + +#### 5. Implement `Context` type `Update` + +Now, using the `#[derive(Accounts)]` macro again, let's create the `Update` type +that lists the accounts that the `increment` instruction handler requires. It'll +need the following accounts: + +- `counter` - an existing counter account to increment +- `user` - payer for the transaction fee + +Again, we'll need to specify any constraints using the `#[account(..)]` +attribute: + +```rust +#[derive(Accounts)] +pub struct Update<'info> { + #[account(mut)] + pub counter: Account<'info, Counter>, + pub user: Signer<'info>, +} +``` + +#### 6. Add `increment` instruction handler + +Lastly, within `#[program]`, let's implement an `increment` instruction handler +to increment the `count` once a `counter` account is initialized by the first +instruction handler. This instruction handler requires a `Context` of type +`Update` (implemented in the next step) and takes no additional instruction +data. In the instruction logic, we are simply incrementing an existing `counter` +account's `count` field by `1`. + +```rust +pub fn increment(ctx: Context) -> Result<()> { + let counter = &mut ctx.accounts.counter; + msg!("Previous counter: {}", counter.count); + counter.count = counter.count.checked_add(1).unwrap(); + msg!("Counter incremented. Current count: {}", counter.count); + Ok(()) +} +``` + +#### 7. Build + +All together, the complete program will look like this: + +```rust +use anchor_lang::prelude::*; + +declare_id!("BouTUP7a3MZLtXqMAm1NrkJSKwAjmid8abqiNjUyBJSr"); + +#[program] +pub mod anchor_counter { + use super::*; + + pub fn initialize(ctx: Context) -> Result<()> { + let counter = &mut ctx.accounts.counter; + counter.count = 0; + msg!("Counter account created. Current count: {}", counter.count); + Ok(()) + } + + pub fn increment(ctx: Context) -> Result<()> { + let counter = &mut ctx.accounts.counter; + msg!("Previous counter: {}", counter.count); + counter.count = counter.count.checked_add(1).unwrap(); + msg!("Counter incremented. Current count: {}", counter.count); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Initialize<'info> { + #[account(init, + payer = user, + space = DISCRIMINATOR + Counter::INIT_SPACE + )] + pub counter: Account<'info, Counter>, + #[account(mut)] + pub user: Signer<'info>, + pub system_program: Program<'info, System>, +} + +#[derive(Accounts)] +pub struct Update<'info> { + #[account(mut)] + pub counter: Account<'info, Counter>, + pub user: Signer<'info>, +} + +#[account] +#[derive(InitSpace)] +pub struct Counter { + pub count: u64, +} + +const DISCRIMINATOR: usize = 8; +``` + +Run `anchor build` to build the program. + +#### 8. Testing + +Anchor tests are typically Typescript integration tests that use the mocha test +framework. We'll learn more about testing later, but for now navigate to +`anchor-counter.ts` and replace the default test code with the following: + +```typescript +import * as anchor from "@coral-xyz/anchor"; +import { Program } from "@coral-xyz/anchor"; +import { expect } from "chai"; +import { AnchorCounter } from "../target/types/anchor_counter"; + +describe("anchor-counter", () => { + // Configure the client to use the local cluster. 
+ const provider = anchor.AnchorProvider.env(); + anchor.setProvider(provider); + + const program = anchor.workspace.AnchorCounter as Program; + + const counter = anchor.web3.Keypair.generate(); + + it("Is initialized!", async () => {}); + + it("Incremented the count", async () => {}); +}); +``` + +The above code generates a new keypair for the `counter` account we'll be +initializing and creates placeholders for a test of each instruction. + +Next, create the first test for the `initialize` instruction: + +```typescript +it("Is initialized!", async () => { + // Add your test here. + const tx = await program.methods + .initialize() + .accounts({ counter: counter.publicKey }) + .signers([counter]) + .rpc(); + + const account = await program.account.counter.fetch(counter.publicKey); + expect(account.count.toNumber()).to.equal(0); +}); +``` + +Next, create the second test for the `increment` instruction: + +```typescript +it("Incremented the count", async () => { + const tx = await program.methods + .increment() + .accounts({ counter: counter.publicKey, user: provider.wallet.publicKey }) + .rpc(); + + const account = await program.account.counter.fetch(counter.publicKey); + expect(account.count.toNumber()).to.equal(1); +}); +``` + +Lastly, run `anchor test` and you should see the following output: + +```shell +anchor-counter +✔ Is initialized! (290ms) +✔ Incremented the count (403ms) + + +2 passing (696ms) +``` + +Running `anchor test` automatically spins up a local test validator, deploys +your program, and runs your mocha tests against it. Don't worry if you're +confused by the tests for now - we'll dig in more later. + +Congratulations, you just built a Solana program using the Anchor framework! +Feel free to reference the +[solution code](https://github.com/Unboxed-Software/anchor-counter-program/tree/solution-increment) +if you need some more time with it. + +## Challenge + +Now it's your turn to build something independently. Because we're starting with +simple programs, yours will look almost identical to what we just created. It's +useful to try and get to the point where you can write it from scratch without +referencing prior code, so try not to copy and paste here. + +1. Write a new program that initializes a `counter` account +2. Implement both an `increment` and `decrement` instruction +3. Build and deploy your program like we did in the lab +4. Test your newly deployed program and use Solana Explorer to check the program + logs + +As always, get creative with these challenges and take them beyond the basic +instructions if you want - and have fun! + +Try to do this independently if you can! But if you get stuck, feel free to +reference +the [solution code](https://github.com/Unboxed-Software/anchor-counter-program/tree/solution-decrement). + + + +Push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=334874b7-b152-4473-b5a5-5474c3f8f3f1)! 

diff --git a/content/courses/onchain-development/intro-to-onchain.mdx b/content/courses/onchain-development/intro-to-onchain.mdx new file mode 100644 index 000000000..249affacf --- /dev/null +++ b/content/courses/onchain-development/intro-to-onchain.mdx @@ -0,0 +1,185 @@
---
title: Intro to Solana Onchain Development
objectives:
  - Understand how Solana onchain programs work
  - Know about the structure and operation of Solana programs
  - Build a basic program
description:
  "Discover how onchain programs (often called 'smart contracts') work on
  Solana and learn to build your own."
---

## Summary

- **Onchain programs** (sometimes called 'smart contracts') run directly on
  Solana, just like programs on your computer.
- These programs consist of **instruction handlers** - functions that process
  instructions from transactions.
- Programs interact with the blockchain by reading from and writing to Solana
  **accounts**.
- Solana programs are most commonly written in **Rust**, often using the
  **Anchor** framework for simplified development.
- Anchor generates **Interface Description Language (IDL)** files, which:
  - Describe the program's structure and functionality
  - Enable automatic creation of JavaScript/TypeScript client libraries
- Solana's architecture allows for parallel execution of non-overlapping
  transactions, contributing to its high speed and efficiency.
- Rent is a concept in Solana where accounts must maintain a minimum balance to
  stay alive on the blockchain.

## Overview

Solana operates on various clusters, each functioning as a unified, globally
synchronized system:

- **mainnet-beta**: The main production network
- **testnet**: For testing new features
- **devnet**: For application development
- **localnet**: For local testing

The programs that run on Solana - the ones that power token creation, token
swaps, art marketplaces, escrows, market makers, DePIN apps, auctions, retail
payment platforms, and more - are called **Solana apps**.

The most popular way to build onchain apps is using the **Rust** language and
the **Anchor** framework. You can also develop Solana programs natively, without
a framework, but **Anchor** makes things a lot simpler and safer. Some pros of
using Anchor are:

- Security checks are implemented automatically
- Automatic routing of incoming instructions to the correct instruction handler
- Automatic serialization and deserialization of the data inside transactions
- Account validation, including:
  - Type checking
  - Ensuring account uniqueness

Regardless of the language and framework you choose, Solana works the same.
Let's refresh how programs work on Solana.

![Diagram showing a transaction with two instructions](/assets/courses/unboxed/transaction-and-instructions.svg)

### Programs are deployed at addresses

In the same way that we can send tokens to users using their public key, we can
find programs using the program's public key. When using Anchor, a keypair is
created during `anchor init`, and the private key is saved in the
`target/deploy` directory of your project.

A program's public key is sometimes called a 'program ID' or 'program address'.
It can be seen in the `programs//src/lib.rs` and
`Anchor.toml` files.
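
A deployed program is just an account that is marked as executable, so you can
look it up like any other account. Here is a small sketch using
`@solana/web3.js` and the System Program's well-known address (the same address
that appears in other examples in this course):

```typescript
import { Connection, PublicKey, clusterApiUrl } from "@solana/web3.js";

const connection = new Connection(clusterApiUrl("devnet"), "confirmed");

// The System Program's address - every program lives at an address like this
const programId = new PublicKey("11111111111111111111111111111111");

// Programs are accounts with the `executable` flag set to true
const accountInfo = await connection.getAccountInfo(programId);
console.log("Executable:", accountInfo?.executable);
```
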

### Programs have instruction handlers

For example, a Solana client making a transaction transferring some USDC with a
memo saying 'thanks' would have two instructions:

- one instruction for the Token program's `transfer` instruction handler
- the other instruction for the Memo program's `memo` instruction handler.

Both of these instructions must complete successfully for the transaction to
succeed.

Instruction handlers are how blockchain programs process the instructions from
clients. Every exchange, lending protocol, escrow, oracle, etc. provides its
functionality through instruction handlers.

### Instruction handlers write their state to Solana accounts

If you have done web development before, think of instruction handlers like HTTP
route handlers, and incoming instructions like HTTP requests.

However, unlike HTTP route handlers, Solana instruction handlers don't return
data. Instead, they write their data to accounts on Solana.

Programs on Solana can transfer tokens to user wallet addresses (for SOL) or
user token accounts (for other tokens).

More importantly, programs can create additional addresses to store data as
needed.

This is how Solana programs store their state.

### Program Derived Addresses (PDAs): Solana's Key-Value Store

Data for Solana programs is stored in **program-derived addresses (PDAs)**.
Solana's PDAs can be thought of as a **key/value store**. A PDA can be designed
to store any form of data as required by the program.

#### Key Concepts

1. **Structure**

   - **Key**: The PDA's address
   - **Value**: Data stored in the account at that address

2. **Address Generation**

   - **Seed**: chosen by the programmer
   - **Bump**: An additional value to ensure unique PDA creation
   - **Deterministic**: The same combination of seed and bump always produces
     the same address. This lets both the program and the client accurately
     determine the address of the data.

3. **Data Storage**

   - Programmers define the structure of data stored in PDAs
   - Can store any type of program-specific information

4. **Some properties**:
   - PDAs are off the Ed25519 elliptic curve. While the data type web3.js uses
     is a `PublicKey`, PDA addresses are not public keys and do not have a
     matching private key.
   - A program's PDAs are unique, so they won't conflict with other programs.
   - PDAs can also act as signers in an instruction. We'll learn more about this
     in further lessons.

#### Examples of PDA Usage

| Use Case          | Seeds                          | PDA (Key)       | Value (Data Stored)                        |
| ----------------- | ------------------------------ | --------------- | ------------------------------------------ |
| Exchange Rate     | `["USD", "AUD"]`               | Derived address | Current USD to AUD exchange rate           |
| User Relationship | `[user1_wallet, user2_wallet]` | Derived address | Relationship data (e.g., friends, blocked) |
| Movie Review      | `[reviewer_wallet, "titanic"]` | Derived address | Review text, rating, timestamp             |
| Global Config     | `["config"]`                   | Derived address | Program-wide settings                      |

#### Benefits

1. **Uniqueness**: PDAs are specific to your program, avoiding conflicts
2. **Determinism**: Consistent address generation across clients and onchain
   programs
3. **Flexibility**: Can store various types of data structures
4. **Efficiency**: Quick lookup and access to program-specific data
**Efficiency**: Quick lookup and access to program-specific data + +### Solana instructions need to specify all the accounts they will use + +As you may already know, Solana is fast because it can process transactions that +don't overlap at the same time i.e., just like in the real world, Alice sending +to Bob doesn't stop Chris from sending something to Diana. Your front-end apps +need to specify the addresses of all the accounts they will use. + +This includes the PDAs you make. Thankfully, you can calculate the address for +PDAs in your front-end code before you write data there! + +```typescript +// There's nothing at this address right now, but we're going to use in our transaction +const address = findProgramAddressSync(["seed", "another seed"], PROGRAM_ID); +``` + +### There are multiple ways to build onchain, but we recommend Anchor + +You currently have two options for onchain program development: + +- We recommend new onchain programmers + [start with Anchor](/developers/courses/onchain-development/intro-to-anchor). + Anchor's defaults make it easy to create safe programs. +- There's also a separate + [native onchain program development](/developers/courses/native-onchain-development) + course. + +Whichever way you pick, Solana Foundation maintains +[examples in both languages](https://github.com/solana-developers/program-examples), +and [Solana Stack Exchange](https://solana.stackexchange.com/) is there to help. + +For now, let's +[set up your computer](/developers/courses/onchain-development/local-setup)! diff --git a/content/courses/onchain-development/local-setup.mdx b/content/courses/onchain-development/local-setup.mdx new file mode 100644 index 000000000..b489dba6d --- /dev/null +++ b/content/courses/onchain-development/local-setup.mdx @@ -0,0 +1,212 @@ +--- +title: Local Program Development +objectives: + - Set up a local environment for Solana program development, with Solana CLI + tools, Rust, and Anchor. + - Ensure Anchor works out of the box with no errors or warnings. +description: + "Setup a local development environment for building onchain programs." +--- + +## Summary + +- To develop onchain programs locally, you need the **Solana CLI**, **Rust**, + and (optional, but recommended) **Anchor**. +- You can use `anchor init` to create a new blank Anchor project. +- `anchor test` runs your tests and also builds your code. + +## Lesson + +This lesson is a guide to installing the tools required for developing onchain +programs. Let's install Solana CLI tools, the Rust SDK, and Anchor, and create a +test program to ensure that our setup works. + +## Lab + +### Extra steps for Windows users + +> macOS and Linux users can skip this section. If you're on Windows, you can +> follow along with these extra steps. + +Firstly, make sure you have Windows Terminal installed, otherwise you can +install Windows Terminal from the +[Microsoft store](https://apps.microsoft.com/detail/9N0DX20HK701). + +Then, +[install Windows Subsystem for Linux (WSL)](https://learn.microsoft.com/en-us/windows/wsl/install). +WSL provides a Linux environment that launches instantly when needed without +slowing down your computer. + +Open Windows Terminal, start an 'Ubuntu' session and proceed with the rest of +these steps. 
+ +### Download Rust + +First, install Rust by +[following the instructions](https://www.rust-lang.org/tools/install): + +```bash +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +``` + +### Download the Solana CLI tools + +Next, +[download the Solana CLI tools](/docs/intro/installation#install-the-solana-cli): + +```bash +sh -c "$(curl -sSfL https://release.anza.xyz/stable/install)" +``` + +After installation, `solana -V` should display `solana-cli 1.18.x` (where `x` +can be any number). + +### Running the Solana Test Validator + +The Solana Test Validator is a local emulator for the Solana blockchain. It +provides developers with a private and controlled environment to build and test +Solana programs without needing to connect to a public testnet or mainnet. + +To start the Solana Test Validator, run the following command: + +```bash +solana-test-validator +``` + +When running `solana-test-validator`, you should see output indicating that the +validator is working correctly. Below is an example of what the output should +look like: + +```bash +$ solana-test-validator +--faucet-sol argument ignored, ledger already exists +Ledger location: test-ledger +Log: test-ledger/validator.log +⠴ Initializing... +Waiting for fees to stabilize 1... +Identity: J8yKZJa5NtcmCQqmBRC6Fe8X6AECo8Vc3d7L3dF9JPiM +Genesis Hash: FTPnCMDzTEthZxE6DvHbsWWv83F2hFe1GFvpVFBMUoys +Version: 1.18.22 +Shred Version: 49491 +Gossip Address: 127.0.0.1:1024 +TPU Address: 127.0.0.1:1027 +JSON RPC URL: http://127.0.0.1:8899 +WebSocket PubSub URL: ws://127.0.0.1:8900 +⠄ 00:00:25 | Processed Slot: 114 | Confirmed Slot: 114 | Finalized Slot: 82 | Full Snapshot Slot: - | Incremental Snapshot Slot: - | Transactions: 111 | ◎499.999445000 +``` + +If you see this output, it means the Solana test validator is running correctly. +You should cancel the process by pressing CTRL + C, as you'll need to run the +anchor test command next. + +For more detailed information, you can refer to the +[Solana Test Validator guide](/developers/guides/getstarted/solana-test-validator). + +### Download Anchor + +Finally, [download Anchor](https://www.anchor-lang.com/docs/installation): + +```bash +cargo install --git https://github.com/coral-xyz/anchor avm --locked --force +``` + +you may need to install additional dependencies in Linux (or WSL): + +```bash +sudo apt-get update && \ +sudo apt-get upgrade && \ +sudo apt-get install -y pkg-config build-essential libudev-dev libssl-dev +``` + +proceed... + +```bash +avm install latest +avm use latest +``` + +After installation, `anchor -V` should display `anchor-cli 0.30.1`. For more +detailed information on Anchor, refer to +[The Anchor Book](https://book.anchor-lang.com). + +### Verify your Anchor Installation + +Create a temporary project with the default contents using Anchor and ensure it +compiles and runs: + +```bash +anchor init temp-project +cd temp-project +anchor test +``` + +**The `anchor test` command should complete with no errors or warnings**. + +**However you may encounter issues, and we'll fix them below:** + +#### `package `solana-program + +v1.18.12` cannot be built because it requires rustc 1.75.0 or newer` error + +This error is due to incompatible versions of `solana-program` and `solana-cli`. +Run `cargo add solana-program@"=1.18.x"`, where `x` matches your version of +`solana-cli`. Then re-run `anchor test`. + +#### Error: `Unable to read keypair file` + +Add a keypair to `.config/solana/id.json`. 
You can either copy a keypair from an +`.env` file (just the array of numbers) into a file or use the command +`solana-keygen new --no-bip39-passphrase` to create a new keypair file. Then +re-run `anchor test`. + +#### error: no such command: `build-sbf` + +If you see this message, this error typically occurs because the relevant +binaries are not in your shell's PATH variable. + +Run this command to add this folder to your shell, and also add this to your +`~/.zshrc` or `~/.bashrc` file to make the change permanent. + +```bash +export PATH=~"/.local/share/solana/install/active_release/bin:$PATH" +``` + +#### Unable to get latest blockhash. Test validator does not look started. + +There's multiple versions of the 'tar' (tape archiver) command Solana used for +archiving. macOS comes with BSD tar, but Solana CLI wants the GNU version +installed. + +- Install [Homebrew](https://brew.sh/) and use it to install GNU tar: + + ```bash + # Install Homebrew; you can skip this step if you already have Homebrew installed + /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" + # Install GNU tar + brew install gnu-tar + ``` + +- Add this to your ~/.zshrc or ~/.bashrc file to make the change permanent. + + ```bash + export PATH=/opt/homebrew/opt/gnu-tar/libexec/gnubin:$PATH + ``` + +#### Error: `Your configured rpc port: 8899 is already in use` + +If you are running `solana-test-validator`, you may encounter the error +`Error: Your configured rpc port: 8899 is already in use` when running +`anchor test`. To resolve this, stop the `solana-test-validator` before running +`anchor test`. + +### All done? + +Ensure `anchor test` completes successfully - with no warnings and no errors - +before continuing. + + + +Push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=aa0b56d6-02a9-4b36-95c0-a817e2c5b19d)! + + diff --git a/content/courses/onchain-development/meta.json b/content/courses/onchain-development/meta.json new file mode 100644 index 000000000..ac4523e03 --- /dev/null +++ b/content/courses/onchain-development/meta.json @@ -0,0 +1,10 @@ +{ + "pages": [ + "intro-to-onchain", + "local-setup", + "intro-to-anchor", + "intro-to-anchor-frontend", + "anchor-pdas", + "anchor-cpi" + ] +} diff --git a/content/courses/program-optimization/index.mdx b/content/courses/program-optimization/index.mdx new file mode 100644 index 000000000..eebafac36 --- /dev/null +++ b/content/courses/program-optimization/index.mdx @@ -0,0 +1,5 @@ +--- +author: unboxed +title: Program Optimization +description: Learn how to efficiently design and optimize your programs. +--- diff --git a/content/courses/program-optimization/lookup-tables.mdx b/content/courses/program-optimization/lookup-tables.mdx new file mode 100644 index 000000000..9ce40df1d --- /dev/null +++ b/content/courses/program-optimization/lookup-tables.mdx @@ -0,0 +1,883 @@ +--- +title: Versioned Transactions and Lookup Tables +objectives: + - Create versioned transactions + - Create lookup tables + - Extend lookup tables + - Use lookup tables with versioned transactions +description: "Use large amounts of accounts by using lookup tables." +--- + +## Summary + +- **Versioned Transactions** in Solana allows support for both legacy and newer + transaction formats. The original format is referred to as "legacy," while new + formats begin at version 0. Versioned transactions were introduced to + accommodate the use of Address Lookup Tables (LUTs). 
+- **Address Lookup Tables** are special accounts that store the addresses of + other accounts. In versioned transactions, these addresses can be referenced + by a 1-byte index instead of the full 32-byte address. This optimization + enables more complex transactions than previously possible. + +## Lesson + +By design, Solana transactions are limited to 1232 bytes. Transactions exceeding +this limit will fail, which restricts the size of atomic operations that can be +performed. While this limit allows for optimizations at the network level, it +imposes restrictions on transaction complexity. + +To address transaction size limitations, Solana introduced a new transaction +format supporting multiple versions. Currently, two transaction versions are +supported: + +1. `legacy` - The original transaction format +2. `0` - The latest format, which supports Address Lookup Tables. + +Existing Solana programs do not require changes to support versioned +transactions. However, client-side code created prior to their introduction +should be updated. In this lesson, we'll cover the basics of versioned +transactions and how to use them, including: + +- Creating versioned transactions +- Creating and managing lookup tables +- Using lookup tables in versioned transactions + +### Versioned Transactions + +In Solana transactions, one of the largest space consumers is account addresses, +which are 32 bytes each. For transactions with 39 accounts, the size limit is +exceeded even before accounting for instruction data. Typically, transactions +become too large with around 20 accounts. + +Versioned transactions address this issue by introducing Address Lookup Tables, +which allow addresses to be stored separately and referenced via a 1-byte index. +This greatly reduces transaction size by minimizing the space needed for account +addresses. + +Even if Address Lookup Tables are not required for your use case, understanding +versioned transactions is crucial for maintaining compatibility with the latest +Solana features. The `@solana/web3.js` library provides all necessary tools to +work with versioned transactions and lookup tables. + +#### Create versioned transactions + +To create a versioned transaction, you first create a `TransactionMessage` with +the following parameters: + +- `payerKey` - the public key of the account that will pay for the transaction +- `recentBlockhash` - a recent blockhash from the network +- `instructions` - the instructions to be executed in the transaction. + +Once the message object is created, you can convert it into a version `0` +transaction using the `compileToV0Message()` method. + +```typescript +import * as web3 from "@solana/web3.js"; + +// Example transfer instruction +const transferInstruction = SystemProgram.transfer({ + fromPubkey: payer.publicKey, // Public key of the sender account + toPubkey: toAccount.publicKey, // Public key of the receiver account + lamports: 1 * LAMPORTS_PER_SOL, // Amount to transfer in lamports +}); + +// Get the latest blockhash +const { blockhash } = await connection.getLatestBlockhash(); + +// Create the transaction message +const message = new TransactionMessage({ + payerKey: payer.publicKey, // Public key of the payer account + recentBlockhash: blockhash, // Most recent blockhash + instructions: [transferInstruction], // Transaction instructions +}).compileToV0Message(); +``` + +Next, pass the compiled message into the `VersionedTransaction` constructor to +create a versioned transaction. 
The transaction is then signed and sent to the +network, similar to how legacy transactions are handled. + +```typescript +// Create the versioned transaction from the compiled message +const transaction = new VersionedTransaction(message); + +// Sign the transaction with the payer's keypair +transaction.sign([payer]); + +// Send the signed transaction to the network +const signature = await connection.sendTransaction(transaction); +``` + +### Address Lookup Table + +Address Lookup Tables (LUTs) are accounts that store references to other account +addresses. These LUT accounts, owned by the Address Lookup Table Program, +increase the number of accounts that can be included in a transaction. + +In versioned transactions, LUT addresses are included, and additional accounts +are referenced with a 1-byte index instead of the full 32-byte address, reducing +space used by the transaction. + +The `@solana/web3.js` library offers an `AddressLookupTableProgram` class, +providing methods to manage LUTs: + +- `createLookupTable` - creates a new LUT account. +- `freezeLookupTable` - makes a LUT immutable. +- `extendLookupTable` - adds addresses to an existing LUT. +- `deactivateLookupTable` - begins the deactivation period for an LUT. +- `closeLookupTable` - permanently closes an LUT account. + +#### Create a lookup table + +You can use the `createLookupTable` method to construct the instruction for +creating a lookup table. This requires the following parameters: + +- `authority` - the account authorized to modify the lookup table. +- `payer` - the account responsible for paying the account creation fees. +- `recentSlot` - a recent slot used to derive the lookup table's address. + +The function returns both the instruction for creating the LUT and its address. + +```typescript +// Get the current slot +const slot = await connection.getSlot(); + +// Create the lookup table creation instruction and retrieve its address +const [lookupTableInst, lookupTableAddress] = + AddressLookupTableProgram.createLookupTable({ + authority: user.publicKey, // Account authorized to modify the LUT + payer: user.publicKey, // Account paying for transaction fees + recentSlot: slot - 1, // Use a recent slot to derive the LUT address + }); +``` + +Under the hood, the lookup table address is a Program Derived Address (PDA) +generated using the `authority` and `recentSlot` as seeds. + +```typescript +const [lookupTableAddress, bumpSeed] = PublicKey.findProgramAddressSync( + [params.authority.toBuffer(), toBufferLE(BigInt(params.recentSlot), 8)], + this.programId, +); +``` + + + +Using the most recent slot sometimes results in errors when submitting the +transaction. To avoid this, it’s recommended to use a slot that is one slot +before the most recent one (`recentSlot: currentSlot - 1`). If you still +encounter errors when sending the transaction, try resubmitting it. + + + +``` +"Program AddressLookupTab1e1111111111111111111111111 invoke [1]", +"188115589 is not a recent slot", +"Program AddressLookupTab1e1111111111111111111111111 failed: invalid instruction data"; +``` + +#### Extend a lookup table + +The `extendLookupTable` method creates an instruction to add addresses to an +existing lookup table. It requires the following parameters: + +- `payer` - the account responsible for paying transaction fees and any + additional rent. +- `authority` - the account authorized to modify the lookup table. +- `lookupTable` - the address of the lookup table to be extended. 
+- `addresses` - the list of addresses to add to the lookup table. + +The function returns an instruction to extend the lookup table. + +```typescript +const addresses = [ + new PublicKey("31Jy3nFeb5hKVdB4GS4Y7MhU7zhNMFxwF7RGVhPc1TzR"), + new PublicKey("HKSeapcvwJ7ri6mf3HwBtspLFTDKqaJrMsozdfXfg5y2"), + // Add more addresses here +]; + +// Create the instruction to extend the lookup table with the provided addresses +const extendInstruction = AddressLookupTableProgram.extendLookupTable({ + payer: user.publicKey, // Account paying for transaction fees + authority: user.publicKey, // Account authorized to modify the lookup table + lookupTable: lookupTableAddress, // Address of the lookup table to extend + addresses: addresses, // Addresses to add to the lookup table +}); +``` + +Note that when extending a lookup table, the number of addresses that can be +added in a single instruction is limited by the transaction size limit of 1232 +bytes. You can add approximately 30 addresses in one transaction. If you need to +add more than that, multiple transactions are required. Each lookup table can +store up to 256 addresses. + +#### Send Transaction + +After creating the instructions, you can add them to a transaction and send it +to the network: + +```typescript +// Get the latest blockhash +const { blockhash } = await connection.getLatestBlockhash(); + +// Create the transaction message +const message = new TransactionMessage({ + payerKey: payer.publicKey, // Account paying for the transaction + recentBlockhash: blockhash, // Latest blockhash + instructions: [lookupTableInst, extendInstruction], // Instructions to be included in the transaction +}).compileToV0Message(); + +// Create the versioned transaction from the message +const transaction = new VersionedTransaction(message); + +// Sign the transaction +transaction.sign([payer]); + +// Send the signed transaction to the network +const transactionSignature = await connection.sendTransaction(transaction); +``` + +Note that after you create or extend a lookup table, it must "warm up" for one +slot before the lookup table or newly added addresses can be used in +transactions. You can only access lookup tables and addresses added in slots +prior to the current one. + +If you encounter the following error, it may indicate that you're trying to +access a lookup table or an address before the warm-up period has completed: + +```typescript +SendTransactionError: failed to send transaction: invalid transaction: Transaction address table lookup uses an invalid index +``` + +To avoid this issue, ensure you add a delay after extending the lookup table +before attempting to reference the table in a transaction. + +#### Deactivate a lookup table + +When a lookup table (LUT) is no longer needed, you can deactivate it to reclaim +its rent balance. Deactivating a LUT puts it into a "cool-down" period +(approximately 513 slots) during which it can still be used by transactions. +This prevents transactions from being censored by deactivating and recreating +LUTs within the same slot. + +To deactivate a LUT, use the `deactivateLookupTable` method with the following +parameters: + +- `lookupTable` - the address of the lookup table to be deactivated. +- `authority` - the account with the authority to deactivate the LUT. 
+ +```typescript +const deactivateInstruction = AddressLookupTableProgram.deactivateLookupTable({ + lookupTable: lookupTableAddress, // Address of the lookup table to deactivate + authority: user.publicKey, // Authority to modify the lookup table +}); +``` + +#### Close a lookup table + +Once a LUT has been deactivated and the cool-down period has passed, you can +close the lookup table to reclaim its rent balance. Use the `closeLookupTable` +method, which requires the following parameters: + +- `lookupTable` - the address of the LUT to be closed. +- `authority` - the account with the authority to close the LUT. +- `recipient` - the account that will receive the reclaimed rent balance. + +```typescript +const closeInstruction = AddressLookupTableProgram.closeLookupTable({ + lookupTable: lookupTableAddress, // Address of the lookup table to close + authority: user.publicKey, // Authority to close the LUT + recipient: user.publicKey, // Recipient of the reclaimed rent balance +}); +``` + +Attempting to close a LUT before it has been fully deactivated will result in +the following error: + +``` +"Program AddressLookupTab1e1111111111111111111111111 invoke [1]", +"Table cannot be closed until it's fully deactivated in 513 blocks", +"Program AddressLookupTab1e1111111111111111111111111 failed: invalid program argument"; +``` + +#### Freeze a lookup table + +In addition to standard CRUD operations, you can "freeze" a lookup table. This +makes it immutable so that it can no longer be extended, deactivated, or closed. + +The `freezeLookupTable` method is used for this operation and takes the +following parameters: + +- `lookupTable` - the address of the LUT to freeze. +- `authority` - the account with the authority to freeze the LUT. + +```typescript +const freezeInstruction = AddressLookupTableProgram.freezeLookupTable({ + lookupTable: lookupTableAddress, // Address of the lookup table to freeze + authority: user.publicKey, // Authority to freeze the LUT +}); +``` + +Once a LUT is frozen, any attempt to modify it will result in an error like the +following: + +``` +"Program AddressLookupTab1e1111111111111111111111111 invoke [1]", +"Lookup table is frozen", +"Program AddressLookupTab1e1111111111111111111111111 failed: Account is immutable"; +``` + +#### Using lookup tables in versioned transactions + +To utilize a lookup table in a versioned transaction, first retrieve the lookup +table account using its address: + +```typescript +// Fetch the lookup table account from the blockchain using its address +const lookupTableAccount = ( + await connection.getAddressLookupTable(new PublicKey(lookupTableAddress)) +).value; +``` + +Once you have the lookup table account, you can create the list of instructions +for the transaction. When constructing the `TransactionMessage`, pass the lookup +table accounts as an array to the `compileToV0Message()` method. You can include +multiple lookup table accounts if needed. 
+ +```typescript +const message = new web3.TransactionMessage({ + payerKey: payer.publicKey, // Public key of the account paying for the transaction + recentBlockhash: blockhash, // Blockhash of the most recent block + instructions: instructions, // Instructions to be included in the transaction +}).compileToV0Message([lookupTableAccount]); // Include lookup table accounts + +// Create a versioned transaction using the compiled message +const transaction = new web3.VersionedTransaction(message); + +// Sign the transaction +transaction.sign([payer]); + +// Send the signed transaction to the network +const transactionSignature = await connection.sendTransaction(transaction); +``` + +## Lab + +Let's go ahead and practice using lookup tables! + +This lab will guide you through creating, extending, and using a lookup table in +a versioned transaction. + +#### 1. Create the `try-large-transaction.ts` file + +To begin, create a new file named `try-large-transaction.ts` in your project +directory. This file will contain the code to illustrate a scenario where a +legacy transaction is created to transfer SOL to 22 recipients in a single +atomic transaction. The transaction will include 22 separate instructions, each +transferring SOL from the payer (signer) to a different recipient. + +This example highlights a key limitation of legacy transactions when trying to +accommodate many account addresses within a single transaction. As expected, +when attempting to send this transaction, it will likely fail due to exceeding +the transaction size limits. + +Here’s the code to include in `try-large-transaction.ts`: + +```typescript title="try-large-transaction.ts" +import { + Connection, + clusterApiUrl, + Keypair, + Transaction, + SystemProgram, + LAMPORTS_PER_SOL, + sendAndConfirmTransaction, +} from "@solana/web3.js"; +import { + initializeKeypair, + makeKeypairs, + getExplorerLink, +} from "@solana-developers/helpers"; +import dotenv from "dotenv"; +dotenv.config(); + +async function main() { + // Connect to the local Solana cluster + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + + // Initialize the keypair from the environment variable or create a new one + const payer = await initializeKeypair(connection); + + // Generate 22 recipient keypairs using makeKeypairs + const recipients = makeKeypairs(22).map(keypair => keypair.publicKey); + + // Create a legacy transaction + const transaction = new Transaction(); + + // Add 22 transfer instructions to the transaction + recipients.forEach(recipient => { + transaction.add( + SystemProgram.transfer({ + fromPubkey: payer.publicKey, + toPubkey: recipient, + lamports: LAMPORTS_PER_SOL * 0.01, // Transfer 0.01 SOL to each recipient + }), + ); + }); + + // Sign and send the transaction + try { + const signature = await sendAndConfirmTransaction(connection, transaction, [ + payer, + ]); + console.log( + `Transaction successful with signature: ${getExplorerLink("tx", signature, "devnet")}`, + ); + } catch (error) { + console.error("Transaction failed:", error); + } +} +``` + +To run the example, execute `npx esrun try-large-transaction.ts`. This process +will: + +- Generate a new keypair. +- Store the keypair details in the `.env` file. +- Request airdrop of devnet SOL to the generated keypair. +- Attempt to send the transaction. +- Since the transaction includes 22 instructions, it is expected to fail with + the error: "Transaction too large". + +``` +Creating .env file +Current balance is 0 +Airdropping 1 SOL... 
+New balance is 1 +PublicKey: 7YsGYC4EBs6Dxespe4ZM3wfCp856xULWoLw7QUcVb6VG +Error: Transaction too large: 1244 > 1232 +``` + +#### 2. Create the `use-lookup-tables.ts` File + +Next, we'll explore how to use lookup tables in combination with versioned +transactions to overcome the limitation of legacy transactions and include a +greater number of addresses in a single transaction. + +Create a new file named `use-lookup-tables.ts` in your project directory. This +file will contain the code to demonstrate the use of lookup tables. + +Here’s the starter code to include in `use-lookup-tables.ts` file: + +```typescript title="use-lookup-tables.ts" +import { + Connection, + clusterApiUrl, + Keypair, + TransactionInstruction, + AddressLookupTableAccount, + SystemProgram, + VersionedTransaction, + TransactionMessage, + AddressLookupTableProgram, + LAMPORTS_PER_SOL, + getSlot, +} from "@solana/web3.js"; +import { + initializeKeypair, + makeKeypairs, + getExplorerLink, +} from "@solana-developers/helpers"; +import dotenv from "dotenv"; +dotenv.config(); + +async function main() { + // Connect to the local Solana cluster + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + + // Initialize the keypair from the environment variable or create a new one + const payer = await initializeKeypair(connection); + + // Generate 22 recipient keypairs using makeKeypairs + const recipients = makeKeypairs(22).map(keypair => keypair.publicKey); +} +``` + +Next, we will create a few helper functions that will be crucial for working +with versioned transactions and lookup tables. These functions will simplify our +process and make our code more modular and reusable. + +#### 3. Create a `sendV0Transaction` helper function + +To handle versioned transactions, we will create a helper function in +`use-lookup-tables.ts` file, called `sendV0Transaction`, to simplify the +process. This function will accept the following parameters: + +- `connection`: the solana connection to the cluster (e.g., devnet). +- `user`: the keypair of the user (payer) signing the transaction. +- `instructions`: an array of TransactionInstruction objects to include in the + transaction. +- `lookupTableAccounts` (optional): an array of lookup table accounts, if + applicable, to reference additional addresses. + +This helper function will: + +- Retrieve the latest blockhash and last valid block height from the Solana + network. +- Compile a versioned transaction message using the provided instructions. +- Sign the transaction using the user's keypair. +- Send the transaction to the network. +- Confirm the transaction and log the transaction's URL using Solana Explorer. 
+ +```typescript title="use-lookup-tables.ts" +async function sendV0Transaction( + connection: Connection, + user: Keypair, + instructions: TransactionInstruction[], + lookupTableAccounts?: AddressLookupTableAccount[], +) { + // Get the latest blockhash and last valid block height + const { blockhash, lastValidBlockHeight } = + await connection.getLatestBlockhash(); + + // Create a new transaction message with the provided instructions + const messageV0 = new TransactionMessage({ + payerKey: user.publicKey, // The payer (i.e., the account that will pay for the transaction fees) + recentBlockhash: blockhash, // The blockhash of the most recent block + instructions, // The instructions to include in the transaction + }).compileToV0Message(lookupTableAccounts); + + // Create a versioned transaction from the message + const transaction = new VersionedTransaction(messageV0); + + // Use the helper function to send and confirm the transaction + const txid = await sendAndConfirmTransactionV0( + connection, + transaction, + [user], + { + commitment: "finalized", // Ensures the transaction is confirmed at the highest level + }, + ); + + // Log the transaction URL on the Solana Explorer using the helper + const explorerLink = getExplorerLink("tx", txid, "devnet"); + console.log( + `Transaction successful! View it on Solana Explorer: ${explorerLink}`, + ); +} +``` + +#### 4. Create a `waitForNewBlock` helper function + +When working with lookup tables, it's important to remember that newly created +or extended lookup tables cannot be referenced immediately. Therefore, before +submitting transactions that reference these tables, we need to wait for a new +block to be generated. + +We will create a `waitForNewBlock` helper function that accepts: + +- `connection`: the Solana network connection. +- `targetBlockHeight`: the target block height to wait for. + +This function will: + +- Start an interval that checks the current block height of the network every + second (1000ms). +- Resolve the promise once the current block height exceeds the target block + height. + +```typescript title="use-lookup-tables.ts" +async function waitForNewBlock( + connection: Connection, + targetHeight: number, +): Promise { + console.log(`Waiting for ${targetHeight} new blocks...`); + + // Get the initial block height of the blockchain + const { lastValidBlockHeight: initialBlockHeight } = + await connection.getLatestBlockhash(); + + return new Promise(resolve => { + const SECOND = 1000; + const checkInterval = 1 * SECOND; // Interval to check for new blocks (1000ms) + + // Set an interval to check for new block heights + const intervalId = setInterval(async () => { + try { + // Get the current block height + const { lastValidBlockHeight: currentBlockHeight } = + await connection.getLatestBlockhash(); + + // If the current block height exceeds the target, resolve and clear interval + if (currentBlockHeight >= initialBlockHeight + targetHeight) { + clearInterval(intervalId); + console.log(`New block height reached: ${currentBlockHeight}`); + resolve(); + } + } catch (error) { + console.error("Error fetching block height:", error); + clearInterval(intervalId); + resolve(); // Resolve to avoid hanging in case of errors + } + }, checkInterval); + }); +} +``` + +#### 5. Create an `initializeLookupTable` function + +Next, we need to initialize a lookup table to hold the addresses of the +recipients. The `initializeLookupTable` function will accept the following +parameters: + +- `user`: the user's keypair (payer and authority). 
+- `connection`: the Solana network connection. +- `addresses`: an array of recipient addresses (public keys) to add to the + lookup table. + +The function will: + +- Retrieve the current slot to derive the lookup table's address. +- Generate the necessary instructions to create and extend the lookup table with + the provided recipient addresses. +- Send and confirm a transaction that includes these instructions. +- Return the address of the newly created lookup table. + +Although the transaction includes the full recipient addresses, using the lookup +table allows Solana to reference those addresses with significantly fewer bytes +in the actual transaction. By including the lookup table in the versioned +transaction, the framework optimizes the transaction size, replacing addresses +with pointers to the lookup table. + +This design is crucial for enabling the transaction to support more recipients +by staying within Solana’s transaction size limits. + +```typescript title="use-lookup-tables.ts" +async function initializeLookupTable( + user: Keypair, + connection: Connection, + addresses: PublicKey[], +): Promise { + // Get the current slot using a helper function from @solana/web3.js + const slot = await getSlot(connection); + + // Create an instruction for creating a lookup table + // and retrieve the address of the new lookup table + const [lookupTableInst, lookupTableAddress] = + AddressLookupTableProgram.createLookupTable({ + authority: user.publicKey, // The authority to modify the lookup table + payer: user.publicKey, // The payer for transaction fees + recentSlot: slot - 1, // The slot for lookup table address derivation + }); + + console.log("Lookup Table Address:", lookupTableAddress.toBase58()); + + // Create an instruction to extend a lookup table with the provided addresses + const extendInstruction = AddressLookupTableProgram.extendLookupTable({ + payer: user.publicKey, // The payer of transaction fees + authority: user.publicKey, // The authority to extend the lookup table + lookupTable: lookupTableAddress, // Address of the lookup table to extend + addresses: addresses.slice(0, 30), // Add up to 30 addresses per instruction + }); + + // Use the helper function to send a versioned transaction + await sendVersionedTransaction(connection, user, [ + lookupTableInst, + extendInstruction, + ]); + + return lookupTableAddress; +} +``` + +#### 6. Modify `main` to use lookup tables + +With the helper functions in place, we are now ready to modify the `main` +function to utilize versioned transactions and address lookup tables. To do so, +we will follow these steps: + +1. Call `initializeLookupTable`: Create and extend the lookup table with the + recipients' addresses. +2. Call `waitForNewBlock`: Ensure the lookup table is activated by waiting for a + new block. +3. Retrieve the Lookup Table: Use `connection.getAddressLookupTable` to fetch + the lookup table and reference it in the transaction. +4. Create Transfer Instructions: Generate a transfer instruction for each + recipient. +5. Send the Versioned Transaction: Use `sendV0Transaction` to send a single + transaction with all transfer instructions, referencing the lookup table. 
+ +```typescript title="use-lookup-tables.ts" +async function main() { + // Connect to the devnet Solana cluster + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + + // Initialize the keypair from the environment variable or create a new one + const payer = await initializeKeypair(connection); + + // Generate 22 recipient keypairs using makeKeypairs + const recipients = makeKeypairs(22).map(keypair => keypair.publicKey); + // Initialize the lookup table with the generated recipients + const lookupTableAddress = await initializeLookupTable( + user, + connection, + recipients, + ); + + // Wait for a new block before using the lookup table + await waitForNewBlock(connection, 1); + + // Fetch the lookup table account + const lookupTableAccount = ( + await connection.getAddressLookupTable(lookupTableAddress) + ).value; + + // Check if the lookup table was successfully fetched + if (!lookupTableAccount) { + throw new Error("Lookup table not found"); + } + + // Create transfer instructions for each recipient + const transferInstructions = recipients.map(recipient => + SystemProgram.transfer({ + fromPubkey: user.publicKey, // The payer + toPubkey: recipient, // The recipient + lamports: LAMPORTS_PER_SOL * 0.01, // Amount to transfer + }), + ); + + // Send the versioned transaction including the lookup table + const txid = await sendVersionedTransaction( + connection, + user, + transferInstructions, + [lookupTableAccount], + ); + + // Log the transaction link for easy access + console.log(`Transaction URL: ${getExplorerLink("tx", txid, "devnet")}`); +} +``` + +Even though we will create transfer instructions with full recipient addresses, +the use of lookup tables allows the `@solana/web3.js` framework to optimize the +transaction size. The addresses in the transaction that match entries in the +lookup table will be replaced with compact pointers referencing the lookup +table. By doing this, addresses will be represented using only a single byte in +the final transaction, significantly reducing the transaction's size. + +Use `npx esrun use-lookup-tables.ts` in the command line to execute the `main` +function. You should see an output similar to the following: + +```bash +Current balance is 1.38866636 +PublicKey: 8iGVBt3dcJdp9KfyTRcKuHY6gXCMFdnSG2F1pAwsUTMX +lookup table address: Cc46Wp1mtci3Jm9EcH35JcDQS3rLKBWzy9mV1Kkjjw7M +https://explorer.solana.com/tx/4JvCo2azy2u8XK2pU8AnJiHAucKTrZ6QX7EEHVuNSED8B5A8t9GqY5CP9xB8fZpTNuR7tbUcnj2MiL41xRJnLGzV?cluster=devnet +Waiting for 1 new blocks +https://explorer.solana.com/tx/rgpmxGU4QaAXw9eyqfMUqv8Lp6LHTuTyjQqDXpeFcu1ijQMmCH2V3Sb54x2wWAbnWXnMpJNGg4eLvuy3r8izGHt?cluster=devnet +Finished successfully +``` + +The first transaction link in the console represents the transaction for +creating and extending the lookup table. The second transaction represents the +transfers to all recipients. Feel free to inspect these transactions in the +explorer. + +Remember, this same transaction was failing when you first downloaded the +starter code. Now that we're using lookup tables, we can do all 22 transfers in +a single transaction. + +#### 6. Add more addresses to the lookup table + +Keep in mind that the solution we've come up with so far only supports transfers +to up to 30 accounts since we only extend the lookup table once. When you factor +in the transfer instruction size, it's actually possible to extend the lookup +table with an additional 27 addresses and complete an atomic transfer to up to +57 recipients. 
Let's go ahead and add support for this now! + +All we need to do is go into `initializeLookupTable` and do two things: + +1. Modify the existing call to `extendLookupTable` to only add the first 30 + addresses (any more than that and the transaction will be too large) +2. Add a loop that will keep extending a lookup table of 30 addresses at a time + until all addresses have been added + +```typescript title="use-lookup-tables.ts" +async function initializeLookupTable( + user: Keypair, + connection: Connection, + addresses: PublicKey[], +): Promise { + // Get the current slot + const slot = await connection.getSlot(); + + // Create the lookup table and retrieve its address + const [lookupTableInst, lookupTableAddress] = + AddressLookupTableProgram.createLookupTable({ + authority: user.publicKey, // The authority to modify the lookup table + payer: user.publicKey, // The payer for the transaction fees + recentSlot: slot - 1, // Recent slot to derive lookup table's address + }); + console.log("Lookup table address:", lookupTableAddress.toBase58()); + + // Helper function to extend the lookup table in batches + const extendLookupTable = async (remainingAddresses: PublicKey[]) => { + while (remainingAddresses.length > 0) { + const toAdd = remainingAddresses.slice(0, 30); // Add up to 30 addresses + remainingAddresses = remainingAddresses.slice(30); + + const extendInstruction = AddressLookupTableProgram.extendLookupTable({ + payer: user.publicKey, + authority: user.publicKey, + lookupTable: lookupTableAddress, + addresses: toAdd, + }); + + // Send the transaction to extend the lookup table with the new addresses + await sendVersionedTransaction(connection, user, [extendInstruction]); + } + }; + + // Send the initial transaction to create the lookup table and add the first 30 addresses + const initialBatch = addresses.slice(0, 30); + const remainingAddresses = addresses.slice(30); + + await sendVersionedTransaction(connection, user, [lookupTableInst]); + + // Extend the lookup table with the remaining addresses, if any + await extendLookupTable(initialBatch); + await extendLookupTable(remainingAddresses); + + return lookupTableAddress; +} +``` + +Congratulations! If you feel good about this lab, you're probably ready to work +with lookup tables and versioned transactions on your own. If you want to take a +look at the final solution code you can +[find it on the solution branch](https://github.com/Unboxed-Software/solana-versioned-transactions/tree/solution). + +## Challenge + +As a challenge, experiment with deactivating, closing, and freezing lookup +tables. Remember that you need to wait for a lookup table to finish deactivating +before you can close it. Also, if a lookup table is frozen, it cannot be +modified (deactivated or closed), so you will have to test separately or use +separate lookup tables. + +1. Create a function for deactivating the lookup table. +2. Create a function for closing the lookup table +3. Create a function for freezing the lookup table +4. Test the functions by calling them in the `main()` function + +You can reuse the functions we created in the lab for sending the transaction +and waiting for the lookup table to activate/deactivate. Feel free to reference +this [solution code](https://github.com/Unboxed-Software/versioned-transaction/tree/challenge). + + + +Push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=b58fdd00-2b23-4e0d-be55-e62677d351ef)! 
+ + diff --git a/content/courses/program-optimization/meta.json b/content/courses/program-optimization/meta.json new file mode 100644 index 000000000..9ea9e4774 --- /dev/null +++ b/content/courses/program-optimization/meta.json @@ -0,0 +1,8 @@ +{ + "pages": [ + "program-architecture", + "program-configuration", + "rust-macros", + "lookup-tables" + ] +} diff --git a/content/courses/program-optimization/program-architecture.mdx b/content/courses/program-optimization/program-architecture.mdx new file mode 100644 index 000000000..c5417b206 --- /dev/null +++ b/content/courses/program-optimization/program-architecture.mdx @@ -0,0 +1,2167 @@ +--- +title: Program Architecture +objectives: + - Use Box and Zero Copy to work with large data onchain + - Make better PDA design decisions + - Future-proof your programs + - Deal with concurrency issues +description: "Design your Solana programs efficiently." +--- + +## Summary + +- If your data accounts are too large for the Stack, wrap them in `Box` to + allocate them to the Heap +- Use Zero-Copy to deal with accounts that are too large for `Box` (< 10MB) +- The size and the order of fields in an account matter; put the variable length + fields at the end +- Solana can process in parallel, but you can still run into bottlenecks; be + mindful of "shared" accounts that all users interacting with the program have + to write to + +## Lesson + +Program Architecture is what separates the hobbyist from the professional. +Crafting performant programs has more to do with system **design** than it does +with the code. And you, as the designer, need to think about: + + 1. What your code needs to do + 2. What possible implementations there are + 3. What are the tradeoffs between different implementations + +These questions are even more important when developing for a blockchain. Not +only are resources more limited than in a typical computing environment, you're +also dealing with people's assets. + +We'll leave most of the asset handling discussion to +[security course lesson](/developers/courses/program-security/security-intro), +but it's important to note the nature of resource limitations in Solana +development. There are, of course, limitations in a typical development +environment, but there are limitations unique to blockchain and Solana +development such as how much data can be stored in an account, the cost to store +that data, and how many compute units are available per transaction. You, the +program designer, have to be mindful of these limitations to create programs +that are affordable, fast, safe, and functional. Today we will be delving into +some of the more advanced considerations that should be taken when creating +Solana programs. + +### Dealing With Large Accounts + +In modern application programming, we don't often have to think about the size +of the data structures we are using. Do you want to make a string? You can put a +4000-character limit on it if you want to avoid abuse, but it's probably not an +issue. Want an integer? They're pretty much always 32-bit for convenience. + +In high-level languages, you are in the data-land-o-plenty! Now, in Solana land, +we pay per byte stored (rent) and have limits on heap, stack, and account sizes. +We have to be a little more crafty with our bytes. There are two main concerns +we are going to be looking at in this section: + +1. Since we pay-per-byte, we generally want to keep our footprint as small as + possible. 
We will delve more into optimization in another section, but we'll + introduce you to the concept of data sizes here. + +2. When operating on larger data, we run into + [Stack](/docs/programs/faq#stack) and + [Heap](/docs/programs/faq#heap-size) constraints - to get + around these, we'll look at using Box and Zero-Copy. + +#### Sizes + +In Solana, a transaction's fee payer pays for each byte stored onchain. This is +called [rent](/docs/core/fees#rent). + + + +Rent is a bit of a misnomer since it never gets permanently taken. Once you +deposit rent into the account, that data can stay there forever, or you can get +refunded the rent if you close the account. Previously, rent was paid in +intervals, similar to traditional rent, but now there's an enforced minimum +balance for rent exemption. You can read more about it in +[the Solana documentation](/docs/core/fees#rent-exempt). + + + +Putting data on the blockchain can be expensive, which is why NFT attributes and +associated files, like images, are stored offchain. The goal is to strike a +balance between keeping your program highly functional and ensuring that users +aren't discouraged by the cost of storing data onchain. + +The first step in optimizing for space in your program is understanding the size +of your structs. Below is a helpful reference from the +[Anchor Book](https://book.anchor-lang.com/anchor_references/space.html). + +{/* Edits note: this very wide table looks awful unless you made your window very wide */} + +| Types | Space in bytes | Details/Example | +| ----------- | ----------------------------- | ----------------------------------------------------------------------------------------------- | +| bool | 1 | would only require 1 bit but still uses 1 byte | +| u8/i8 | 1 | | +| u16/i16 | 2 | | +| u32/i32 | 4 | | +| u64/i64 | 8 | | +| u128/i128 | 16 | | +| [T;amount] | space(T) \* amount | e.g. space([u16;32]) = 2 \* 32 = 64 | +| Pubkey | 32 | | +| Vec\ | 4 + (space(T) \* amount) | Account size is fixed so account should be initialized with sufficient space from the beginning | +| String | 4 + length of string in bytes | Account size is fixed so account should be initialized with sufficient space from the beginning | +| Option\ | 1 + (space(T)) | | +| Enum | 1 + Largest Variant Size | e.g. Enum \{ A, B \{ val: u8 \}, C \{ val: u16 \} \} -> 1 + space(u16) = 3 | +| f32 | 4 | serialization will fail for NaN | +| f64 | 8 | serialization will fail for NaN | + +Knowing these, start thinking about little optimizations you might take in a +program. For example, if you have an integer field that will only ever reach +100, don't use a u64/i64, use a u8. Why? Because a u64 takes up 8 bytes, with a +max value of 2^64 or 1.84 \* 10^19. That's a waste of space since you only need +to accommodate numbers up to 100. A single byte will give you a max value of 255 +which, in this case, would be sufficient. Similarly, there's no reason to use i8 +if you'll never have negative numbers. + +Be careful with small number types, though. You can quickly run into unexpected +behavior due to overflow. For example, a u8 type that is iteratively incremented +will reach 255 and then go back to 0 instead of 256. 
For more real-world +context, look up the +**[Y2K bug](https://www.nationalgeographic.org/encyclopedia/Y2K-bug/#:~:text=As%20the%20year%202000%20approached%2C%20computer%20programmers%20realized%20that%20computers,would%20be%20damaged%20or%20flawed.).** + +If you want to read more about Anchor sizes, take a look at +[Sec3's blog post about it](https://www.sec3.dev/blog/all-about-anchor-account-size) +. + +#### Box + +Now that you know a little bit about data sizes, let's skip forward and look at +a problem you'll run into if you want to deal with larger data accounts. Say you +have the following data account: + +```rust +#[account] +pub struct SomeBigDataStruct { + pub big_data: [u8; 5000], +} + +#[derive(Accounts)] +pub struct SomeFunctionContext<'info> { + pub some_big_data: Account<'info, SomeBigDataStruct>, +} +``` + +If you try to pass `SomeBigDataStruct` into the function with the +`SomeFunctionContext` context, you'll run into the following compiler warning: + +`// Stack offset of XXXX exceeded max offset of 4096 by XXXX bytes, please minimize large stack variables` + +And if you try to run the program it will just hang and fail. + +Why is this? + +It has to do with the Stack. Every time you call a function in Solana it gets a +4KB stack frame. This is static memory allocation for local variables. This is +where that entire `SomeBigDataStruct` gets stored in memory and since 5000 +bytes, or 5KB, is greater than the 4KB limit, it will throw a stack error. So +how do we fix this? + +The answer is the +[**`Box`**](https://docs.rs/anchor-lang/latest/anchor_lang/accounts/boxed/index.html) +type! + +```rust +#[account] +pub struct SomeBigDataStruct { + pub big_data: [u8; 5000], +} + +#[derive(Accounts)] +pub struct SomeFunctionContext<'info> { + pub some_big_data: Box>, // <- Box Added! +} +``` + +In Anchor, **`Box`** is used to allocate the account to the Heap, not the +Stack. Which is great since the Heap gives us 32KB to work with. The best part +is you don't have to do anything different within the function. All you need to +do is add `Box<…>` around all of your big data accounts. + +But Box is not perfect. You can still overflow the stack with sufficiently large +accounts. We'll learn how to fix this in the next section. + +#### Zero Copy + +Okay, so now you can deal with medium-sized accounts using `Box`. But what if +you need to use really big accounts like the max size of 10MB? Take the +following as an example: + +```rust +#[account] +pub struct SomeReallyBigDataStruct { + pub really_big_data: [u128; 1024], // 16,384 bytes +} +``` + +This account will make your program fail, even wrapped in a `Box`. To get around +this, you can use `zero_copy` and `AccountLoader`. Simply add `zero_copy` to +your account struct, add `zero` as a constraint in the account validation +struct, and wrap the account type in the account validation struct in an +`AccountLoader`. 
+ +```rust +#[account(zero_copy)] +pub struct SomeReallyBigDataStruct { + pub really_big_data: [u128; 1024], // 16,384 bytes +} + +pub struct ConceptZeroCopy<'info> { + #[account(zero)] + pub some_really_big_data: AccountLoader<'info, SomeReallyBigDataStruct>, +} +``` + +**Note**: In older versions of anchor `< 0.28.0` you may have to use: +`zero_copy(unsafe))` ( +[Thanks @0xk2\_](https://github.com/Unboxed-Software/solana-course/issues/347) +for this find ) + +To understand what's happening here, take a look at the +[rust Anchor documentation](https://docs.rs/anchor-lang/latest/anchor_lang/attr.account.html) + +> Other than being more efficient, the most salient benefit [`zero_copy`] +> provides the ability to define account types larger than the max stack or heap +> size. When using borsh, the account has to be copied and deserialized into a +> new data structure and thus is constrained by stack and heap limits imposed by +> the BPF VM. With zero copy deserialization, all bytes from the account's +> backing `RefCell<&mut [u8]>` are simply re-interpreted as a reference to the +> data structure. No allocations or copies are necessary. Hence the ability to +> get around stack and heap limitations. + +Basically, your program never actually loads zero-copy account data into the +stack or heap. It instead gets pointer access to the raw data. The +`AccountLoader` ensures this doesn't change too much about how you interact with +the account from your code. + +There are a couple of caveats using `zero_copy`. First, you cannot use the +`init` constraint in the account validation struct like you may be used to. This +is due to there being a CPI limit on accounts bigger than 10KB. + +```rust +pub struct ConceptZeroCopy<'info> { + #[account(zero, init)] // <- Can't do this + pub some_really_big_data: AccountLoader<'info, SomeReallyBigDataStruct>, +} +``` + +Instead, your client has to create a large account and pay for its rent in a +separate instruction. + +```typescript +const accountSize = 16_384 + 8; +const ix = anchor.web3.SystemProgram.createAccount({ + fromPubkey: wallet.publicKey, + newAccountPubkey: someReallyBigData.publicKey, + lamports: + await program.provider.connection.getMinimumBalanceForRentExemption( + accountSize, + ), + space: accountSize, + programId: program.programId, +}); + +const txHash = await program.methods + .conceptZeroCopy() + .accounts({ + owner: wallet.publicKey, + someReallyBigData: someReallyBigData.publicKey, + }) + .signers([someReallyBigData]) + .preInstructions([ix]) + .rpc(); +``` + +The second caveat is that you'll have to call one of the following methods from +inside your rust instruction handler to load the account: + +- `load_init` when first initializing an account (this will ignore the missing + account discriminator that gets added only after the user's instruction code) +- `load` when the account is not mutable +- `load_mut` when the account is mutable + +For example, if you wanted to init and manipulate the `SomeReallyBigDataStruct` +from above, you'd call the following in the function + +```rust +let some_really_big_data = &mut ctx.accounts.some_really_big_data.load_init()?; +``` + +After you do that, then you can treat the account like normal! Go ahead and +experiment with this in the code yourself to see everything in action! 
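+
+Putting these pieces together, a bare-bones instruction handler for the
+`ConceptZeroCopy` context shown above might look something like this (a
+sketch, not the course's full example):
+
+```rust
+pub fn concept_zero_copy(ctx: Context<ConceptZeroCopy>) -> Result<()> {
+    // Use load_init the first time, before the account discriminator is written
+    let some_really_big_data = &mut ctx.accounts.some_really_big_data.load_init()?;
+
+    // From here, read and write the data as usual
+    some_really_big_data.really_big_data[0] = 42;
+
+    Ok(())
+}
+```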
+ +For a better understanding of how this all works, Solana put together a really +nice [video](https://www.youtube.com/watch?v=zs_yU0IuJxc&feature=youtu.be) and +[code](https://github.com/solana-developers/anchor-zero-copy-example) explaining +Box and Zero-Copy in vanilla Solana. + +### Dealing with Accounts + +Now that you know the nuts and bolts of space consideration on Solana, let's +look at some higher-level considerations. In Solana, everything is an account, +so for the next couple sections, we'll look at some account architecture +concepts. + +#### Data Order + +This first consideration is fairly simple. As a rule of thumb, keep all variable +length fields at the end of the account. Take a look at the following: + +```rust +#[account] // Anchor hides the account discriminator +pub struct BadState { + pub flags: Vec, // 0x11, 0x22, 0x33 ... + pub id: u32 // 0xDEAD_BEEF +} +``` + +The `flags` field is variable length. This makes looking up a specific account +by the `id` field very difficult, as an update to the data in `flags` changes +the location of `id` on the memory map. + +To make this more clear, observe what this account's data looks like onchain +when `flags` has four items in the vector vs eight items. If you were to call +`solana account ACCOUNT_KEY` you'd get a data dump like the following: +`solana account ACCOUNT_KEY` you'd get a data dump like the following: + +```rust +0000: 74 e4 28 4e d9 ec 31 0a -> Account Discriminator (8) +0008: 04 00 00 00 11 22 33 44 -> Vec Size (4) | Data 4*(1) +0010: DE AD BE EF -> id (4) + +--- vs --- + +0000: 74 e4 28 4e d9 ec 31 0a -> Account Discriminator (8) +0008: 08 00 00 00 11 22 33 44 -> Vec Size (8) | Data 4*(1) +0010: 55 66 77 88 DE AD BE EF -> Data 4*(1) | id (4) +``` + +In both cases, the first eight bytes are the Anchor account discriminator. In +the first case, the next four bytes represent the size of the `flags` vector, +followed by another four bytes for the data, and finally the `id` field's data. + +In the second case, the `id` field moved from address 0x0010 to 0x0014 because +the data in the `flags` field took up four more bytes. + +The main problem with this is lookup. When you query Solana, you use filters +that look at the raw data of an account. These are called a `memcmp` filters, or +memory compare filters. You give the filter an `offset` and `bytes`, and the +filter then looks directly at the memory, offset from the start by the `offset` +you provide, and compares the bytes in memory to the `bytes` you provide. + +For example, you know that the `flags` struct will always start at the address +0x0008 since the first 8 bytes contain the account discriminator. Querying all +accounts where the `flags` length is equal to four is possible because we _know_ +that the four bytes at 0x0008 represent the length of the data in `flags`. Since +the account discriminator is + +```typescript +const states = await program.account.badState.all([ + { + memcmp: { + offset: 8, + bytes: bs58.encode([0x04]), + }, + }, +]); +``` + +However, if you wanted to query by the `id`, you wouldn't know what to put for +the `offset` since the location of `id` is variable based on the length of +`flags`. That doesn't seem very helpful. IDs are usually there to help with +`flags`. That doesn't seem very helpful. IDs are usually there to help with +queries! The simple fix is to flip the order. 
+ +```rust +#[account] // Anchor hides the account discriminator +pub struct GoodState { + pub id: u32 // 0xDEAD_BEEF + pub flags: Vec, // 0x11, 0x22, 0x33 ... +} +``` + +With variable length fields at the end of the struct, you can always query +accounts based on all the fields up to the first variable length field. To echo +the beginning of this section: As a rule of thumb, keep all variable length +structs at the end of the account. + +#### Account Flexibility and Future-Proofing + +When developing Solana programs, it's crucial to design your account structures +with future upgrades and backward compatibility in mind. Solana offers powerful +features like account resizing and Anchor's `InitSpace` attribute to handle +these challenges efficiently. Let's explore a more dynamic and flexible approach +using a game state example: + +```rust +use anchor_lang::prelude::*; + +#[account] +#[derive(InitSpace)] +pub struct GameState { // V1 + pub version: u8, + pub health: u64, + pub mana: u64, + pub experience: Option, + #[max_len(50)] + pub event_log: Vec +} +``` + +In this GameState, we have: + +- A `version` field to track account structure changes +- Basic character attributes (`health`, `mana`) +- An `experience` field as `Option` for backward compatibility +- An `event_log` with a specified maximum length + +Key advantages of this approach: + +1. **Automatic Space Calculation**: The `InitSpace` attribute automatically + calculates the required account space. +2. **Versioning**: The `version` field allows for easy identification of account + structure versions. +3. **Flexible Fields**: Using `Option` for new fields maintains compatibility + with older versions. +4. **Defined Limits**: The `max_len` attribute on `Vec` fields clearly + communicates size constraints. + +When you need to upgrade your account structure, such as increasing the length +of `event_log` or adding new fields, you can use a single upgrade instruction +with Anchor's `realloc` constraint: + +1. Update the `GameState` struct with new fields or increased `max_len` + attributes: + + ```rust + #[account] + #[derive(InitSpace)] + pub struct GameState { + pub version: u8, + pub health: u64, + pub mana: u64, + pub experience: Option, + #[max_len(100)] // Increased from 50 + pub event_log: Vec, + pub new_field: Option, // Added new field + } + ``` + +2. Use a single `UpgradeGameState` context for all upgrades with Anchor's + `realloc` constraint for `GameState`: + + ```rust + #[derive(Accounts)] + pub struct UpgradeGameState<'info> { + #[account( + mut, + realloc = GameState::INIT_SPACE, + realloc::payer = payer, + realloc::zero = false, + )] + pub game_state: Account<'info, GameState>, + #[account(mut)] + pub payer: Signer<'info>, + pub system_program: Program<'info, System>, + } + ``` + +3. 
Implement the upgrade logic in a single function: + + ```rust + pub fn upgrade_game_state(ctx: Context) -> Result<()> { + let game_state = &mut ctx.accounts.game_state; + + match game_state.version { + 1 => { + game_state.version = 2; + game_state.experience = Some(0); + msg!("Upgraded to version 2"); + }, + 2 => { + game_state.version = 3; + game_state.new_field = Some(0); + msg!("Upgraded to version 3"); + }, + _ => return Err(ErrorCode::AlreadyUpgraded.into()), + } + + Ok(()) + } + ``` + +The example to demonstrate this approach: + +```rust +use anchor_lang::prelude::*; + +#[account] +#[derive(InitSpace)] +pub struct GameState { + pub version: u8, + pub health: u64, + pub mana: u64, + pub experience: Option, + #[max_len(100)] // Increased from 50 + pub event_log: Vec, + pub new_field: Option, +} + +#[derive(Accounts)] +pub struct UpgradeGameState<'info> { + #[account( + mut, + realloc = GameState::INIT_SPACE, + realloc::payer = payer, + realloc::zero = false, + )] + pub game_state: Account<'info, GameState>, + #[account(mut)] + pub payer: Signer<'info>, + pub system_program: Program<'info, System>, +} + +#[program] +pub mod your_program { + use super::*; + + // ... other instructions ... + + pub fn upgrade_game_state(ctx: Context) -> Result<()> { + let game_state = &mut ctx.accounts.game_state; + + match game_state.version { + 1 => { + game_state.version = 2; + game_state.experience = Some(0); + msg!("Upgraded to version 2"); + }, + 2 => { + game_state.version = 3; + game_state.new_field = Some(0); + msg!("Upgraded to version 3"); + }, + _ => return Err(ErrorCode::AlreadyUpgraded.into()), + } + + Ok(()) + } +} + +#[error_code] +pub enum ErrorCode { + #[msg("Account is already at the latest version")] + AlreadyUpgraded, +} +``` + +This approach: + +- Uses the Anchor's + [`realloc`](https://docs.rs/anchor-lang/latest/anchor_lang/derive.Accounts.html#normal-constraints) + constraint to automatically handle account resizing. +- The + [`InitSpace`](https://docs.rs/anchor-lang/latest/anchor_lang/derive.InitSpace.html) + derive macro automatically implements the `Space` trait for the `GameState` + struct. This trait includes the + [`INIT_SPACE`](https://docs.rs/anchor-lang/latest/anchor_lang/trait.Space.html#associatedconstant.INIT_SPACE) + associated constant , which calculates the total space required for the + account. +- Designates a payer for any additional rent with `realloc::payer = payer`. +- Keeps existing data with `realloc::zero = false`. + + + +Account data can be increased within a single call by up to +`solana_program::entrypoint::MAX_PERMITTED_DATA_INCREASE` bytes. + +Memory used to grow is already zero-initialized upon program entrypoint and +re-zeroing it wastes compute units. If within the same call a program reallocs +from larger to smaller and back to larger again the new space could contain +stale data. Pass `true` for `zero_init` in this case, otherwise compute units +will be wasted re-zero-initializing. + + + + + +While account resizing is powerful, use it judiciously. Consider the trade-offs +between frequent resizing and initial allocation based on your specific use case +and expected growth patterns. + +- Always ensure your account remains rent-exempt before resizing. +- The payer of the transaction is responsible for providing the additional + lamports. +- Consider the cost implications of frequent resizing in your program design. + + + + +In native Rust, you can resize accounts using the `realloc()` method. 
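+
+As a minimal sketch (not tied to any particular program, and assuming the
+account is owned by the running program and already holds enough lamports to
+remain rent-exempt at the new size), a native resize might look like this:
+
+```rust
+use solana_program::{
+    account_info::AccountInfo, entrypoint::ProgramResult, program_error::ProgramError,
+};
+
+// Grow `target_account` by `additional_bytes` within a single instruction.
+fn grow_account(target_account: &AccountInfo, additional_bytes: usize) -> ProgramResult {
+    let new_len = target_account
+        .data_len()
+        .checked_add(additional_bytes)
+        .ok_or(ProgramError::InvalidRealloc)?;
+
+    // `false` skips re-zeroing; newly added space is already zero-initialized
+    // unless the account was shrunk and re-grown earlier in the same call.
+    target_account.realloc(new_len, false)
+}
+```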
For more details, refer to the
+[account resizing program](/developers/cookbook/programs/change-account-size).
+
+#### Data Optimization
+
+The idea here is to be aware of wasted bits. For example, if you have a field
+that represents the month of the year, don't use a `u64`. There will only ever
+be 12 months. Use a `u8`. Better yet, use a `u8` Enum and label the months.
+
+To get even more aggressive on bit savings, be careful with booleans. Look at
+the struct below, composed of eight boolean flags. While a boolean _can_ be
+represented as a single bit, borsh deserialization will allocate an entire byte
+to each of these fields. That means that eight booleans wind up being eight
+bytes instead of eight bits, an eightfold increase in size.
+
+```rust
+#[account]
+pub struct BadGameFlags { // 8 bytes
+    pub is_frozen: bool,
+    pub is_poisoned: bool,
+    pub is_burning: bool,
+    pub is_blessed: bool,
+    pub is_cursed: bool,
+    pub is_stunned: bool,
+    pub is_slowed: bool,
+    pub is_bleeding: bool,
+}
+```
+
+To optimize this, you could have a single field as a `u8`. Then you can use
+bitwise operations to look at each bit and determine if it's "toggled on" or
+not.
+
+```rust
+const IS_FROZEN_FLAG: u8 = 1 << 0;
+const IS_POISONED_FLAG: u8 = 1 << 1;
+const IS_BURNING_FLAG: u8 = 1 << 2;
+const IS_BLESSED_FLAG: u8 = 1 << 3;
+const IS_CURSED_FLAG: u8 = 1 << 4;
+const IS_STUNNED_FLAG: u8 = 1 << 5;
+const IS_SLOWED_FLAG: u8 = 1 << 6;
+const IS_BLEEDING_FLAG: u8 = 1 << 7;
+const NO_EFFECT_FLAG: u8 = 0b00000000;
+
+#[account]
+pub struct GoodGameFlags { // 1 byte
+    pub status_flags: u8,
+}
+```
+
+That saves you 7 bytes of data! The tradeoff, of course, is that you now have to
+do bitwise operations. But that's worth having in your toolkit.
+
+#### Indexing
+
+This last account concept is fun and illustrates the power of PDAs. When
+creating program accounts, you can specify the seeds used to derive the PDA.
+This is exceptionally powerful since it lets you derive your account addresses
+rather than store them.
+
+The best example of this is good ol' Associated Token Accounts (ATAs)!
+
+```typescript
+function findAssociatedTokenAddress(
+  walletAddress: PublicKey,
+  tokenMintAddress: PublicKey,
+): PublicKey {
+  return PublicKey.findProgramAddressSync(
+    [
+      walletAddress.toBuffer(),
+      TOKEN_PROGRAM_ID.toBuffer(),
+      tokenMintAddress.toBuffer(),
+    ],
+    SPL_ASSOCIATED_TOKEN_ACCOUNT_PROGRAM_ID,
+  )[0];
+}
+```
+
+This is how most of your SPL tokens are stored. Rather than keep a database
+table of SPL token account addresses, the only thing you have to know is your
+wallet address and the mint address. The ATA address can be calculated by
+hashing these together and voila! You have your token account address.
+
+Depending on the seeding, you can create all sorts of relationships:
+
+- One-Per-Program (Global Account) - If you create an account with a determined
+  `seeds=[b"ONE PER PROGRAM"]`, only one can ever exist for that seed in that
+  program. For example, if your program needs a lookup table, you could seed it
+  with `seeds=[b"Lookup"]`. Just be careful to provide appropriate access
+  restrictions.
+- One-Per-Owner - Say you're creating a video game player account and you only
+  want one player account per wallet. Then you'd seed the account with
+  `seeds=[b"PLAYER", owner.key().as_ref()]`.
+  This way, you'll always know where to look for a wallet's player account
+  **and** there can only ever be one of them.
+- Multiple-Per-Owner - Okay, but what if you want multiple accounts per wallet?
+  Say you want to mint podcast episodes. Then you could seed your `Podcast`
+  account like this:
+  `seeds=[b"Podcast", owner.key().as_ref(), episode_number.to_be_bytes().as_ref()]`.
+  Now, if you want to look up episode 50 from a specific wallet, you can! And
+  you can have as many episodes as you want per owner.
+- One-Per-Owner-Per-Account - This is effectively the ATA example we saw above,
+  where we have one token account per wallet and mint account:
+  `seeds=[b"Mock ATA", owner.key().as_ref(), mint.key().as_ref()]`
+
+From there you can mix and match in all sorts of clever ways! But the preceding
+list should give you enough to get started.
+
+The big benefit of really paying attention to this aspect of design is answering
+the "indexing" problem. Without PDAs and seeds, all users would have to keep
+track of all of the addresses of all of the accounts they've ever used. This
+isn't feasible for users, so they'd have to depend on a centralized entity to
+store their addresses in a database. In many ways that defeats the purpose of a
+globally distributed network. PDAs are a much better solution.
+
+To drive this all home, here's an example of a scheme from a production
+podcasting program. The program needed the following accounts:
+
+- **Channel Account**
+  - Name
+  - Episodes Created (u64)
+- **Podcast Account(s)**
+  - Name
+  - Audio URL
+
+To properly index each account address, the accounts use the following seeds:
+
+```rust
+// Channel Account
+seeds=[b"Channel", owner.key().as_ref()]
+
+// Podcast Account
+seeds=[b"Podcast", channel_account.key().as_ref(), episode_number.to_be_bytes().as_ref()]
+```
+
+You can always find the channel account for a particular owner. And since the
+channel stores the number of episodes created, you always know the upper bound
+of where to search for queries. Additionally, you always know what index to
+create a new episode at: `index = episodes_created`.
+
+```rust
+Podcast 0: seeds=[b"Podcast", channel_account.key().as_ref(), 0.to_be_bytes().as_ref()]
+Podcast 1: seeds=[b"Podcast", channel_account.key().as_ref(), 1.to_be_bytes().as_ref()]
+Podcast 2: seeds=[b"Podcast", channel_account.key().as_ref(), 2.to_be_bytes().as_ref()]
+...
+Podcast X: seeds=[b"Podcast", channel_account.key().as_ref(), X.to_be_bytes().as_ref()]
+```
+
+### Dealing with Concurrency
+
+One of the main reasons to choose Solana for your blockchain environment is its
+parallel transaction execution. That is, Solana can run transactions in parallel
+as long as those transactions aren't trying to write data to the same account.
+This improves program throughput out of the box, but with some proper planning,
+you can avoid concurrency issues and really boost your program's performance.
+
+#### Shared Accounts
+
+If you've been around crypto for a while, you may have experienced a big NFT
+mint event. A new NFT project is coming out, everyone is really excited about
+it, and then the Candy Machine goes live. It's a mad dash to click
+`accept transaction` as fast as you can. If you were clever, you may have
+written a bot to enter the transactions faster than the website's UI could. This
+mad rush to mint creates a lot of failed transactions. But why? Because everyone
+is trying to write data to the same Candy Machine account.
+ +Take a look at a simple example: + +Alice and Bob are trying to pay their friends Carol and Dean respectively. All +four accounts change, but neither depends on other. Both transactions can run at +the same time. + +```rust +Alice -- pays --> Carol + +Bob ---- pays --> Dean +``` + +But if Alice and Bob both try to pay Carol at the same time, they'll run into +issues. + +```rust +Alice -- pays --> | + -- > Carol +Bob -- pays --- | +``` + +Since both of these transactions write to Carol's token account, only one of +them can go through at a time. Fortunately, Solana is very fast, so it'll +probably seem like they get paid at the same time. But what happens if more than +just Alice and Bob try to pay Carol? + +```rust +Alice -- pays --> | + -- > Carol +x1000 -- pays --- | +Bob -- pays --- | +``` + +What if 1000 people try to pay Carol at the same time? Each of the 1000 +instructions will be queued up to run in sequence. To some of them, the payment +will seem like it went through right away. They'll be the lucky ones whose +instruction got included early. But some of them will end up waiting quite a +bit. And for some, their transaction will simply fail. + +While it seems unlikely for 1000 people to pay Carol at the same time, it's +actually very common to have an event, like an NFT mint, where many people are +trying to write data to the same account all at once. + +Imagine you create a super popular program and you want to take a fee on every +transaction you process. For accounting reasons, you want all of those fees to +go to one wallet. With that setup, on a surge of users, your protocol will +become slow and or become unreliable. Not great. So what's the solution? +Separate the data transaction from the fee transaction. + +For example, imagine you have a data account called `DonationTally`. Its only +function is to record how much you have donated to a specific hard-coded +community wallet. + +```rust +#[account] +pub struct DonationTally { + is_initialized: bool, + lamports_donated: u64, + lamports_to_redeem: u64, + owner: Pubkey, +} +``` + +First, let's look at the suboptimal solution. + +```rust +pub fn run_concept_shared_account_bottleneck(ctx: Context, lamports_to_donate: u64) -> Result<()> { + + let donation_tally = &mut ctx.accounts.donation_tally; + + if !donation_tally.is_initialized { + donation_tally.is_initialized = true; + donation_tally.owner = ctx.accounts.owner.key(); + donation_tally.lamports_donated = 0; + donation_tally.lamports_to_redeem = 0; + } + + let cpi_context = CpiContext::new( + ctx.accounts.system_program.to_account_info(), + Transfer { + from: ctx.accounts.owner.to_account_info(), + to: ctx.accounts.community_wallet.to_account_info(), + }); + transfer(cpi_context, lamports_to_donate)?; + + + donation_tally.lamports_donated = donation_tally.lamports_donated.checked_add(lamports_to_donate).unwrap(); + donation_tally.lamports_to_redeem = 0; + + Ok(()) +} +``` + +You can see that the transfer to the hardcoded `community_wallet` happens in the +same function that you update the tally information. This is the most +straightforward solution, but if you run the tests for this section, you'll see +a slowdown. 
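+
+The accounts context for this handler isn't shown above; the sketch below is a
+rough guess (the struct name and constraints are assumptions), but it highlights
+the key detail: every caller has to take a write lock on the same
+`community_wallet` account.
+
+```rust
+// Hypothetical accounts context for the bottlenecked handler.
+#[derive(Accounts)]
+pub struct ConceptSharedAccountBottleneck<'info> {
+    #[account(mut)]
+    pub donation_tally: Account<'info, DonationTally>,
+    /// CHECK: a single, hard-coded wallet that only receives lamports --
+    /// the shared write target that serializes every transaction.
+    #[account(mut)]
+    pub community_wallet: UncheckedAccount<'info>,
+    #[account(mut)]
+    pub owner: Signer<'info>,
+    pub system_program: Program<'info, System>,
+}
+```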
+
+Now look at the optimized solution:
+
+```rust
+pub fn run_concept_shared_account(ctx: Context, lamports_to_donate: u64) -> Result<()> {
+    let donation_tally = &mut ctx.accounts.donation_tally;
+
+    if !donation_tally.is_initialized {
+        donation_tally.is_initialized = true;
+        donation_tally.owner = ctx.accounts.owner.key();
+        donation_tally.lamports_donated = 0;
+        donation_tally.lamports_to_redeem = 0;
+    }
+
+    let cpi_context = CpiContext::new(
+        ctx.accounts.system_program.to_account_info(),
+        Transfer {
+            from: ctx.accounts.owner.to_account_info(),
+            to: donation_tally.to_account_info(),
+        });
+    transfer(cpi_context, lamports_to_donate)?;
+
+    donation_tally.lamports_donated = donation_tally.lamports_donated.checked_add(lamports_to_donate).unwrap();
+    donation_tally.lamports_to_redeem = donation_tally.lamports_to_redeem.checked_add(lamports_to_donate).unwrap();
+
+    Ok(())
+}
+
+pub fn run_concept_shared_account_redeem(ctx: Context) -> Result<()> {
+    let transfer_amount: u64 = ctx.accounts.donation_tally.lamports_donated;
+
+    // Decrease balance in donation_tally account
+    **ctx.accounts.donation_tally.to_account_info().try_borrow_mut_lamports()? -= transfer_amount;
+
+    // Increase balance in community_wallet account
+    **ctx.accounts.community_wallet.to_account_info().try_borrow_mut_lamports()? += transfer_amount;
+
+    // Reset lamports_to_redeem
+    ctx.accounts.donation_tally.lamports_to_redeem = 0;
+
+    Ok(())
+}
+```
+
+Here, in the `run_concept_shared_account` function, instead of transferring to
+the bottleneck, we transfer to the `donation_tally` PDA. This way, we're only
+affecting the donator's account and their PDA - so no bottleneck! Additionally,
+we keep an internal tally of how many lamports need to be redeemed, i.e.
+transferred from the PDA to the community wallet at a later time. At some point
+in the future, the community wallet will go around and clean up all the
+straggling lamports. It's important to note that anyone should be able to sign
+for the redeem function since the PDA has permission over itself.
+
+If you want to avoid bottlenecks at all costs, this is one way to tackle them.
+Ultimately, this is a design decision and the simpler, less optimal solution
+might be okay for some programs. But if your program is going to have high
+traffic, it's worth trying to optimize. You can always run a simulation to see
+your worst, best, and median cases.
+
+### See it in Action
+
+All of the code snippets from this lesson are part of a
+[Solana program we created to illustrate these concepts](https://github.com/Unboxed-Software/advanced-program-architecture.git).
+Each concept has an accompanying program and test file. For example, the
+**Sizes** concept can be found in:
+
+**program -** `programs/architecture/src/concepts/sizes.rs`
+
+**test -** `tests/sizes.ts`
+
+Now that you've read about each of these concepts, feel free to jump into the
+code to experiment a little. You can change existing values, try to break the
+program, and generally try to understand how everything works.
+
+You can fork and/or clone
+[this program from GitHub](https://github.com/Unboxed-Software/advanced-program-architecture.git)
+to get started. Before building and running the test suite, remember to update
+the `lib.rs` and `Anchor.toml` with your local program ID.
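+
+As a small reminder of what that update looks like (the ID below is a
+placeholder, and the program name in `Anchor.toml` depends on how the repository
+is set up), the two spots to keep in sync are:
+
+```rust
+// In programs/architecture/src/lib.rs: the ID from your local program keypair
+declare_id!("REPLACE_WITH_YOUR_PROGRAM_ID");
+
+// And the matching entry in Anchor.toml (shown here as a comment):
+// [programs.localnet]
+// architecture = "REPLACE_WITH_YOUR_PROGRAM_ID"
+```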
+ +You can run the entire test suite or +[add `.only` to the `describe` call](https://mochajs.org/#exclusive-tests) in a +specific test file to only run that file's tests. Feel free to customize it and +make it your own. + +### Conclusion + +We've talked about quite a few program architecture considerations: bytes, +accounts, bottlenecks, and more. Whether you wind up running into any of these +specific considerations or not, hopefully, the examples and discussion sparked +some thought. At the end of the day, you're the designer of your system. Your +job is to weigh the pros and cons of various solutions. Be forward-thinking, but +be practical. There is no "one good way" to design anything. Just know the +trade-offs. + +## Lab + +Let's use all of these concepts to create a simple, but optimized, RPG game +engine in Solana. This program will have the following features: + +- Let users create a game (`Game` account) and become a "game master" (the + authority over the game) +- Game masters are in charge of their game's configuration +- Anyone from the public can join a game as a player - each player/game + combination will have a `Player` account +- Players can spawn and fight monsters (`Monster` account) by spending action + points; we'll use lamports as the action points +- Spent action points go to a game's treasury as listed in the `Game` account + +We'll walk through the tradeoffs of various design decisions as we go to give +you a sense of why we do things. Let's get started! + +### 1. Program Setup + +We'll build this from scratch. Start by creating a new Anchor project: + +```bash +anchor init rpg +``` + + + +This lab was created with Anchor version `0.30.1` in mind. If there are problems +compiling, please refer to the +[solution code](https://github.com/solana-developers/anchor-rpg/tree/main) for +the environment setup. + + + +Next, run the command `anchor keys sync` that will automatically sync your +program ID. This command updates the program IDs in your program files +(including `Anchor.toml`) with the actual `pubkey` from the program keypair +file. + +Finally, let's scaffold out the program in the `lib.rs` file. Copy the following +into your file before we get started: + +```rust title="lib.rs" +use anchor_lang::prelude::*; +use anchor_lang::solana_program::log::sol_log_compute_units; + +declare_id!("YOUR_KEY_HERE__YOUR_KEY_HERE"); + +#[program] +pub mod rpg { + use super::*; + + pub fn create_game(ctx: Context, max_items_per_player: u8) -> Result<()> { + run_create_game(ctx, max_items_per_player)?; + sol_log_compute_units(); + Ok(()) + } + + pub fn create_player(ctx: Context) -> Result<()> { + run_create_player(ctx)?; + sol_log_compute_units(); + Ok(()) + } + + pub fn spawn_monster(ctx: Context) -> Result<()> { + run_spawn_monster(ctx)?; + sol_log_compute_units(); + Ok(()) + } + + pub fn attack_monster(ctx: Context) -> Result<()> { + run_attack_monster(ctx)?; + sol_log_compute_units(); + Ok(()) + } + + pub fn deposit_action_points(ctx: Context) -> Result<()> { + run_collect_action_points(ctx)?; + sol_log_compute_units(); + Ok(()) + } +} +``` + +### 2. Create Account Structures + +Now that our initial setup is ready, let's create our accounts. We'll have 3: + +1. `Game` - This account represents and manages a game. It includes the treasury + for game participants to pay into and a configuration struct that game + masters can use to customize the game. 
It should include the following + fields: + - `game_master` - effectively the owner/authority + - `treasury` - the treasury to which players will send action points (we'll + just be using lamports for action points) + - `action_points_collected` - tracks the number of action points collected by + the treasury + - `game_config` - a config struct for customizing the game +2. `Player` - A PDA account whose address is derived using the game account + address and the player's wallet address as seeds. It has a lot of fields + needed to track the player's game state: + - `player` - the player's public key + - `game` - the address of the corresponding game account + - `action_points_spent` - the number of action points spent + - `action_points_to_be_collected` - the number of action points that still + need to be collected + - `status_flag` - the player's status + - `experience` - the player's experience + - `kills` - number of monsters killed + - `next_monster_index` - the index of the next monster to face + - `inventory` - a vector of the player's inventory +3. `Monster` - A PDA account whose address is derived using the game account + address, the player's wallet address, and an index (the one stored as + `next_monster_index` in the `Player` account). + - `player` - the player the monster is facing + - `game` - the game the monster is associated with + - `hitpoints` - how many hit points the monster has left + +This is the final project structure: + +```bash +src/ +├── constants.rs # Constants used throughout the program +├── error/ # Error module +│ ├── errors.rs # Custom error definitions +│ └── mod.rs # Module declarations for error handling +├── helpers.rs # Helper functions used across the program +├── instructions/ # Instruction handlers for different game actions +│ ├── attack_monster.rs # Handles attacking a monster +│ ├── collect_points.rs # Handles collecting points +│ ├── create_game.rs # Handles game creation +│ ├── create_player.rs # Handles player creation +│ ├── mod.rs # Module declarations for instructions +│ └── spawn_monster.rs # Handles spawning a new monster +├── lib.rs # Main entry point for the program +└── state/ # State module for game data structures + ├── game.rs # Game state representation + ├── mod.rs # Module declarations for state + ├── monster.rs # Monster state representation + └── player.rs # Player state representation +``` + +When added to the program, the accounts should look like this: + +```rust +// ----------- ACCOUNTS ---------- + +// Inside `state/game.rs` +use anchor_lang::prelude::*; +#[account] +#[derive(InitSpace)] +pub struct Game { + pub game_master: Pubkey, + pub treasury: Pubkey, + pub action_points_collected: u64, + pub game_config: GameConfig, +} + +#[derive(AnchorSerialize, AnchorDeserialize, Clone, InitSpace)] +pub struct GameConfig { + pub max_items_per_player: u8 +} + +// Inside `state/player.rs` +use anchor_lang::prelude::*; +#[account] +#[derive(InitSpace)] +pub struct Player { // 8 bytes + pub player: Pubkey, // 32 bytes + pub game: Pubkey, // 32 bytes + + pub action_points_spent: u64, // 8 bytes + pub action_points_to_be_collected: u64, // 8 bytes + + pub status_flag: u8, // 8 bytes + pub experience: u64, // 8 bytes + pub kills: u64, // 8 bytes + pub next_monster_index: u64, // 8 bytes + + pub inventory: Vec, // Max 8 items +} + +#[derive(AnchorSerialize, AnchorDeserialize, Clone, InitSpace)] +pub struct InventoryItem { + pub name: [u8; 32], // Fixed Name up to 32 bytes + pub amount: u64 +} + + +// Inside `state/monster.rs` +use 
anchor_lang::prelude::*; +#[account] +#[derive(InitSpace)] +pub struct Monster { + pub player: Pubkey, + pub game: Pubkey, + pub hitpoints: u64, +} +``` + +There aren't a lot of complicated design decisions here, but let's talk about +the `inventory` field on the `Player` struct. Since `inventory` is variable in +length we decided to place it at the end of the account to make querying easier. + +### 3. Create Ancillary Types + +The next thing we need to do is add some of the types our accounts reference +that we haven't created yet. + +Let's start with the game config struct. Technically, this could have gone in +the `Game` account, but it's nice to have some separation and encapsulation. +This struct should store the max items allowed per player. + +```rust title="game.rs" +// ----------- GAME CONFIG ---------- +// Inside `state/game.rs` +#[derive(AnchorSerialize, AnchorDeserialize, Clone, InitSpace)] +pub struct GameConfig { + pub max_items_per_player: u8 +} +``` + +Reallocating accounts in Solana programs has become more flexible due to +Anchor's +[`realloc`](https://docs.rs/anchor-lang/latest/anchor_lang/derive.Accounts.html#normal-constraints) +account constraint and Solana's account resizing capabilities. While adding +fields at the end of an account structure remains straightforward, modern +practices allow for more adaptable designs: + +1. Use Anchor's `realloc` constraint in the `#[account()]` attribute to specify + resizing parameters: + + ```rust + #[account( + mut, + realloc = AccountStruct::INIT_SPACE, + realloc::payer = payer, + realloc::zero = false, + )] + ``` + +2. Use Anchor's `InitSpace` attribute to automatically calculate account space. +3. For variable-length fields like `Vec` or `String`, use the `max_len` + attribute to specify maximum size. +4. When adding new fields, consider using `Option` for backward + compatibility. +5. Implement a versioning system in your account structure to manage different + layouts. +6. Ensure the payer account is mutable and a signer to cover reallocation costs: + + ```rust + #[account(mut)] + pub payer: Signer<'info>, + ``` + +This approach allows for easier account structure evolution, regardless of where +new fields are added, while maintaining efficient querying and +serialization/deserialization through Anchor's built-in capabilities. It enables +resizing accounts as needed, automatically handling rent-exemption. + +Next, let's create our status flags. Remember, we _could_ store our flags as +booleans but we save space by storing multiple flags in a single byte. Each flag +takes up a different bit within the byte. We can use the `<<` operator to place +`1` in the correct bit. + +```rust title="constants.rs" +// ----------- STATUS ---------- + +pub const IS_FROZEN_FLAG: u8 = 1 << 0; +pub const IS_POISONED_FLAG: u8 = 1 << 1; +pub const IS_BURNING_FLAG: u8 = 1 << 2; +pub const IS_BLESSED_FLAG: u8 = 1 << 3; +pub const IS_CURSED_FLAG: u8 = 1 << 4; +pub const IS_STUNNED_FLAG: u8 = 1 << 5; +pub const IS_SLOWED_FLAG: u8 = 1 << 6; +pub const IS_BLEEDING_FLAG: u8 = 1 << 7; + +pub const NO_EFFECT_FLAG: u8 = 0b00000000; +pub const ANCHOR_DISCRIMINATOR: usize = 8; +pub const MAX_INVENTORY_ITEMS: usize = 8; +``` + +Finally, let's create our `InventoryItem`. This should have fields for the +item's name and amount. 
+ +```rust title="player.rs" +// ----------- INVENTORY ---------- + +// Inside `state/player.rs` +#[derive(AnchorSerialize, AnchorDeserialize, Clone, InitSpace)] +pub struct InventoryItem { + pub name: [u8; 32], // Fixed Name up to 32 bytes + pub amount: u64 +} + +``` + +### 4. Create a helper function for spending action points + +The last thing we'll do before writing the program's instructions is create a +helper function for spending action points. Players will send action points +(lamports) to the game treasury as payment for performing actions in the game. + +Since sending lamports to a treasury requires writing data to that treasury +account, we could easily end up with a performance bottleneck if many players +are trying to write to the same treasury concurrently (See +[Dealing With Concurrency](#dealing-with-concurrency)). + +Instead, we'll send them to the player PDA account and create an instruction +that will send the lamports from that account to the treasury in one fell swoop. +This alleviates any concurrency issues since every player has their own account, +but also allows the program to retrieve those lamports at any time. + +```rust title="helper.rs" +// ----------- HELPER ---------- + +// Inside /src/helpers.rs +use anchor_lang::{prelude::*, system_program}; + +use crate::{error::RpgError, Player}; + +pub fn spend_action_points<'info>( + action_points: u64, + player_account: &mut Account<'info, Player>, + player: &AccountInfo<'info>, + system_program: &AccountInfo<'info>, +) -> Result<()> { + player_account.action_points_spent = player_account + .action_points_spent + .checked_add(action_points) + .ok_or(error!(RpgError::ArithmeticOverflow))?; + + player_account.action_points_to_be_collected = player_account + .action_points_to_be_collected + .checked_add(action_points) + .ok_or(error!(RpgError::ArithmeticOverflow))?; + + system_program::transfer( + CpiContext::new( + system_program.to_account_info(), + system_program::Transfer { + from: player.to_account_info(), + to: player_account.to_account_info(), + }, + ), + action_points, + )?; + + msg!("Minus {} action points", action_points); + + Ok(()) +} +``` + +### 5. Create Game + +Our first instruction will create the `game` account. Anyone can be a +`game_master` and create their own game, but once a game has been created there +are certain constraints. + +For one, the `game` account is a PDA using its `treasury` wallet. This ensures +that the same `game_master` can run multiple games if they use a different +treasury for each. + + + +The `treasury` is a signer on the instruction. This is to make sure whoever is +creating the game has the private keys to the `treasury`. This is a design +decision rather than "the right way." Ultimately, it's a security measure to +ensure the game master will be able to retrieve their funds. 
+ + + +```rust title="create_game.rs" +// ----------- CREATE GAME ---------- + +// Inside src/instructions/create_game.rs +use anchor_lang::prelude::*; + +use crate::{error::RpgError, Game, ANCHOR_DISCRIMINATOR}; + +#[derive(Accounts)] +pub struct CreateGame<'info> { + #[account( + init, + seeds = [b"GAME", treasury.key().as_ref()], + bump, + payer = game_master, + space = ANCHOR_DISCRIMINATOR + Game::INIT_SPACE + )] + pub game: Account<'info, Game>, + #[account(mut)] + pub game_master: Signer<'info>, + pub treasury: Signer<'info>, + pub system_program: Program<'info, System>, +} + +pub fn run_create_game(ctx: Context, max_items_per_player: u8) -> Result<()> { + if max_items_per_player == 0 { + return Err(error!(RpgError::InvalidGameConfig)); + } + + let game = &mut ctx.accounts.game; + game.game_master = ctx.accounts.game_master.key(); + game.treasury = ctx.accounts.treasury.key(); + game.action_points_collected = 0; + game.game_config.max_items_per_player = max_items_per_player; + + msg!("Game created!"); + Ok(()) +} +``` + +### 6. Create Player + +Our second instruction will create the `player` account. There are three +tradeoffs to note about this instruction: + +1. The player account is a PDA account derived using the `game` and `player` + wallet. This let's players participate in multiple games but only have one + player account per game. +2. We wrap the `game` account in a `Box` to place it on the heap, ensuring we + don't max out the Stack. +3. The first action any player makes is spawning themselves in, so we call + `spend_action_points`. Right now we hardcode `action_points_to_spend` to be + 100 lamports, but this could be something added to the game config in the + future. + +```rust title="create_player.rs" +// ----------- CREATE PLAYER ---------- + +// Inside src/instructions/create_player.rs +use anchor_lang::prelude::*; + +use crate::{ + error::RpgError, helpers::spend_action_points, Game, Player, ANCHOR_DISCRIMINATOR, + CREATE_PLAYER_ACTION_POINTS, NO_EFFECT_FLAG, +}; + +#[derive(Accounts)] +pub struct CreatePlayer<'info> { + pub game: Box>, + #[account( + init, + seeds = [ + b"PLAYER", + game.key().as_ref(), + player.key().as_ref() + ], + bump, + payer = player, + space = ANCHOR_DISCRIMINATOR + Player::INIT_SPACE + )] + pub player_account: Account<'info, Player>, + #[account(mut)] + pub player: Signer<'info>, + pub system_program: Program<'info, System>, +} + +pub fn run_create_player(ctx: Context) -> Result<()> { + let player_account = &mut ctx.accounts.player_account; + player_account.player = ctx.accounts.player.key(); + player_account.game = ctx.accounts.game.key(); + player_account.status_flag = NO_EFFECT_FLAG; + player_account.experience = 0; + player_account.kills = 0; + + msg!("Hero has entered the game!"); + + // Spend 100 lamports to create player + let action_points_to_spend = CREATE_PLAYER_ACTION_POINTS; + + spend_action_points( + action_points_to_spend, + player_account, + &ctx.accounts.player.to_account_info(), + &ctx.accounts.system_program.to_account_info(), + ) + .map_err(|_| error!(RpgError::InsufficientActionPoints))?; + + Ok(()) +} +``` + +### 7. Spawn Monster + +Now that we have a way to create players, we need a way to spawn monsters for +them to fight. This instruction will create a new `Monster` account whose +address is a PDA derived from the `game` account, `player` account, and an index +representing the number of monsters the player has faced. There are two design +decisions here we should talk about: + +1. 
The PDA seeds let us keep track of all the monsters a player has spawned
+2. We wrap both the `game` and `player` accounts in `Box` to allocate them to
+   the heap
+
+```rust title="spawn_monster.rs"
+// ----------- SPAWN MONSTER ----------
+
+// Inside src/instructions/spawn_monster.rs
+use anchor_lang::prelude::*;
+
+use crate::{helpers::spend_action_points, Game, Monster, Player, SPAWN_MONSTER_ACTION_POINTS, ANCHOR_DISCRIMINATOR};
+
+#[derive(Accounts)]
+pub struct SpawnMonster<'info> {
+    pub game: Box<Account<'info, Game>>,
+    #[account(
+        mut,
+        has_one = game,
+        has_one = player,
+    )]
+    pub player_account: Box<Account<'info, Player>>,
+    #[account(
+        init,
+        seeds = [
+            b"MONSTER",
+            game.key().as_ref(),
+            player.key().as_ref(),
+            player_account.next_monster_index.to_le_bytes().as_ref()
+        ],
+        bump,
+        payer = player,
+        space = ANCHOR_DISCRIMINATOR + Monster::INIT_SPACE
+    )]
+    pub monster: Account<'info, Monster>,
+    #[account(mut)]
+    pub player: Signer<'info>,
+    pub system_program: Program<'info, System>,
+}
+
+pub fn run_spawn_monster(ctx: Context<SpawnMonster>) -> Result<()> {
+    let monster = &mut ctx.accounts.monster;
+    monster.player = ctx.accounts.player.key();
+    monster.game = ctx.accounts.game.key();
+    monster.hitpoints = 100;
+
+    let player_account = &mut ctx.accounts.player_account;
+    player_account.next_monster_index = player_account.next_monster_index.checked_add(1).unwrap();
+
+    msg!("Monster Spawned!");
+
+    // Spend 5 lamports to spawn monster
+    let action_point_to_spend = SPAWN_MONSTER_ACTION_POINTS;
+    spend_action_points(
+        action_point_to_spend,
+        player_account,
+        &ctx.accounts.player.to_account_info(),
+        &ctx.accounts.system_program.to_account_info(),
+    )?;
+
+    Ok(())
+}
+```
+
+### 8. Attack Monster
+
+Now! Let's attack those monsters and start gaining some exp!
+
+The logic here is as follows:
+
+- Players spend 1 `action_point` to attack and gain 1 `experience`
+- If the player kills the monster, their `kills` count goes up
+
+As far as design decisions, we've wrapped each of the RPG accounts in `Box` to
+allocate them to the heap. Additionally, we've used `saturating_add` when
+incrementing experience and kill counts.
+
+The `saturating_add` function ensures the number will never overflow. Say the
+`kills` field was a u8 and my current kill count was 255 (0xFF). If I killed
+another monster and added normally, the count would wrap around to 0
+(`0xFF + 0x01 = 0x00`). `saturating_add` instead keeps the value at its max if
+it's about to roll over, so `255 + 1 = 255`. The `checked_add` function returns
+`None` if the addition would overflow, which lets you surface an error instead.
+Keep this in mind when doing math in Rust. Even though `kills` is a u64 and will
+realistically never roll over with the current code, it's good practice to use
+safe math and consider roll-overs.
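+
+To make the difference concrete, here's a tiny standalone comparison in plain
+Rust (illustrative only, not part of the program):
+
+```rust
+fn main() {
+    let kills: u8 = 255;
+
+    assert_eq!(kills.saturating_add(1), 255); // clamps at u8::MAX instead of rolling over
+    assert_eq!(kills.checked_add(1), None);   // signals the overflow so you can return an error
+    assert_eq!(kills.wrapping_add(1), 0);     // the silent roll-over you want to avoid
+}
+```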
+ +```rust title="attack_monster.rs" +// ----------- ATTACK MONSTER ---------- + +// Inside src/instructions/attack_monster.rs +use anchor_lang::prelude::*; +use crate::{helpers::spend_action_points, Monster, Player, ATTACK_ACTION_POINTS, error::RpgError}; + +#[derive(Accounts)] +pub struct AttackMonster<'info> { + #[account( + mut, + has_one = player, + )] + pub player_account: Box>, + #[account( + mut, + has_one = player, + constraint = monster.game == player_account.game @ RpgError::GameMismatch + )] + pub monster: Box>, + #[account(mut)] + pub player: Signer<'info>, + pub system_program: Program<'info, System>, +} + +pub fn run_attack_monster(ctx: Context) -> Result<()> { + let player_account = &mut ctx.accounts.player_account; + let monster = &mut ctx.accounts.monster; + + let hp_before_attack = monster.hitpoints; + let hp_after_attack = monster.hitpoints.saturating_sub(1); + let damage_dealt = hp_before_attack.saturating_sub(hp_after_attack); + monster.hitpoints = hp_after_attack; + + if damage_dealt > 0 { + msg!("Damage Dealt: {}", damage_dealt); + player_account.experience = player_account.experience.saturating_add(1); + msg!("+1 EXP"); + + if hp_after_attack == 0 { + player_account.kills = player_account.kills.saturating_add(1); + msg!("You killed the monster!"); + } + } else { + msg!("Stop it's already dead!"); + } + + // Spend 1 lamport to attack monster + let action_point_to_spend = ATTACK_ACTION_POINTS; + + spend_action_points( + action_point_to_spend, + player_account, + &ctx.accounts.player.to_account_info(), + &ctx.accounts.system_program.to_account_info() + )?; + + Ok(()) +} +``` + +### 9. Redeem to Treasury + +This is our last instruction. This instruction lets anyone send the spent +`action_points` to the `treasury` wallet. + +Again, let's box the rpg accounts and use safe math. + +```rust title="collect_points.rs" +// ----------- REDEEM TO TREASURY ---------- + +// Inside src/instructions/collect_points.rs +use anchor_lang::prelude::*; +use crate::{error::RpgError, Game, Player}; + +#[derive(Accounts)] +pub struct CollectActionPoints<'info> { + #[account( + mut, + has_one = treasury @ RpgError::InvalidTreasury + )] + pub game: Box>, + #[account( + mut, + has_one = game @ RpgError::PlayerGameMismatch + )] + pub player: Box>, + #[account(mut)] + /// CHECK: It's being checked in the game account + pub treasury: UncheckedAccount<'info>, + pub system_program: Program<'info, System>, +} + +// Literally anyone who pays for the TX fee can run this command - give it to a clockwork bot +pub fn run_collect_action_points(ctx: Context) -> Result<()> { + let transfer_amount = ctx.accounts.player.action_points_to_be_collected; + + // Transfer lamports from player to treasury + let player_info = ctx.accounts.player.to_account_info(); + let treasury_info = ctx.accounts.treasury.to_account_info(); + + **player_info.try_borrow_mut_lamports()? = player_info + .lamports() + .checked_sub(transfer_amount) + .ok_or(RpgError::InsufficientFunds)?; + + **treasury_info.try_borrow_mut_lamports()? = treasury_info + .lamports() + .checked_add(transfer_amount) + .ok_or(RpgError::ArithmeticOverflow)?; + + ctx.accounts.player.action_points_to_be_collected = 0; + + ctx.accounts.game.action_points_collected = ctx.accounts.game + .action_points_collected + .checked_add(transfer_amount) + .ok_or(RpgError::ArithmeticOverflow)?; + + msg!("The treasury collected {} action points", transfer_amount); + + Ok(()) +} +``` + +### 10. 
Error Handling + +Now, let's add all the errors that we have used till now in `errors.rs` file. + +```rust title="errors.rs" +// ------------RPG ERRORS-------------- + +// Inside src/error/errors.rs + +use anchor_lang::prelude::*; + +#[error_code] +pub enum RpgError { + #[msg("Arithmetic overflow occurred")] + ArithmeticOverflow, + #[msg("Invalid game configuration")] + InvalidGameConfig, + #[msg("Player not found")] + PlayerNotFound, + #[msg("Monster not found")] + MonsterNotFound, + #[msg("Insufficient action points")] + InsufficientActionPoints, + #[msg("Invalid attack")] + InvalidAttack, + #[msg("Maximum inventory size reached")] + MaxInventoryReached, + #[msg("Invalid item operation")] + InvalidItemOperation, + #[msg("Monster and player are not in the same game")] + GameMismatch, + #[msg("Invalid treasury account")] + InvalidTreasury, + #[msg("Player does not belong to the specified game")] + PlayerGameMismatch, + #[msg("Insufficient funds for transfer")] + InsufficientFunds +} +``` + +### 11. Module Declarations + +We need to declare all the modules used in the project as follows: + +```rust + +// Inside src/error/mod.rs +pub mod errors; +pub use errors::RpgError; // Expose the custom error type + +// Inside src/instructions/mod.rs +pub mod attack_monster; +pub mod collect_points; +pub mod create_game; +pub mod create_player; +pub mod spawn_monster; + +pub use attack_monster::*; // Expose attack_monster functions +pub use collect_points::*; // Expose collect_points functions +pub use create_game::*; // Expose create_game functions +pub use create_player::*; // Expose create_player functions +pub use spawn_monster::*; // Expose spawn_monster functions + +// Inside src/state/mod.rs +pub mod game; +pub mod monster; +pub mod player; + +pub use game::*; // Expose game state +pub use monster::*; // Expose monster state +pub use player::*; // Expose player state +``` + +### 12. Putting it all Together + +Now that all of our instruction logic is written, let's add these functions to +actual instructions in the program. It can also be helpful to log compute units +for each instruction. + +```rust title="lib.rs" + +// Insider src/lib.rs +use anchor_lang::prelude::*; +use anchor_lang::solana_program::log::sol_log_compute_units; + +mod state; +mod instructions; +mod constants; +mod helpers; +mod error; + +use state::*; +use constants::*; +use instructions::*; + +declare_id!("5Sc3gJv4tvPiFzE75boYMJabbNRs44zRhtT23fLdKewz"); + +#[program] +pub mod rpg { + use super::*; + + pub fn create_game(ctx: Context, max_items_per_player: u8) -> Result<()> { + run_create_game(ctx, max_items_per_player)?; + sol_log_compute_units(); + Ok(()) + } + + pub fn create_player(ctx: Context) -> Result<()> { + run_create_player(ctx)?; + sol_log_compute_units(); + Ok(()) + } + + pub fn spawn_monster(ctx: Context) -> Result<()> { + run_spawn_monster(ctx)?; + sol_log_compute_units(); + Ok(()) + } + + pub fn attack_monster(ctx: Context) -> Result<()> { + run_attack_monster(ctx)?; + sol_log_compute_units(); + Ok(()) + } + + pub fn deposit_action_points(ctx: Context) -> Result<()> { + run_collect_action_points(ctx)?; + sol_log_compute_units(); + Ok(()) + } +} +``` + +If you added in all of the sections correctly, you should be able to build +successfully. + +```shell +anchor build +``` + +### Testing + +Now, let's put everything together and see it in action! + +We'll begin by setting up the `tests/rpg.ts` file. We will be writing each test +step by step. 
But before diving into the tests, we need to initialize a few +important accounts, specifically the `gameMaster` and the `treasury` accounts. + +```typescript title="rpg.ts" +import * as anchor from "@coral-xyz/anchor"; +import { Program } from "@coral-xyz/anchor"; +import { Rpg } from "../target/types/rpg"; +import { assert } from "chai"; +import { + Keypair, + LAMPORTS_PER_SOL, + PublicKey, + TransactionSignature, + TransactionConfirmationStrategy, +} from "@solana/web3.js"; +import NodeWallet from "@coral-xyz/anchor/dist/cjs/nodewallet"; + +const GAME_SEED = "GAME"; +const PLAYER_SEED = "PLAYER"; +const MONSTER_SEED = "MONSTER"; +const MAX_ITEMS_PER_PLAYER = 8; +const INITIAL_MONSTER_HITPOINTS = 100; +const AIRDROP_AMOUNT = 10 * LAMPORTS_PER_SOL; +const CREATE_PLAYER_ACTION_POINTS = 100; +const SPAWN_MONSTER_ACTION_POINTS = 5; +const ATTACK_MONSTER_ACTION_POINTS = 1; +const MONSTER_INDEX_BYTE_LENGTH = 8; + +const provider = anchor.AnchorProvider.env(); +anchor.setProvider(provider); + +const program = anchor.workspace.Rpg as Program; +const wallet = provider.wallet as NodeWallet; +const gameMaster = wallet; +const player = wallet; + +const treasury = Keypair.generate(); + +const findProgramAddress = (seeds: Buffer[]): [PublicKey, number] => + PublicKey.findProgramAddressSync(seeds, program.programId); + +const confirmTransaction = async ( + signature: TransactionSignature, + provider: anchor.Provider, +) => { + const latestBlockhash = await provider.connection.getLatestBlockhash(); + const confirmationStrategy: TransactionConfirmationStrategy = { + signature, + blockhash: latestBlockhash.blockhash, + lastValidBlockHeight: latestBlockhash.lastValidBlockHeight, + }; + + try { + const confirmation = + await provider.connection.confirmTransaction(confirmationStrategy); + if (confirmation.value.err) { + throw new Error( + `Transaction failed: ${confirmation.value.err.toString()}`, + ); + } + } catch (error) { + throw new Error(`Transaction confirmation failed: ${error.message}`); + } +}; + +const createGameAddress = () => + findProgramAddress([Buffer.from(GAME_SEED), treasury.publicKey.toBuffer()]); + +const createPlayerAddress = (gameAddress: PublicKey) => + findProgramAddress([ + Buffer.from(PLAYER_SEED), + gameAddress.toBuffer(), + player.publicKey.toBuffer(), + ]); + +const createMonsterAddress = ( + gameAddress: PublicKey, + monsterIndex: anchor.BN, +) => + findProgramAddress([ + Buffer.from(MONSTER_SEED), + gameAddress.toBuffer(), + player.publicKey.toBuffer(), + monsterIndex.toArrayLike(Buffer, "le", MONSTER_INDEX_BYTE_LENGTH), + ]); + +describe("RPG game", () => { + it("creates a new game", async () => {}); + + it("creates a new player", async () => {}); + + it("spawns a monster", async () => {}); + + it("attacks a monster", async () => {}); + + it("deposits action points", async () => {}); +}); +``` + +Now lets add in the `creates a new game` test. Just call `createGame` with eight +items, be sure to pass in all the accounts, and make sure the `treasury` account +signs the transaction. 
+ +```typescript +it("creates a new game", async () => { + try { + const [gameAddress] = createGameAddress(); + + const createGameSignature = await program.methods + .createGame(MAX_ITEMS_PER_PLAYER) + .accounts({ + game: gameAddress, + gameMaster: gameMaster.publicKey, + treasury: treasury.publicKey, + systemProgram: anchor.web3.SystemProgram.programId, + }) + .signers([treasury]) + .rpc(); + + await confirmTransaction(createGameSignature, provider); + } catch (error) { + throw new Error(`Failed to create game: ${error.message}`); + } +}); +``` + +Go ahead and check that your test runs: + +```typescript +yarn install +anchor test +``` + +**Hacky workaround:** If for some reason, the `yarn install` command results in +some `.pnp.*` files and no `node_modules`, you may want to call `rm -rf .pnp.*` +followed by `npm i` and then `yarn install`. That should work. + +Now that everything is running, let's implement the `creates a new player`, +`spawns a monster`, and `attacks a monster` tests. Run each test as you complete +them to make sure things are running smoothly. + +```typescript +it("creates a new player", async () => { + try { + const [gameAddress] = createGameAddress(); + const [playerAddress] = createPlayerAddress(gameAddress); + + const createPlayerSignature = await program.methods + .createPlayer() + .accounts({ + game: gameAddress, + playerAccount: playerAddress, + player: player.publicKey, + systemProgram: anchor.web3.SystemProgram.programId, + }) + .rpc(); + + await confirmTransaction(createPlayerSignature, provider); + } catch (error) { + throw new Error(`Failed to create player: ${error.message}`); + } +}); + +it("spawns a monster", async () => { + try { + const [gameAddress] = createGameAddress(); + const [playerAddress] = createPlayerAddress(gameAddress); + + const playerAccount = await program.account.player.fetch(playerAddress); + const [monsterAddress] = createMonsterAddress( + gameAddress, + playerAccount.nextMonsterIndex, + ); + + const spawnMonsterSignature = await program.methods + .spawnMonster() + .accounts({ + game: gameAddress, + playerAccount: playerAddress, + monster: monsterAddress, + player: player.publicKey, + systemProgram: anchor.web3.SystemProgram.programId, + }) + .rpc(); + + await confirmTransaction(spawnMonsterSignature, provider); + } catch (error) { + throw new Error(`Failed to spawn monster: ${error.message}`); + } +}); + +it("attacks a monster", async () => { + try { + const [gameAddress] = createGameAddress(); + const [playerAddress] = createPlayerAddress(gameAddress); + + const playerAccount = await program.account.player.fetch(playerAddress); + const [monsterAddress] = createMonsterAddress( + gameAddress, + playerAccount.nextMonsterIndex.subn(1), + ); + + const attackMonsterSignature = await program.methods + .attackMonster() + .accounts({ + playerAccount: playerAddress, + monster: monsterAddress, + player: player.publicKey, + systemProgram: anchor.web3.SystemProgram.programId, + }) + .rpc(); + + await confirmTransaction(attackMonsterSignature, provider); + + const monsterAccount = await program.account.monster.fetch(monsterAddress); + assert( + monsterAccount.hitpoints.eqn(INITIAL_MONSTER_HITPOINTS - 1), + "Monster hitpoints should decrease by 1 after attack", + ); + } catch (error) { + throw new Error(`Failed to attack monster: ${error.message}`); + } +}); +``` + +Notice the monster that we choose to attack is +`playerAccount.nextMonsterIndex.subn(1).toBuffer('le', 8)`. This allows us to +attack the most recent monster spawned. 
Anything below the `nextMonsterIndex` +should be okay. Lastly, since seeds are just an array of bytes we have to turn +the index into the u64, which is a little endian `le` at 8 bytes. + +Run `anchor test` to deal some damage! + +Finally, let's write a test to gather all the deposited action points. This test +may feel complex for what it's doing. That's because we're generating some new +accounts to show that anyone could call the redeem function +`depositActionPoints`. We use names like `clockwork` for these because if this +game were running continuously, it probably makes sense to use something like +[clockwork](https://www.clockwork.xyz/) cron jobs. + +```typescript +it("deposits action points", async () => { + try { + const [gameAddress] = createGameAddress(); + const [playerAddress] = createPlayerAddress(gameAddress); + + // To show that anyone can deposit the action points + // Ie, give this to a clockwork bot + const clockworkWallet = anchor.web3.Keypair.generate(); + + // To give it a starting balance + const clockworkProvider = new anchor.AnchorProvider( + program.provider.connection, + new NodeWallet(clockworkWallet), + anchor.AnchorProvider.defaultOptions(), + ); + + // Have to give the accounts some lamports else the tx will fail + const amountToInitialize = 10000000000; + + const clockworkAirdropTx = + await clockworkProvider.connection.requestAirdrop( + clockworkWallet.publicKey, + amountToInitialize, + ); + + await confirmTransaction(clockworkAirdropTx, clockworkProvider); + + const treasuryAirdropTx = await clockworkProvider.connection.requestAirdrop( + treasury.publicKey, + amountToInitialize, + ); + + await confirmTransaction(treasuryAirdropTx, clockworkProvider); + + const depositActionPointsSignature = await program.methods + .depositActionPoints() + .accounts({ + game: gameAddress, + player: playerAddress, + treasury: treasury.publicKey, + systemProgram: anchor.web3.SystemProgram.programId, + }) + .rpc(); + + await confirmTransaction(depositActionPointsSignature, provider); + + const expectedActionPoints = + CREATE_PLAYER_ACTION_POINTS + + SPAWN_MONSTER_ACTION_POINTS + + ATTACK_MONSTER_ACTION_POINTS; + const treasuryBalance = await provider.connection.getBalance( + treasury.publicKey, + ); + assert( + treasuryBalance === AIRDROP_AMOUNT + expectedActionPoints, + "Treasury balance should match expected action points", + ); + + const gameAccount = await program.account.game.fetch(gameAddress); + assert( + gameAccount.actionPointsCollected.eqn(expectedActionPoints), + "Game action points collected should match expected", + ); + + const playerAccount = await program.account.player.fetch(playerAddress); + assert( + playerAccount.actionPointsSpent.eqn(expectedActionPoints), + "Player action points spent should match expected", + ); + assert( + playerAccount.actionPointsToBeCollected.eqn(0), + "Player should have no action points to be collected", + ); + } catch (error) { + throw new Error(`Failed to deposit action points: ${error.message}`); + } +}); +``` + +Finally, run `anchor test` to see everything working. + +```bash + +RPG game + ✔ creates a new game (317ms) + ✔ creates a new player (399ms) + ✔ spawns a monster (411ms) + ✔ attacks a monster (413ms) + ✔ deposits action points (1232ms) +``` + +Congratulations! This was a lot to cover, but you now have a mini RPG game +engine. If things aren't quite working, go back through the lab and find where +you went wrong. 
If you need to, you can refer to the +[`main` branch of the solution code](https://github.com/solana-developers/anchor-rpg). + +Be sure to put these concepts into practice in your own programs. Each little +optimization adds up! + +## Challenge + +Now it's your turn to practice independently. Go back through the lab code +looking for additional optimizations and/or expansions you can make. Think +through new systems and features you would add and how you would optimize them. + +You can find some example modifications on the +[`challenge-solution` branch of the RPG repository](https://github.com/solana-developers/anchor-rpg/tree/challenge-solution). + +Finally, go through one of your own programs and think about optimizations you +can make to improve memory management, storage size, and/or concurrency. + + + +Push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=4a628916-91f5-46a9-8eb0-6ba453aa6ca6)! + + diff --git a/content/courses/program-optimization/program-configuration.mdx b/content/courses/program-optimization/program-configuration.mdx new file mode 100644 index 000000000..84f923a6c --- /dev/null +++ b/content/courses/program-optimization/program-configuration.mdx @@ -0,0 +1,1159 @@ +--- +title: Program Configuration +objectives: + - Define program features in the `Cargo.toml` file + - Use the native Rust `cfg` attribute to conditionally compile code based on + which features are or are not enabled + - Use the native Rust `cfg!` macro to conditionally compile code based on + which features are or are not enabled + - Create an admin-only instruction to set up a program account that can be + used to store program configuration values +description: + "Create distinct environments, feature flags and admin-only instructions." +--- + +## Summary + +- There are no "out of the box" solutions for creating distinct environments in + an onchain program, but you can achieve something similar to environment + variables if you get creative. +- You can use the + [`cfg` attribute](https://doc.rust-lang.org/rust-by-example/attribute/cfg.html) + with **Rust features** (`#[cfg(feature = ...)]`) to run different code or + provide different variable values based on the Rust feature provided. _This + happens at compile-time and doesn't allow you to swap values after a program + has been deployed_. +- Similarly, you can use the + [`cfg!` **macro**](https://doc.rust-lang.org/std/macro.cfg.html) to compile + different code paths based on the enabled features. +- For environment-like variables post-deployment, create program accounts and + admin-only instructions accessible by the program's upgrade authority. + +## Lesson + +One of the difficulties engineers face across all types of software development +is that of writing testable code and creating distinct environments for local +development, testing, production, etc. + +This is especially difficult in Solana program development. For instance, +imagine building an NFT staking program where each staked NFT earns 10 reward +tokens daily. How can you test the ability to claim rewards when tests run in +just a few hundred milliseconds—not nearly long enough to accrue rewards? + +In traditional web development, this is often addressed through environment +variables, allowing different values in distinct "environments." However, Solana +programs currently lack a formal concept of environment variables. 
If they +existed, you could easily modify the rewards in your test environment to +something like 10,000,000 tokens per day, making it easier to test claiming +rewards. + +Luckily, you can mimic this functionality with a bit of creativity. The most +effective solution involves a combination of two techniques: + +1. **Native Rust** feature flags that let you specify the "environment" during + your build, allowing the code to adjust values based on the specified build. +2. **Admin-only** program accounts and instructions that are only accessible by + the program's upgrade `authority` for setting and managing configuration + values post-deployment. + +### Native Rust Feature Flags + +One of the simplest ways to create environments is to use Rust features. +Features are defined in the `[features]` table of the program's `Cargo.toml` +file. You may define multiple features for different use cases. + +```toml +[features] +feature-one = [] +feature-two = [] +``` + +It's important to note that the above simply defines a feature. To enable a +feature when testing your program, you can use the `--features` flag with the +`anchor test` command. + +```bash +anchor test -- --features "feature-one" +``` + +You can also specify multiple features by separating them with a comma. + +```bash +anchor test -- --features "feature-one", "feature-two" +``` + +#### Make Code Conditional Using the cfg Attribute + +With a feature defined, you can then use the `cfg` attribute within your code to +conditionally compile code based on whether or not a given feature is enabled. +This allows you to include or exclude certain code from your program. + +The syntax for using the `cfg` attribute is like any other attribute macro: +`#[cfg(feature=[FEATURE_HERE])]`. For example, the following code compiles the +function `function_for_testing` when the `testing` feature is enabled and the +`function_when_not_testing` otherwise: + +```rust +#[cfg(feature = "testing")] +fn function_for_testing() { + // code that will be included only if the "testing" feature flag is enabled +} + +#[cfg(not(feature = "testing"))] +fn function_when_not_testing() { + // code that will be included only if the "testing" feature flag is not enabled +} +``` + +This allows you to enable or disable certain functionality in your Anchor +program at compile time by enabling or disabling the feature. + +It's not a stretch to imagine wanting to use this to create distinct +"environments" for different program deployments. For example, not all tokens +have deployments across both Mainnet and Devnet. So you might hard-code one +token address for Mainnet deployments but hard-code a different address for +Devnet and Localnet deployments. That way you can quickly switch between +different environments without requiring any changes to the code itself. 
+ +The code below shows an example of an Anchor program that uses the `cfg` +attribute to include different token addresses for local testing compared to +other deployments: + +```rust +use anchor_lang::prelude::*; +use anchor_spl::token::{Mint, Token, TokenAccount}; + +declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); + +#[cfg(feature = "local-testing")] +pub mod constants { + use solana_program::{pubkey, pubkey::Pubkey}; + pub const USDC_MINT_PUBKEY: Pubkey = pubkey!("WaoKNLQVDyBx388CfjaVeyNbs3MT2mPgAhoCfXyUvg8"); +} + +#[cfg(not(feature = "local-testing"))] +pub mod constants { + use solana_program::{pubkey, pubkey::Pubkey}; + pub const USDC_MINT_PUBKEY: Pubkey = pubkey!("EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v"); +} + +#[program] +pub mod test_program { + use super::*; + + pub fn initialize_usdc_token_account(ctx: Context) -> Result<()> { + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Initialize<'info> { + #[account( + init, + payer = payer, + token::mint = mint, + token::authority = payer, + )] + pub token: Account<'info, TokenAccount>, + #[account(address = constants::USDC_MINT_PUBKEY)] + pub mint: Account<'info, Mint>, + #[account(mut)] + pub payer: Signer<'info>, + pub token_program: Program<'info, Token>, + pub system_program: Program<'info, System>, + pub rent: Sysvar<'info, Rent>, +} +``` + +In this example, the `cfg` attribute is used to conditionally compile two +different implementations of the `constants` module. This allows the program to +use different values for the `USDC_MINT_PUBKEY` constant depending on whether or +not the `local-testing` feature is enabled. + +#### Make Code Conditional using the cfg! Macro + +Similar to the `cfg` attribute, the `cfg!` **macro** in Rust allows you to check +the values of certain configuration flags at runtime. This can be useful if you +want to execute different code paths depending on the values of certain +configuration flags. + +You could use this to bypass or adjust the time-based constraints required in +the NFT staking app we mentioned previously. When running a test, you can +execute code that provides far higher staking rewards when compared to running a +production build. + +To use the `cfg!` macro in an Anchor program, you simply add a `cfg!` macro call +to the conditional statement in question: + +```rust +#[program] +pub mod my_program { + use super::*; + + pub fn test_function(ctx: Context) -> Result<()> { + if cfg!(feature = "local-testing") { + // This code will be executed only if the "local-testing" feature is enabled + // ... + } else { + // This code will be executed only if the "local-testing" feature is not enabled + // ... + } + // Code that should always be included goes here + ... + Ok(()) + } +} +``` + +In this example, the `test_function` uses the `cfg!` macro to check the value of +the `local-testing` feature at runtime. If the `local-testing` feature is +enabled, the first code path is executed. If the `local-testing` feature is not +enabled, the second code path is executed instead. + +### Admin-only Instructions + +Feature flags are great for adjusting values and code paths at compilation, but +they don't help much if you end up needing to adjust something after you've +already deployed your program. + +For example, if your NFT staking program has to pivot and use a different +rewards token, there'd be no way to update the program without redeploying. If +only there were a way for program admins to update certain program values... +Well, it's possible! 
+ +First, you need to structure your program to store the values you anticipate +changing in an account rather than hard-coding them into the program code. + +Next, you need to ensure that this account can only be updated by some known +program authority, or what we're calling an admin. That means any instructions +that modify the data on this account need to have constraints limiting who can +sign for the instruction. This sounds fairly straightforward in theory, but +there is one main issue: how does the program know who is an authorized admin? + +Well, there are a few solutions, each with their own benefits and drawbacks: + +1. Hard-code an admin public key that can be used in the admin-only instruction + constraints. +2. Make the program's upgrade authority the admin. +3. Store the admin in the config account and set the first admin in an + `initialize` instruction. + +#### Create the config account + +The first step is adding what we'll call a "config" account to your program. You +can customize this to best suit your needs, but we suggest a single global PDA. +In Anchor, that simply means creating an account struct and using a single seed +to derive the account's address. + +```rust +pub const SEED_PROGRAM_CONFIG: &[u8] = b"program_config"; + +#[account] +pub struct ProgramConfig { + reward_token: Pubkey, + rewards_per_day: u64, +} +``` + +The example above shows a hypothetical config account for the NFT staking +program example we've referenced throughout the lesson. It stores data +representing the token that should be used for rewards and the amount of tokens +to give out for each day of staking. + +With the config account defined, simply ensure that the rest of your code +references this account when using these values. That way, if the data in the +account changes, the program adapts accordingly. + +#### Constrain config updates to hard-coded admins + +You'll need a way to initialize and update the config account data. That means +you need to have one or more instructions that only an admin can invoke. The +simplest way to do this is to hard-code an admin's public key in your code and +then add a simple signer check into your instruction's account validation +comparing the signer to this public key. + +In Anchor, constraining an `update_program_config` instruction handler to only +be usable by a hard-coded admin might look like this: + +```rust +#[program] +mod my_program { + pub fn update_program_config( + ctx: Context, + reward_token: Pubkey, + rewards_per_day: u64 + ) -> Result<()> { + ctx.accounts.program_config.reward_token = reward_token; + ctx.accounts.program_config.rewards_per_day = rewards_per_day; + + Ok(()) + } +} + +pub const SEED_PROGRAM_CONFIG: &[u8] = b"program_config"; + +#[constant] +pub const ADMIN_PUBKEY: Pubkey = pubkey!("ADMIN_WALLET_ADDRESS_HERE"); + +#[derive(Accounts)] +pub struct UpdateProgramConfig<'info> { + #[account(mut, seeds = SEED_PROGRAM_CONFIG, bump)] + pub program_config: Account<'info, ProgramConfig>, + #[account(constraint = authority.key() == ADMIN_PUBKEY)] + pub authority: Signer<'info>, +} +``` + +Before instruction handler logic even executes, a check will be performed to +make sure the instruction's signer matches the hard-coded `ADMIN_PUBKEY`. Notice +that the example above doesn't show the instruction handler that initializes the +config account, but it should have similar constraints to ensure that an +attacker can't initialize the account with unexpected values. 
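+
+For reference, here is a minimal sketch of what that initialization step might
+look like with the same hard-coded check. The account names and the space
+calculation are illustrative assumptions rather than lesson code:
+
+```rust
+#[derive(Accounts)]
+pub struct InitializeProgramConfig<'info> {
+    #[account(
+        init,
+        seeds = [SEED_PROGRAM_CONFIG],
+        bump,
+        payer = authority,
+        // discriminator + reward_token Pubkey + rewards_per_day u64
+        space = 8 + 32 + 8
+    )]
+    pub program_config: Account<'info, ProgramConfig>,
+    // Same hard-coded admin check as the update instruction
+    #[account(mut, constraint = authority.key() == ADMIN_PUBKEY)]
+    pub authority: Signer<'info>,
+    pub system_program: Program<'info, System>,
+}
+```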
+ +While this approach works, it also means keeping track of an admin wallet on top +of keeping track of a program's upgrade authority. With a few more lines of +code, you could simply restrict an instruction to only be callable by the +upgrade authority. The only tricky part is getting a program's upgrade authority +to compare against. + +#### Constrain config updates to the program's upgrade authority + +Fortunately, every program has a program data account that translates to the +Anchor `ProgramData` account type and has the `upgrade_authority_address` field. +The program itself stores this account's address in its data in the field +`programdata_address`. + +So in addition to the two accounts required by the instruction in the hard-coded +admin example, this instruction requires the `program` and the `program_data` +accounts. + +The accounts then need the following constraints: + +1. A constraint on `program` ensuring that the provided `program_data` account + matches the program's `programdata_address` field +2. A constraint on the `program_data` account ensuring that the instruction's + signer matches the `program_data` account's `upgrade_authority_address` + field. + +When completed, that looks like this: + +```rust +... + +#[derive(Accounts)] +pub struct UpdateProgramConfig<'info> { + #[account(mut, seeds = SEED_PROGRAM_CONFIG, bump)] + pub program_config: Account<'info, ProgramConfig>, + #[account(constraint = program.programdata_address()? == Some(program_data.key()))] + pub program: Program<'info, MyProgram>, + #[account(constraint = program_data.upgrade_authority_address == Some(authority.key()))] + pub program_data: Account<'info, ProgramData>, + pub authority: Signer<'info>, +} +``` + +Again, the example above doesn't show the instruction that initializes the +config account, but it should have the same constraints to ensure that the +attacker can't initialize the account with unexpected values. + +If this is the first time you've heard about the program data account, it's +worth reading through +[this Notion doc](https://www.notion.so/29780c48794c47308d5f138074dd9838) about +program deploys. + +#### Constrain config updates to a provided admin + +Both of the previous options are fairly secure but also inflexible. What if you +want to update the admin to be someone else? For that, you can store the admin +on the config account. + +```rust +pub const SEED_PROGRAM_CONFIG: &[u8] = b"program_config"; + +#[account] +pub struct ProgramConfig { + admin: Pubkey, + reward_token: Pubkey, + rewards_per_day: u64, +} +``` + +Then you can constrain your "update" instructions with a signer check matching +against the config account's `admin` field. + +```rust +... + +pub const SEED_PROGRAM_CONFIG: &[u8] = b"program_config"; + +#[derive(Accounts)] +pub struct UpdateProgramConfig<'info> { + #[account(mut, seeds = SEED_PROGRAM_CONFIG, bump)] + pub program_config: Account<'info, ProgramConfig>, + #[account(constraint = authority.key() == program_config.admin)] + pub authority: Signer<'info>, +} +``` + +There's one catch here: in the time between deploying a program and initializing +the config account, _there is no admin_. This means that the instruction for +initializing the config account can't be constrained to only allow admins as +callers. That means it could be called by an attacker looking to set themselves +as the admin. 
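+
+To make that catch concrete, a rough sketch (names again assumed) of an
+unconstrained initialize instruction that simply records whoever signs as the
+admin might look like this; note that nothing in it stops an arbitrary caller
+from getting there first:
+
+```rust
+#[derive(Accounts)]
+pub struct InitializeProgramConfig<'info> {
+    #[account(
+        init,
+        seeds = [SEED_PROGRAM_CONFIG],
+        bump,
+        payer = authority,
+        // discriminator + admin + reward_token + rewards_per_day
+        space = 8 + 32 + 32 + 8
+    )]
+    pub program_config: Account<'info, ProgramConfig>,
+    // No address or constraint check is possible here yet
+    #[account(mut)]
+    pub authority: Signer<'info>,
+    pub system_program: Program<'info, System>,
+}
+
+pub fn initialize_program_config(ctx: Context<InitializeProgramConfig>) -> Result<()> {
+    // Whoever signs this instruction first becomes the admin
+    ctx.accounts.program_config.admin = ctx.accounts.authority.key();
+    // ...set the reward fields here as well
+    Ok(())
+}
+```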
+ +While this sounds bad, it really just means that you shouldn't treat your +program as "initialized" until you've initialized the config account yourself +and verified that the admin listed on the account is who you expect. If your +deploy script deploys and then immediately calls `initialize`, it's very +unlikely that an attacker is even aware of your program's existence much less +trying to make themselves the admin. If by some crazy stroke of bad luck someone +"intercepts" your program, you can close the program with the upgrade authority +and redeploy. + +## Lab + +Now let's go ahead and try this out together. For this lab, we'll be working +with a simple program that enables USDC payments. The program collects a small +fee for facilitating the transfer. Note that this is somewhat contrived since +you can do direct transfers without an intermediary contract, but it simulates +how some complex DeFi programs work. + +We'll quickly learn while testing our program that it could benefit from the +flexibility provided by an admin-controlled configuration account and some +feature flags. + +### 1. Starter + +Download the starter code from +the [`starter` branch of this repository](https://github.com/solana-developers/admin-instructions/tree/starter). +The code contains a program with a single instruction handler and a single test +in the `tests` directory. + +Let's quickly walk through how the program works. + +The `lib.rs` file includes a constant for the USDC address and a single +`payment` instruction. The `payment` instruction simply calls the +`payment_handler` instruction handler in the `instructions/payment.rs` file +where the instruction handler logic is contained. + +The `instructions/payment.rs` file contains both the `payment_handler` function +as well as the `Payment` account validation struct representing the accounts +required by the `payment` instruction. The `payment_handler` instruction handler +calculates a 1% fee from the payment amount, transfers the fee to a designated +token account, and transfers the remaining amount to the payment recipient. + +Finally, the `tests` directory has a single test file, `config.ts` that simply +invokes the `payment` instruction and asserts that the corresponding token +account balances have been debited and credited accordingly. + +Before we continue, take a few minutes to familiarize yourself with these files +and their contents. + +### 2. Run the existing test + +Let's start by running the existing test. + +Make sure you use `yarn` or `npm install` to install the dependencies laid out +in the `package.json` file. Then be sure to run `anchor keys list` to get the +public key for your program printed to the console. This differs based on the +keypair you have locally, so you need to update `lib.rs` and `Anchor.toml` to +use _your_ key. + +Finally, run `anchor test` to start the test. It should fail with the following +output: + +```shell +Error: failed to send transaction: Transaction simulation failed: Error processing Instruction 0: incorrect program id for instruction +``` + +The reason for this error is that we're attempting to use the mainnet USDC mint +address (as hard-coded in the `lib.rs` file of the program), but that mint +doesn't exist in the local environment. + +### 3. Adding a local-testing feature + +To fix this, we need a mint we can use locally _and_ hard-code into the program. 
+Since the local environment is reset often during testing, you'll need to store +a keypair that you can use to recreate the same mint address every time. + +Additionally, you don't want to have to change the hard-coded address between +local and mainnet builds since that could introduce human error (and is just +annoying). So we'll create a `local-testing` feature that, when enabled, will +make the program use our local mint but otherwise use the production USDC mint. + +Generate a new keypair by running `solana-keygen grind`. Run the following +command to generate a keypair with a public key that begins with "env". + +```shell +solana-keygen grind --starts-with env:1 +``` + +Once a keypair is found, you should see an output similar to the following: + +```shell +Wrote keypair to env9Y3szLdqMLU9rXpEGPqkjdvVn8YNHtxYNvCKXmHe.json +``` + + + +Make sure to add the generated keypair file +(`env9Y3szLdqMLU9rXpEGPqkjdvVn8YNHtxYNvCKXmHe.json`) to your `.gitignore` file +to prevent accidentally committing and leaking your keypair to GitHub or other +version control platforms. If you plan to use the keypair later, securing it +properly is critical. + + + +The keypair is written to a file in your working directory. Now that we have a +placeholder USDC address, let's modify the `lib.rs` file. Use the `cfg` +attribute to define the `USDC_MINT_PUBKEY` constant depending on whether the +`local-testing` feature is enabled or disabled. Remember to set the +`USDC_MINT_PUBKEY` constant for `local-testing` with the one generated in the +previous step rather than copying the one below. + +```rust +use anchor_lang::prelude::*; +mod instructions; +use instructions::*; + +declare_id!("BC3RMBvVa88zSDzPXnBXxpnNYCrKsxnhR3HwwHhuKKei"); + +#[cfg(feature = "local-testing")] +#[constant] +pub const USDC_MINT_PUBKEY: Pubkey = pubkey!("..."); + +#[cfg(not(feature = "local-testing"))] +#[constant] +pub const USDC_MINT_PUBKEY: Pubkey = pubkey!("EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v"); + +#[program] +pub mod config { + use super::*; + + pub fn payment(ctx: Context, amount: u64) -> Result<()> { + instructions::payment_handler(ctx, amount) + } +} +``` + +Next, add the `local-testing` feature to the `Cargo.toml` file located in +`/programs`. + +```shell +[features] +... +local-testing = [] +``` + +Next, update the `config.ts` test file to create a mint using the generated +keypair. Start by deleting the `mint` constant. + +```typescript +const USDC_MINT = new PublicKey("EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v"); +``` + + + +The `anchor test` command, when run on a local network, starts a new test +validator using `solana-test-validator`. This test validator uses a +non-upgradeable loader. The non-upgradeable loader makes it so the program's +`program_data` account isn't initialized when the validator starts. You'll +recall from the lesson that this account is how we access the upgrade authority +from the program. + + + +To work around this, you can add a `deploy` function to the test file that runs +the deploy command for the program with an upgradeable loader. To use it, run +`anchor test --skip-deploy`, and call the `deploy` function within the test to +run the deploy command after the test validator has started. + +```typescript +import { execSync } from "child_process"; +import path from "path"; + +... 
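+
+// The `deploy` helper defined below shells out to `solana program deploy`, which
+// uses the upgradeable loader, so the deployed program has a program data account.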
+ +const deploy = () => { + const workingDirectory = process.cwd(); + const programKeypairPath = path.join( + workingDirectory, + "target", + "deploy", + "config-keypair.json", + ); + const programBinaryPath = path.join( + workingDirectory, + "target", + "deploy", + "config.so", + ); + + const deploy_command = `solana program deploy --url localhost -v --program-id "${programKeypairPath}" "${programBinaryPath}"`; + + try { + execSync(deploy_command, { stdio: "inherit" }); + console.log("Program deployed successfully"); + } catch (error) { + console.error("Error deploying program:", error.message); + throw error; + } +}; + +... + +before(async () => { + deploy(); + ... +}); +``` + +For example, the command to run the test with features would look like this: + +```shell +anchor test --skip-deploy -- --features "local-testing" +``` + +Next, update the test to create a mint using the keypair, which will enable us +to reuse the same mint address each time the tests are run. Remember to replace +the file name with the one generated in the previous step. + +```typescript +let tokenMint: PublicKey; + +const deploy = () => { + const workingDirectory = process.cwd(); + const programKeypairPath = path.join( + workingDirectory, + "target", + "deploy", + "config-keypair.json", + ); + const programBinaryPath = path.join( + workingDirectory, + "target", + "deploy", + "config.so", + ); + + const deploy_command = `solana program deploy --url localhost -v --program-id "${programKeypairPath}" "${programBinaryPath}"`; + + try { + execSync(deploy_command, { stdio: "inherit" }); + console.log("Program deployed successfully"); + } catch (error) { + console.error("Error deploying program:", error.message); + throw error; + } +}; + +before(async () => { + try { + deploy(); + const mintKeypairData = fs.readFileSync( + "envYcAnc9BvWEqDy4VKJsiECCbbc72Fynz87rBih6DV.json" + ); + const mintKeypair = Keypair.fromSecretKey( + new Uint8Array(JSON.parse(mintKeypairData)) + ); + + tokenMint = await createMint( + connection, + walletAuthority.payer, + walletAuthority.publicKey, + null, + 0, + mintKeypair + ); +... +``` + +Lastly, run the test with the `local-testing` feature enabled. + +```shell +anchor test --skip-deploy -- --features "local-testing" +``` + +You should see the following output: + +```shell +Config + ✔ completes payment successfully (432ms) + + + 1 passing (21s) +``` + +Boom. Just like that, you've used features to run two different code paths for +different environments. + +### 4. Program Config + +Features are great for setting different values at compilation, but what if you +wanted to be able to dynamically update the fee percentage used by the program? +Let's make that possible by creating a Program Config account that allows us to +update the fee without upgrading the program. + +To begin, let's first update the `lib.rs` file to: + +1. Include a `SEED_PROGRAM_CONFIG` constant, which will be used to generate the + PDA for the program config account. +2. Include an `ADMIN` constant, which will be used as a constraint when + initializing the program config account. Run the `solana address` command to + get your address to use as the constant's value. +3. Include a `state` module that we'll implement shortly. +4. Include the `initialize_program_config` and `update_program_config` + instructions and calls to their "handlers," both of which we'll implement in + another step. 
+ +```rust +use anchor_lang::prelude::*; +mod instructions; +use instructions::*; +mod state; + +declare_id!("FF3eGbZnharYruJNwRV7jqnDYvpLkyvgbSv5gsGbJHps"); + +#[cfg(not(feature = "local-testing"))] +#[constant] +pub const USDC_MINT_PUBKEY: Pubkey = pubkey!("EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v"); + +#[cfg(feature = "local-testing")] +#[constant] +pub const USDC_MINT_PUBKEY: Pubkey = pubkey!("envYcAnc9BvWEqDy4VKJsiECCbbc72Fynz87rBih6DV"); + +pub const SEED_PROGRAM_CONFIG: &[u8] = b"program_config"; + +#[constant] +pub const ADMIN: Pubkey = pubkey!("GprrWv9r8BMxQiWea9MrbCyK7ig7Mj8CcseEbJhDDZXM"); + +#[program] +pub mod config { + + use super::*; + + pub fn payment(ctx: Context, amount: u64) -> Result<()> { + instructions::payment_handler(ctx, amount) + } + + pub fn initialize_program_config(ctx: Context) -> Result<()> { + instructions::initialize_program_config_handler(ctx) + } + + pub fn update_program_config(ctx: Context, new_fee: u64) -> Result<()> { + instructions::update_program_config_handler(ctx, new_fee) + } +} +``` + +### 5. Program Config State + +Next, let's define the structure for the `ProgramConfig` state. This account +will store the admin, the token account where fees are sent, and the fee rate. +We'll also specify the number of bytes required to store this structure. + +Create a new file called `state.rs` in the `/src` directory and add the +following code. + +```rust +use anchor_lang::prelude::*; + +#[account] +#[derive(InitSpace)] +pub struct ProgramConfig { + pub admin: Pubkey, + pub fee_destination: Pubkey, + pub fee_basis_points: u64, +} +``` + +### 6. Add Initialize Program Config Account Instruction + +Now let's create the instruction logic for initializing the program config +account. It should only be callable by a transaction signed by the `ADMIN` key +and should set all the properties on the `ProgramConfig` account. + +Create a folder called `program_config` at the path +`/src/instructions/program_config`. This folder will store all instructions +related to the program config account. + +Within the `program_config` folder, create a file called +`initialize_program_config.rs` and add the following code. + +```rust +use crate::state::ProgramConfig; +use crate::{ADMIN, SEED_PROGRAM_CONFIG, USDC_MINT_PUBKEY}; +use anchor_lang::prelude::*; +use anchor_spl::token::TokenAccount; + +pub const DISCRIMINATOR_SIZE: usize = 8; + +#[derive(Accounts)] +pub struct InitializeProgramConfig<'info> { + #[account( + init, + seeds = [SEED_PROGRAM_CONFIG], + bump, + payer = authority, + space = DISCRIMINATOR_SIZE + ProgramConfig::INIT_SPACE + )] + pub program_config: Account<'info, ProgramConfig>, + #[account(token::mint = USDC_MINT_PUBKEY)] + pub fee_destination: Account<'info, TokenAccount>, + #[account(mut, address = ADMIN)] + pub authority: Signer<'info>, + pub system_program: Program<'info, System>, +} + +pub fn initialize_program_config_handler(ctx: Context) -> Result<()> { + ctx.accounts.program_config.set_inner(ProgramConfig { + admin: ctx.accounts.authority.key(), + fee_destination: ctx.accounts.fee_destination.key(), + fee_basis_points: 100, + }); + Ok(()) +} +``` + +### 7. Add Update Program Config Fee Instruction + +Next, implement the instruction logic for updating the config account. The +instruction should require that the signer match the `admin` stored in the +`program_config` account. + +Within the `program_config` folder, create a file called +`update_program_config.rs` and add the following code. 
+ +```rust +use crate::state::ProgramConfig; +use crate::{SEED_PROGRAM_CONFIG, USDC_MINT_PUBKEY}; +use anchor_lang::prelude::*; +use anchor_spl::token::TokenAccount; + +#[derive(Accounts)] +pub struct UpdateProgramConfig<'info> { + #[account(mut, seeds = [SEED_PROGRAM_CONFIG], bump)] + pub program_config: Account<'info, ProgramConfig>, + #[account(token::mint = USDC_MINT_PUBKEY)] + pub fee_destination: Account<'info, TokenAccount>, + #[account(mut, address = program_config.admin)] + pub admin: Signer<'info>, + /// CHECK: arbitrarily assigned by existing admin + pub new_admin: UncheckedAccount<'info>, +} + +pub fn update_program_config_handler( + ctx: Context, + new_fee: u64, +) -> Result<()> { + ctx.accounts.program_config.admin = ctx.accounts.new_admin.key(); + ctx.accounts.program_config.fee_destination = ctx.accounts.fee_destination.key(); + ctx.accounts.program_config.fee_basis_points = new_fee; + Ok(()) +} +``` + +### 8. Add mod.rs and update instructions.rs + +Next, let's expose the instruction handlers we created so that the call from +`lib.rs` doesn't show an error. Start by adding a file `mod.rs` in the +`program_config` folder. Add the code below to make the two modules, +`initialize_program_config` and `update_program_config` accessible. + +```rust +mod initialize_program_config; +pub use initialize_program_config::*; + +mod update_program_config; +pub use update_program_config::*; +``` + +Now, update `instructions.rs` at the path `/src/instructions.rs`. Add the code +below to make the two modules, `program_config` and `payment` accessible. + +```rust +mod program_config; +pub use program_config::*; + +mod payment; +pub use payment::*; +``` + +### 9. Update Payment Instruction + +Lastly, let's update the payment instruction to check that the `fee_destination` +account in the instruction matches the `fee_destination` stored in the program +config account. Then update the instruction's fee calculation to be based on the +`fee_basis_point` stored in the program config account. + +```rust +use crate::state::ProgramConfig; +use crate::{SEED_PROGRAM_CONFIG, USDC_MINT_PUBKEY}; +use anchor_lang::prelude::*; +use anchor_spl::token::{self, Token, TokenAccount}; + +#[derive(Accounts)] +pub struct Payment<'info> { + #[account( + seeds = [SEED_PROGRAM_CONFIG], + bump, + has_one = fee_destination + )] + pub program_config: Account<'info, ProgramConfig>, + #[account(mut, token::mint = USDC_MINT_PUBKEY)] + pub fee_destination: Account<'info, TokenAccount>, + #[account(mut, token::mint = USDC_MINT_PUBKEY)] + pub sender_token_account: Account<'info, TokenAccount>, + #[account(mut, token::mint = USDC_MINT_PUBKEY)] + pub receiver_token_account: Account<'info, TokenAccount>, + pub token_program: Program<'info, Token>, + #[account(mut)] + pub sender: Signer<'info>, +} + +pub fn payment_handler(ctx: Context, amount: u64) -> Result<()> { + let fee_amount = amount + .checked_mul(ctx.accounts.program_config.fee_basis_points) + .ok_or(ProgramError::ArithmeticOverflow)? 
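+        // `fee_basis_points` is expressed in basis points (1 bp = 0.01%), so
+        // dividing the product by 10,000 below yields the fee in token units.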
+ .checked_div(10000) + .ok_or(ProgramError::ArithmeticOverflow)?; + let remaining_amount = amount.checked_sub(fee_amount).ok_or(ProgramError::ArithmeticOverflow)?; + + msg!("Amount: {}", amount); + msg!("Fee Amount: {}", fee_amount); + msg!("Remaining Transfer Amount: {}", remaining_amount); + + token::transfer( + CpiContext::new( + ctx.accounts.token_program.to_account_info(), + token::Transfer { + from: ctx.accounts.sender_token_account.to_account_info(), + authority: ctx.accounts.sender.to_account_info(), + to: ctx.accounts.fee_destination.to_account_info(), + }, + ), + fee_amount, + )?; + + token::transfer( + CpiContext::new( + ctx.accounts.token_program.to_account_info(), + token::Transfer { + from: ctx.accounts.sender_token_account.to_account_info(), + authority: ctx.accounts.sender.to_account_info(), + to: ctx.accounts.receiver_token_account.to_account_info(), + }, + ), + remaining_amount, + )?; + + Ok(()) +} +``` + +### 10. Test + +Now that we're done implementing our new program configuration struct and +instructions, let's move on to testing our updated program. To begin, add the +PDA for the program config account to the test file. + +```typescript +describe("Config", () => { + ... + const programConfig = findProgramAddressSync( + [Buffer.from("program_config")], + program.programId + )[0] +... +``` + +Next, update the test file with three more tests testing that: + +1. The program config account is initialized correctly +2. The payment instruction is functioning as intended +3. The config account can be updated successfully by the admin +4. The config account cannot be updated by someone other than the admin + +The first test initializes the program config account and verifies that the +correct fee is set and that the correct admin is stored on the program config +account. + +```typescript +it("initializes program config account", async () => { + try { + await program.methods + .initializeProgramConfig() + .accounts({ + programConfig: programConfig, + feeDestination: feeDestination, + authority: walletAuthority.publicKey, + systemProgram: anchor.web3.SystemProgram.programId, + }) + .rpc(); + + const configAccount = + await program.account.programConfig.fetch(programConfig); + expect(configAccount.feeBasisPoints.toNumber()).to.equal( + INITIAL_FEE_BASIS_POINTS, + ); + expect(configAccount.admin.toString()).to.equal( + walletAuthority.publicKey.toString(), + ); + } catch (error) { + console.error("Program config initialization failed:", error); + throw error; + } +}); +``` + +The second test verifies that the payment instruction is working correctly, with +the fee being sent to the fee destination and the remaining balance being +transferred to the receiver. Here we update the existing test to include the +`programConfig` account. 
+ +```typescript +it("completes payment successfully", async () => { + try { + const transaction = await program.methods + .payment(new anchor.BN(PAYMENT_AMOUNT)) + .accounts({ + programConfig: programConfig, + feeDestination: feeDestination, + senderTokenAccount: senderTokenAccount, + receiverTokenAccount: receiverTokenAccount, + sender: sender.publicKey, + }) + .transaction(); + + await anchor.web3.sendAndConfirmTransaction(connection, transaction, [ + sender, + ]); + + const senderBalance = await getAccount(connection, senderTokenAccount); + const feeDestinationBalance = await getAccount(connection, feeDestination); + const receiverBalance = await getAccount(connection, receiverTokenAccount); + + expect(Number(senderBalance.amount)).to.equal(0); + expect(Number(feeDestinationBalance.amount)).to.equal( + (PAYMENT_AMOUNT * INITIAL_FEE_BASIS_POINTS) / 10000, + ); + expect(Number(receiverBalance.amount)).to.equal( + (PAYMENT_AMOUNT * (10000 - INITIAL_FEE_BASIS_POINTS)) / 10000, + ); + } catch (error) { + console.error("Payment failed:", error); + throw error; + } +}); +``` + +The third test attempts to update the fee on the program config account, which +should be successful. + +```typescript +it("updates program config account", async () => { + try { + await program.methods + .updateProgramConfig(new anchor.BN(UPDATED_FEE_BASIS_POINTS)) + .accounts({ + programConfig: programConfig, + admin: walletAuthority.publicKey, + feeDestination: feeDestination, + newAdmin: walletAuthority.publicKey, + }) + .rpc(); + + const configAccount = + await program.account.programConfig.fetch(programConfig); + expect(configAccount.feeBasisPoints.toNumber()).to.equal( + UPDATED_FEE_BASIS_POINTS, + ); + } catch (error) { + console.error("Program config update failed:", error); + throw error; + } +}); +``` + +The fourth test tries to update the fee on the program config account, where the +admin is not the one stored on the program config account, and this should fail. + +```typescript +it("fails to update program config account with unauthorized admin", async () => { + try { + const transaction = await program.methods + .updateProgramConfig(new anchor.BN(300)) + .accounts({ + programConfig: programConfig, + admin: sender.publicKey, + feeDestination: feeDestination, + newAdmin: sender.publicKey, + }) + .transaction(); + + await anchor.web3.sendAndConfirmTransaction(connection, transaction, [ + sender, + ]); + throw new Error("Expected transaction to fail, but it succeeded"); + } catch (error) { + expect(error).to.exist; + console.log("Transaction failed as expected:", error.message); + } +}); +``` + +Finally, run the test using the following command: + +```shell +anchor test --skip-deploy -- --features "local-testing" +``` + +You should see the following output: + +```shell +Config + ✔ initializes program config account (430ms) + ✔ completes payment successfully (438ms) + ✔ updates program config account (416ms) +Transaction failed as expected: Simulation failed. +Message: Transaction simulation failed: Error processing Instruction 0: custom program error: 0x7dc. +Logs: +[ + "Program FF3eGbZnharYruJNwRV7jqnDYvpLkyvgbSv5gsGbJHps invoke [1]", + "Program log: Instruction: UpdateProgramConfig", + "Program log: AnchorError caused by account: admin. Error Code: ConstraintAddress. Error Number: 2012. 
Error Message: An address constraint was violated.", + "Program log: Left:", + "Program log: F32dEMPn4BtQjHBgXXwfuEMo5qBQJySs8cCDrtwWQdBr", + "Program log: Right:", + "Program log: GprrWv9r8BMxQiWea9MrbCyK7ig7Mj8CcseEbJhDDZXM", + "Program FF3eGbZnharYruJNwRV7jqnDYvpLkyvgbSv5gsGbJHps consumed 7868 of 200000 compute units", + "Program FF3eGbZnharYruJNwRV7jqnDYvpLkyvgbSv5gsGbJHps failed: custom program error: 0x7dc" +]. +Catch the `SendTransactionError` and call `getLogs()` on it for full details. + ✔ fails to update program config account with unauthorized admin + + + 4 passing (22s) +``` + +And that's it! You've made the program a lot easier to work with moving forward. +If you want to take a look at the final solution code you can find it on +the [`solution` branch of the same](https://github.com/solana-developers/admin-instructions/tree/solution). + +## Challenge + +Now it's time for you to do some of this on your own. We mentioned being able to +use the program's upgrade authority as the initial admin. Go ahead and update +the lab's `initialize_program_config` so that only the upgrade authority can +call it rather than having a hardcoded `ADMIN`. + +Try doing this on your own, but if you get stuck, feel free to reference the +[`challenge` branch of the same repository](https://github.com/solana-developers/admin-instructions/tree/challenge) +to see one possible solution. + + + +Push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=02a7dab7-d9c1-495b-928c-a4412006ec20)! + + diff --git a/content/courses/program-optimization/rust-macros.mdx b/content/courses/program-optimization/rust-macros.mdx new file mode 100644 index 000000000..4f46660a1 --- /dev/null +++ b/content/courses/program-optimization/rust-macros.mdx @@ -0,0 +1,1128 @@ +--- +title: Rust Procedural Macros +objectives: + - Create and use **Procedural Macros** in Rust + - Explain and work with a Rust Abstract Syntax Tree (AST) + - Describe how procedural macros are used in the Anchor framework +description: "Use Rust macros to generate code at compile time." +--- + +## Summary + +- **Procedural macros** are a special kind of Rust macro that allows the + programmer to generate code at compile time based on custom input. +- In the Anchor framework, procedural macros generate code that reduces the + boilerplate required when writing Solana programs. +- An **Abstract Syntax Tree (AST)** represents the syntax and structure of the + input code that is passed to a procedural macro. When creating a macro, you + use elements of the AST, like tokens and items, to generate the appropriate + code. +- A **Token** is the smallest source code unit that the Rust compiler can parse. +- An **Item** is a declaration that defines something that can be used in a Rust + program, such as a struct, an enum, a trait, a function, or a method. +- A **TokenStream** is a sequence of tokens representing a piece of source code. + It can be passed to a procedural macro, allowing it to access and manipulate + the individual tokens in the code. + +## Lesson + +In Rust, a macro is a piece of code you can write once and then "expand" to +generate code at compile time. This code generation can be helpful when you need +to generate repetitive or complex code or when you want to use the same code in +multiple places in your program. + +There are two different types of macros: declarative macros and procedural +macros. 
+ +- Declarative macros are defined using the `macro_rules!` macro, which allows + you to match against code patterns and generate code based on the matching + pattern. +- Procedural macros in Rust are defined using Rust code and operate on the + abstract syntax tree (AST) of the input TokenStream, which allows them to + manipulate and generate code at a finer level of detail. + +This lesson will focus on procedural macros, which are standard in the Anchor +framework. + +### Rust concepts + +Before we discuss macros specifically, let's review some of the important +terminology, concepts, and tools we'll use throughout the lesson. + +### Token + +In Rust programming, a [token](https://doc.rust-lang.org/reference/tokens.html) +is an essential element of the language syntax, like an identifier or literal +value. Tokens represent the smallest unit of source code recognized by the Rust +compiler, and they are used to build more complex expressions and statements in +a program. + +Examples of Rust tokens include: + +- [Keywords](https://doc.rust-lang.org/reference/keywords.html), such as `fn`, + `let`, and `match`, are reserved words in the Rust language with special + meanings. +- [Identifiers](https://doc.rust-lang.org/reference/identifiers.html), such as + variable and function names, refer to values and functions. +- [Punctuation](https://doc.rust-lang.org/reference/tokens.html#punctuation) + marks, such as `{`, `}`, and `;`, are used to structure and delimit blocks of + code. +- [Literals](https://doc.rust-lang.org/reference/tokens.html#literals), such as + numbers and strings, represent constant values in a Rust program. + +You can +[read more about Rust tokens](https://doc.rust-lang.org/reference/tokens.html). + +### Item + +Items are named self-contained pieces of code in Rust. They provide a way to +group related code and give it a name by which the group can be referenced, +allowing you to reuse and organize your code modularly. + +There are several different kinds of items, such as: + +- Functions +- Structs +- Enums +- Traits +- Modules +- Macros + +You can +[read more about Rust items](https://doc.rust-lang.org/reference/items.html). + +### Token Streams + +The `TokenStream` data type represents a sequence of tokens. It is defined in +the `proc_macro` crate and is surfaced so that macros can be written based on +other code in the codebase. + +When defining a procedural macro, the macro input is passed to the macro as a +`TokenStream`, which can then be parsed and transformed. The resulting +`TokenStream` can then be expanded into the final code output by the macro. + +```rust +use proc_macro::TokenStream; + +#[proc_macro] +pub fn my_macro(input: TokenStream) -> TokenStream { + ... +} +``` + +### Abstract syntax tree + +In a Rust procedural macro context, an abstract syntax tree (AST) is a data +structure that represents the hierarchical structure of the input tokens and +their meaning in the Rust language. It's typically used as an intermediate +representation of the input that can be quickly processed and transformed by the +procedural macro. + +The macro can use the AST to analyze the input code and make changes to it, such +as adding or removing tokens or transforming the meaning of the code. It can +then use this transformed AST to generate new code, which can be returned as the +output of the proc macro. + +### The `syn` crate + +The `syn` crate is available to help parse a token stream into an AST that macro +code can traverse and manipulate. 
When a procedural macro is invoked in a Rust +program, the macro function is called with a token stream as the input. Parsing +this input is the first step to virtually any macro. + +Take as an example a proc macro that you invoke using `my_macro!` as follows: + +```rust +my_macro!("hello, world"); +``` + +When the above code is executed, the Rust compiler passes the input tokens +(`"hello, world"`) as a `TokenStream` to the `my_macro` proc macro. + +```rust +use proc_macro::TokenStream; +use syn::parse_macro_input; + +#[proc_macro] +pub fn my_macro(input: TokenStream) -> TokenStream { + let ast = parse_macro_input!(input as syn::LitStr); + eprintln!("{:#?}", ast.token()); + ... +} +``` + +Inside the proc macro, the code uses the `parse_macro_input!` macro from the +`syn` crate to parse the input `TokenStream` into an abstract syntax tree (AST). +Specifically, this example parses it as an instance of `LitStr` representing a +UTF-8 string literal in Rust. Call the `.token()` method to return a +[Literal](https://docs.rs/proc-macro2/1.0.86/proc_macro2/struct.Literal.html) +that we pass to the `eprintln!` to print the AST for debugging purposes. + +```rust +Literal { + kind: Str, + symbol: "hello, world", + suffix: None, + // Shows the byte offsets 31 to 45 of the literal "hello, world" + // in the portion of the source code from which the `TokenStream` was parsed. + span: #0 bytes(31..45), +} +``` + +The output of the `eprintln!` macro shows the structure of the `Literal` AST +that was generated from the input tokens. It shows the string literal value +(`"hello, world"`) and other metadata about the token, such as its kind (`Str`), +suffix (`None`), and span. + +### The `quote` crate + +Another important crate is the `quote` crate, which is pivotal in the code +generation portion of the macro. + +Once a proc macro has finished analyzing and transforming the AST, it can use +the `quote` crate or a similar code generation library to convert it back into a +token stream. After that, it returns the `TokenStream`, which the Rust compiler +uses to replace the original stream in the source code. + +Take the below example of `my_macro`: + +```rust +use proc_macro::TokenStream; +use syn::parse_macro_input; +use quote::quote; + +#[proc_macro] +pub fn my_macro(input: TokenStream) -> TokenStream { + let ast = parse_macro_input!(input as syn::LitStr); + eprintln!("{:#?}", ast.token()); + let expanded = quote! {println!("The input is: {}", #ast)}; + expanded.into() +} +``` + +This example uses the `quote!` macro to generate a new `TokenStream` consisting +of a `println!` macro call with the `LitStr` AST as its argument. + +Note that the `quote!` macro generates a `TokenStream` of type +`proc_macro2::TokenStream`. To return this `TokenStream` to the Rust compiler, +use the `.into()` method to convert it to `proc_macro::TokenStream`. The Rust +compiler will then use this `TokenStream` to replace the original proc macro +call in the source code. + +```text +The input is: hello, world +``` + +Using procedural macros allows you to create procedural macros that perform +powerful code generation and metaprogramming tasks. + +### Procedural Macro + +Procedural macros in Rust are a powerful way to extend the language and create +custom syntax. These macros are written in Rust and compiled with the rest of +the code. 
There are three types of procedural macros: + +- Function-like macros - `custom!(...)` +- Derive macros - `#[derive(CustomDerive)]` +- Attribute macros - `#[CustomAttribute]` + +This section will discuss the three types of procedural macros and provide an +example implementation of one. Writing a procedural macro is consistent across +all three types, making this example adaptable to the other types. + +### Function-like macros + +Function-like procedural macros are the simplest of the three types of +procedural macros. These macros are defined using a function preceded by the +`#[proc_macro]` attribute. The function must take a `TokenStream` as input and +return a new `TokenStream` as output to replace the original code. + +```rust +#[proc_macro] +pub fn my_macro(input: TokenStream) -> TokenStream { + ... +} +``` + +These macros are invoked using the function's name followed by the `!` operator. +They can be used in various places in a Rust program, such as in expressions, +statements, and function definitions. + +```rust +my_macro!(input); +``` + +Function-like procedural macros are best suited for simple code generation tasks +that require only a single input and output stream. They are easy to understand +and use and provide a straightforward way to generate code at compile time. + +### Attribute macros + +Attribute macros define new attributes that are attached to items in a Rust +program, such as functions and structs. + +```rust +#[my_macro] +fn my_function() { + ... +} +``` + +Attribute macros are defined with a function preceded by the +`#[proc_macro_attribute]` attribute. The function requires two token streams as +input and returns a single `TokenStream` output that replaces the original item +with an arbitrary number of new items. + +```rust +#[proc_macro_attribute] +pub fn my_macro(attr: TokenStream, input: TokenStream) -> TokenStream { + ... +} +``` + +The first token stream input represents attribute arguments. The second token +stream is the rest of the item that the attribute is attached to, including any +other attributes that may be present. + +```rust +#[my_macro(arg1, arg2)] +fn my_function() { + ... +} +``` + +For example, an attribute macro could process the arguments passed to it to turn +certain features on or off and then use the second token stream to modify the +original item. With access to both token streams, attribute macros can provide +greater flexibility and functionality than using only a single token stream. + +### Derive macros + +Derive macros are invoked using the `#[derive]` attribute on a struct, enum, or +union. They are typically used to implement traits for the input types +automatically. + +```rust +#[derive(MyMacro)] +struct Input { + field: String +} +``` + +Derive macros are defined with a function preceded by the `#[proc_macro_derive]` +attribute. They're limited to generating code for structs, enums, and unions. +They take a single token stream as input and return a single token stream as +output. + +Unlike the other procedural macros, the returned token stream doesn't replace +the original code. Instead, it gets appended to the module or block to which the +original item belongs, allowing developers to extend the functionality of the +original item without modifying the original code. + +```rust +#[proc_macro_derive(MyMacro)] +pub fn my_macro(input: TokenStream) -> TokenStream { + ... +} +``` + +In addition to implementing traits, derive macros can define helper attributes. 
+Helper attributes can be used in the scope of the item to which the derive macro +is applied and customize the code generation process. + +```rust +#[proc_macro_derive(MyMacro, attributes(helper))] +pub fn my_macro(body: TokenStream) -> TokenStream { + ... +} +``` + +Helper attributes are inert, which means they have no effect on their own. Their +only purpose is to be used as input to the derive macro that defined them. + +```rust +#[derive(MyMacro)] +struct Input { + #[helper] + field: String +} +``` + +For example, a derive macro could define a helper attribute to perform +additional operations depending on its presence, allowing developers to extend +the functionality of derive macros and customize the code they generate more +flexibly. + +### Example of a procedural macro + +This example shows how to use a derive procedural macro to automatically +generate an implementation of a `describe()` method for a struct. + +```rust +use example_macro::Describe; + +#[derive(Describe)] +struct MyStruct { + my_string: String, + my_number: u64, +} + +fn main() { + MyStruct::describe(); +} +``` + +The `describe()` method will print a description of the struct's fields to the +console. + +```text +MyStruct is a struct with these named fields: my_string, my_number. +``` + +The first step is to define the procedural macro using the +`#[proc_macro_derive]` attribute. To extract the struct's identifier and data, +the input `TokenStream` is parsed using the `parse_macro_input!()` macro. + +```rust +use proc_macro::{self, TokenStream}; +use quote::quote; +use syn::{parse_macro_input, DeriveInput, FieldsNamed}; + +#[proc_macro_derive(Describe)] +pub fn describe_struct(input: TokenStream) -> TokenStream { + let DeriveInput { ident, data, .. } = parse_macro_input!(input); + ... +} +``` + +The next step is to use the `match` keyword to perform pattern matching on the +`data` value to extract the names of the fields in the struct. + +The first `match` has two arms: one for the `syn::Data::Struct` variant and one +for the "catch-all" `_` arm that handles all other variants of `syn::Data`. + +The second `match` has two arms as well: one for the `syn::Fields::Named` +variant, and one for the "catch-all" `_` arm that handles all other variants of +`syn::Fields`. + +The `#(#idents), *` syntax specifies that the `idents` iterator will be +"expanded" to create a comma-separated list of the elements in the iterator. + +```rust +use proc_macro::{self, TokenStream}; +use quote::quote; +use syn::{parse_macro_input, DeriveInput, FieldsNamed}; + +#[proc_macro_derive(Describe)] +pub fn describe_struct(input: TokenStream) -> TokenStream { + let DeriveInput { ident, data, .. } = parse_macro_input!(input); + + let field_names = match data { + syn::Data::Struct(s) => match s.fields { + syn::Fields::Named(FieldsNamed { named, .. }) => { + let idents = named.iter().map(|f| &f.ident); + format!( + "a struct with these named fields: {}", + quote! {#(#idents), *}, + ) + } + _ => panic!("The syn::Fields variant is not supported"), + }, + _ => panic!("The syn::Data variant is not supported"), + }; + ... +} +``` + +The last step implements a `describe()` method for a struct. The `expanded` +variable is defined using the `quote!` macro and the `impl` keyword to create an +implementation for the struct name stored in the `#ident` variable. + +This implementation defines the `describe()` method that uses the `println!` +macro to print the name of the struct and its field names. 
+ +Finally, the `expanded` variable is converted into a `TokenStream` using the +`into()` method. + +```rust +use proc_macro::{self, TokenStream}; +use quote::quote; +use syn::{parse_macro_input, DeriveInput, FieldsNamed}; + +#[proc_macro_derive(Describe)] +pub fn describe(input: TokenStream) -> TokenStream { + let DeriveInput { ident, data, .. } = parse_macro_input!(input); + + let field_names = match data { + syn::Data::Struct(s) => match s.fields { + syn::Fields::Named(FieldsNamed { named, .. }) => { + let idents = named.iter().map(|f| &f.ident); + format!( + "a struct with these named fields: {}", + quote! {#(#idents), *}, + ) + } + _ => panic!("The syn::Fields variant is not supported"), + }, + _ => panic!("The syn::Data variant is not supported"), + }; + + let expanded = quote! { + impl #ident { + fn describe() { + println!("{} is {}.", stringify!(#ident), #field_names); + } + } + }; + + expanded.into() +} +``` + +Now, when the `#[derive(Describe)]` attribute is added to a struct, the Rust +compiler automatically generates an implementation of the `describe()` method +that can be called to print the name of the struct and the names of its fields. + +```rust +#[derive(Describe)] +struct MyStruct { + my_string: String, + my_number: u64, +} +``` + +The `cargo expand` command from the `cargo-expand` crate can expand Rust code +that uses procedural macros. For example, the code for the `MyStruct` struct +generated using the `#[derive(Describe)]` attribute looks like this: + +```rust +struct MyStruct { + my_string: String, + my_number: f64, +} +impl MyStruct { + fn describe() { + { + ::std::io::_print( + ::core::fmt::Arguments::new_v1( + &["", " is ", ".\n"], + &[ + ::core::fmt::ArgumentV1::new_display(&"MyStruct"), + ::core::fmt::ArgumentV1::new_display( + &"a struct with these named fields: my_string, my_number", + ), + ], + ), + ); + }; + } +} +``` + +### Anchor procedural macros + +Procedural macros are the magic behind the Anchor library commonly used in +Solana development. Anchor macros allow for more concise code, standard security +checks, and more. Let's go through a few examples of how Anchor uses procedural +macros. + +### Function-like macro + +The `declare_id` macro shows how function-like macros are used in Anchor. This +macro takes in a string of characters representing a program's ID as input and +converts it into a `Pubkey` type that can be used in the Anchor program. + +```rust +declare_id!("G839pmstFmKKGEVXRGnauXxFgzucvELrzuyk6gHTiK7a"); +``` + +The `declare_id` macro is defined using the `#[proc_macro]` attribute, +indicating that it's a function-like proc macro. + +```rust +#[proc_macro] +pub fn declare_id(input: proc_macro::TokenStream) -> proc_macro::TokenStream { + let address = input.clone().to_string(); + + let id = parse_macro_input!(input as id::Id); + let ret = quote! { #id }; + ... + let idl_print = anchor_syn::idl::gen_idl_print_fn_address(address); + return proc_macro::TokenStream::from(quote! { + #ret + #idl_print + }); + ... +} +``` + +### Derive macro + +The `#[derive(Accounts)]` is an example of just one of many derive macros used +in Anchor. + +The `#[derive(Accounts)]` macro generates code that implements the `Accounts` +trait for the given struct. This trait does several things, including validating +and deserializing the accounts passed into an instruction, allowing the struct +to be used as a list of accounts required by an instruction in an Anchor +program. 
+ +Any constraints specified on fields by the `#[account(..)]` attribute are +applied during deserialization. The `#[instruction(..)]` attribute can also be +added to specify the instruction's arguments and make them accessible to the +macro. + +```rust +#[derive(Accounts)] +#[instruction(input: String)] +pub struct Initialize<'info> { + #[account(init, payer = payer, space = MyData::DISCRIMINATOR.len() + MyData::INIT_SPACE + input.len())] + pub data_account: Account<'info, MyData>, + #[account(mut)] + pub payer: Signer<'info>, + pub system_program: Program<'info, System>, +} +``` + +This macro is defined using the `proc_macro_derive` attribute, which allows it +to be used as a derive macro that can be applied to a struct. The line +`#[proc_macro_derive(Accounts, attributes(account, instruction))]` indicates +that this is a derive macro that processes `account` and `instruction` helper +attributes. + +The INIT_SPACE is used to calculate the initial size of an account. It is +implemented by derive macro on `MyData` automatically implementing the +[anchor_lang::Space](https://docs.rs/anchor-lang/latest/anchor_lang/trait.Space.html#associatedconstant.INIT_SPACE). + +```rust +#[account] +#[derive(InitSpace)] +pub struct NewAccount { + data: u64, +} +``` + +The `#[account]` macro also automatically derives the _DISCRIMINANT_ of an +anchor account which implements the +[anchor_lang::Discriminator](https://docs.rs/anchor-lang/latest/anchor_lang/trait.Discriminator.html) +trait. This trait exposes an array of 8 bytes containing the discriminator, +which can be exposed using `NewAccount::DISCRIMINATOR`. Calling the `.len()` on +this array of 8 bytes gives us the length of the discriminator; + +```rust +#[proc_macro_derive(Accounts, attributes(account, instruction))] +pub fn derive_anchor_deserialize(item: TokenStream) -> TokenStream { + parse_macro_input!(item as anchor_syn::AccountsStruct) + .to_token_stream() + .into() +} +``` + +### Attribute macro `#[program]` + +The `#[program]` attribute macro is an example of an attribute macro used in +Anchor to define the module containing instruction handlers for a Solana +program. + +```rust +#[program] +pub mod my_program { + use super::*; + + pub fn initialize(ctx: Context) -> Result<()> { + ... + } +} +``` + +In this case, the `#[program]` attribute is applied to a module to specify that +it contains instruction handlers for a Solana program. + +```rust +#[proc_macro_attribute] +pub fn program( + _args: proc_macro::TokenStream, + input: proc_macro::TokenStream, +) -> proc_macro::TokenStream { + parse_macro_input!(input as anchor_syn::Program) + .to_token_stream() + .into() +} +``` + +Overall, using proc macros in Anchor dramatically reduces the repetitive code +that Solana developers have to write. By reducing the boilerplate code, +developers can focus on their program's core functionality and avoid mistakes +caused by manual repetition, resulting in a faster and more efficient +development process. + +## Lab + +Let's practice this by creating a new derive macro! Our new macro will let us +automatically generate instruction logic for updating each field on an account +in an Anchor program. + +### 1. Starter + +To get started, download the starter code from the `starter` branch of +[the `anchor-custom-macro` repository](https://github.com/solana-developers/anchor-custom-macro/tree/starter). 
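+
+One way to do that, assuming you have `git` installed, is to clone the branch
+directly:
+
+```shell
+git clone --branch starter https://github.com/solana-developers/anchor-custom-macro
+cd anchor-custom-macro
+```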
+
+The starter code includes a simple Anchor program that allows you to initialize
+and update a `Config` account, similar to what we did with the
+[Program Configuration lesson](/developers/courses/program-optimization/program-configuration).
+
+The account in question is structured as follows:
+
+```rust
+use anchor_lang::{Discriminator, prelude::*};
+
+#[account]
+#[derive(InitSpace)]
+pub struct Config {
+    pub auth: Pubkey,
+    pub bool: bool,
+    pub first_number: u8,
+    pub second_number: u64,
+}
+
+impl Config {
+    pub const LEN: usize = Config::DISCRIMINATOR.len() + Config::INIT_SPACE;
+}
+```
+
+The `programs/admin/src/lib.rs` file contains the program entrypoint with the
+definitions of the program's instructions. Currently, the program has
+instructions to initialize this account and then one instruction per account
+field for updating the field.
+
+The `programs/admin/src/admin_config` directory contains the program's
+instruction logic and state. Take a look through each of these files. You'll
+notice that the instruction logic for each field is duplicated for each
+instruction.
+
+The goal of this lab is to implement a procedural macro that will allow us to
+replace all of the instruction logic functions and automatically generate
+functions for each instruction.
+
+### 2. Set up the custom macro declaration
+
+Let's get started by creating a separate crate for our custom macro. Run
+`cargo new --lib custom-macro` in the project's root directory. The command
+creates a new `custom-macro` directory with its own `Cargo.toml`. Update the new
+`Cargo.toml` file to be the following:
+
+```text
+[package]
+name = "custom-macro"
+version = "0.1.0"
+edition = "2021"
+
+[lib]
+proc-macro = true
+
+[dependencies]
+syn = "2.0.77"
+quote = "1.0.73"
+proc-macro2 = "1.0.86"
+anchor-lang.workspace = true
+```
+
+The `proc-macro = true` line defines this crate as containing a procedural
+macro. The dependencies are all crates we'll use to create our derive macro.
+
+Next, update the project root's `Cargo.toml` file's `members` field to include
+`"custom-macro"`:
+
+```text
+[workspace]
+members = [
+    "programs/*",
+    "custom-macro"
+]
+
+[workspace.dependencies]
+anchor-lang = "0.30.1"
+```
+
+The `[workspace.dependencies]` section declares _anchor-lang_ as a workspace
+dependency. This lets us define the _anchor-lang_ version once in the root
+project configuration and inherit it in any workspace member that opts in with
+`anchor-lang.workspace = true`, such as the _custom-macro_ crate and the
+_custom-macro-test_ crate we'll define next.
+
+Now, our crate is set up and ready to go. But before we move on, let's create
+one more crate at the root level that we can use to test out our macro as we
+create it. Use `cargo new custom-macro-test` at the project root. Then update
+the newly created `Cargo.toml` to add `anchor-lang` and the `custom-macro`
+crates as dependencies:
+
+```text
+[package]
+name = "custom-macro-test"
+version = "0.1.0"
+edition = "2021"
+
+[dependencies]
+anchor-lang.workspace = true
+custom-macro = { path = "../custom-macro" }
+```
+
+Next, update the root project's `Cargo.toml` to include the new
+`custom-macro-test` crate as before:
+
+```text
+[workspace]
+members = [
+    "programs/*",
+    "custom-macro",
+    "custom-macro-test"
+]
+```
+
+Finally, replace the code in `custom-macro-test/src/main.rs` with the following
+code.
We'll use this later for testing: + +```rust +use anchor_lang::prelude::*; +use custom_macro::InstructionBuilder; + +#[derive(InstructionBuilder)] +pub struct Config { + pub auth: Pubkey, + pub bool: bool, + pub first_number: u8, + pub second_number: u64, +} +``` + +### 3. Define the custom macro + +Now, in the `custom-macro/src/lib.rs` file, let's add our new macro's +declaration. In this file, we'll use the `parse_macro_input!` macro to parse the +input `TokenStream` and extract the `ident` and `data` fields from a +`DeriveInput` struct. Then, we'll use the `eprintln!` macro to print the values +of `ident` and `data`. We will now use `TokenStream::new()` to return an empty +`TokenStream`. + +```rust +use proc_macro::TokenStream; +use quote::*; +use syn::*; + +#[proc_macro_derive(InstructionBuilder)] +pub fn instruction_builder(input: TokenStream) -> TokenStream { + let DeriveInput { ident, data, .. } = parse_macro_input!(input); + + eprintln! ("{:#?}", ident); + eprintln! ("{:#?}", data); + + TokenStream::new() +} +``` + +Let's test what this prints. To do this, you first need to install the +`cargo-expand` command by running `cargo install cargo-expand`. You'll also need +to install the nightly version of Rust by running `rustup install nightly`. + +Once you've done this, you can see the code output described above by navigating +to the `custom-macro-test` directory and running `cargo expand`. + +This command expands macros in the crate. Since the `main.rs` file uses the +newly created `InstructionBuilder` macro, this will print the syntax tree for +the `ident` and `data` of the struct to the console. Once you confirm that the +input `TokenStream` parses correctly, remove the `eprintln!` statements. + +### 4. Get the struct's fields + +Next, let's use `match` statements to get the named fields from the `data` of +the struct. Then we'll use the `eprintln!` macro to print the values of the +fields. + +```rust +use proc_macro::TokenStream; +use quote::*; +use syn::*; + +#[proc_macro_derive(InstructionBuilder)] +pub fn instruction_builder(input: TokenStream) -> TokenStream { + let DeriveInput { ident, data, .. } = parse_macro_input!(input); + + let fields = match data { + syn::Data::Struct(s) => match s.fields { + syn::Fields::Named(n) => n.named, + _ => panic!("The syn::Fields variant is not supported: {:#?}", s.fields), + }, + _ => panic!("The syn::Data variant is not supported: {:#?}", data), + }; + + eprintln! ("{:#?}", fields); + + TokenStream::new() +} +``` + +Once again, use `cargo expand` in the terminal to see the output of this code. +Once you have confirmed that the fields are being extracted and printed +correctly, you can remove the `eprintln!` statement. + +### 5. Build update instructions + +Next, let's iterate over the fields of the struct and generate an update +instruction for each field. The instruction will be generated using the `quote!` +macro, including the field's name and type and a new function name for the +update instruction. + +```rust +use proc_macro::TokenStream; +use quote::*; +use syn::*; + +#[proc_macro_derive(InstructionBuilder)] +pub fn instruction_builder(input: TokenStream) -> TokenStream { + let DeriveInput { ident, data, .. 
} = parse_macro_input!(input); + + let fields = match data { + syn::Data::Struct(s) => match s.fields { + syn::Fields::Named(n) => n.named, + _ => panic!("The syn::Fields variant is not supported: {:#?}", s.fields), + }, + _ => panic!("The syn::Data variant is not supported: {:#?}", data), + }; + + let update_instruction = fields.into_iter().map(|f| { + let name = &f.ident; + let ty = &f.ty; + let fname = format_ident!("update_{}", name.clone().unwrap()); + + quote! { + pub fn #fname(ctx: Context, new_value: #ty) -> Result<()> { + let admin_account = &mut ctx.accounts.admin_account; + admin_account.#name = new_value; + Ok(()) + } + } + }); + + TokenStream::new() +} +``` + +### 6. Return new `TokenStream` + +Lastly, let's use the `quote!` macro to generate an implementation for the +struct with the name specified by the `ident` variable. The implementation +includes the update instructions generated for each field in the struct. The +generated code is then converted to a `TokenStream` using the `into()` method +and returned as the result of the macro. + +```rust +use proc_macro::TokenStream; +use quote::*; +use syn::*; + +#[proc_macro_derive(InstructionBuilder)] +pub fn instruction_builder(input: TokenStream) -> TokenStream { + let DeriveInput { ident, data, .. } = parse_macro_input!(input); + + let fields = match data { + syn::Data::Struct(s) => match s.fields { + syn::Fields::Named(n) => n.named, + _ => panic!("The syn::Fields variant is not supported: {:#?}", s.fields), + }, + _ => panic!("The syn::Data variant is not supported: {:#?}", data), + }; + + let update_instruction = fields.into_iter().map(|f| { + let name = &f.ident; + let ty = &f.ty; + let fname = format_ident!("update_{}", name.clone().unwrap()); + + quote! { + pub fn #fname(ctx: Context, new_value: #ty) -> Result<()> { + let admin_account = &mut ctx.accounts.admin_account; + admin_account.#name = new_value; + Ok(()) + } + } + }); + + let expanded = quote! { + impl #ident { + #(#update_instruction)* + } + }; + expanded.into() +} +``` + +To verify that the macro is generating the correct code, use the `cargo expand` +command to see the expanded form of the macro. The output of this looks like the +following: + +```rust +use anchor_lang::prelude::*; +use custom_macro::InstructionBuilder; +pub struct Config { + pub auth: Pubkey, + pub bool: bool, + pub first_number: u8, + pub second_number: u64, +} +impl Config { + pub fn update_auth( + ctx: Context, + new_value: Pubkey, + ) -> Result<()> { + let admin_account = &mut ctx.accounts.admin_account; + admin_account.auth = new_value; + Ok(()) + } + pub fn update_bool(ctx: Context, new_value: bool) -> Result<()> { + let admin_account = &mut ctx.accounts.admin_account; + admin_account.bool = new_value; + Ok(()) + } + pub fn update_first_number( + ctx: Context, + new_value: u8, + ) -> Result<()> { + let admin_account = &mut ctx.accounts.admin_account; + admin_account.first_number = new_value; + Ok(()) + } + pub fn update_second_number( + ctx: Context, + new_value: u64, + ) -> Result<()> { + let admin_account = &mut ctx.accounts.admin_account; + admin_account.second_number = new_value; + Ok(()) + } +} +``` + +### 7. 
Update the program to use your new macro + +To use the new macro to generate update instructions for the `Config` struct, +first add the `custom-macro` crate as a dependency to the program in its +`Cargo.toml`: + +```text +[dependencies] +anchor-lang.workspace = true +custom-macro = { path = "../../custom-macro" } +``` + +Then, navigate to the `state.rs` file in the Anchor program and update it with +the following code: + +```rust +use crate::admin_update::UpdateAdminAccount; +use anchor_lang::prelude::*; +use custom_macro::InstructionBuilder; + +#[derive(InstructionBuilder)] +#[account] +pub struct Config { + pub auth: Pubkey, + pub bool: bool, + pub first_number: u8, + pub second_number: u64, +} + +impl Config { + pub const LEN: usize = Config::DISCRIMINATOR.len() + Config::INIT_SPACE; +} +``` + +Next, navigate to the `admin_update.rs` file and delete the existing update +instructions, leaving only the `UpdateAdminAccount` context struct in the file. + +```rust +use crate::state::Config; +use anchor_lang::prelude::*; + +#[derive(Accounts)] +pub struct UpdateAdminAccount<'info> { + pub auth: Signer<'info>, + #[account( + mut, + has_one = auth, + )] + pub admin_account: Account<'info, Config>, +} +``` + +Next, update `lib.rs` in the Anchor program to use the update instructions +generated by the `InstructionBuilder` macro. + +```rust +use anchor_lang::prelude::*; +mod admin_config; +use admin_config::*; + +declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); + +#[program] +pub mod admin { + use super::*; + + pub fn initialize(ctx: Context) -> Result<()> { + Initialize::initialize(ctx) + } + + pub fn update_auth(ctx: Context, new_value: Pubkey) -> Result<()> { + Config::update_auth(ctx, new_value) + } + + pub fn update_bool(ctx: Context, new_value: bool) -> Result<()> { + Config::update_bool(ctx, new_value) + } + + pub fn update_first_number(ctx: Context, new_value: u8) -> Result<()> { + Config::update_first_number(ctx, new_value) + } + + pub fn update_second_number(ctx: Context, new_value: u64) -> Result<()> { + Config::update_second_number(ctx, new_value) + } +} +``` + +Lastly, navigate to the `admin` directory and run the `anchor test` to verify +that the update instructions generated by the `InstructionBuilder` macro are +working correctly. + +``` + admin + ✔ Is initialized! (160ms) + ✔ Update bool! (409ms) + ✔ Update u8! (403ms) + ✔ Update u64! (406ms) + ✔ Update Admin! (405ms) + + + 5 passing (2s) +``` + +Nice work! At this point, you can create procedural macros to help in your +development process. We encourage you to make the most of the Rust language and +use macros where they make sense. But even if you don't know how they work, it +helps you understand what's happening with Anchor under the hood. + +If you need more time with the solution code, reference the `solution` branch of +[the `anchor-custom-macro` repository](https://github.com/solana-developers/anchor-custom-macro/tree/solution). + +## Challenge + +To solidify what you've learned: Create another procedural macro. Think about +code you've written that could be reduced or improved by a macro, and try it +out! Since this is still practice, it's okay if it doesn't work out how you want +or expect. Just jump in and experiment! + + + +Push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=eb892157-3014-4635-beac-f562af600bf8)! 
+ + diff --git a/content/courses/program-security/account-data-matching.mdx b/content/courses/program-security/account-data-matching.mdx new file mode 100644 index 000000000..e8ac173af --- /dev/null +++ b/content/courses/program-security/account-data-matching.mdx @@ -0,0 +1,570 @@ +--- +title: Account Data Matching +objectives: + - Explain the security risks associated with missing data validation checks + - Implement data validation checks using long-form Rust + - Implement data validation checks using Anchor constraints +description: + "How to check your program's data accounts in both Anchor and Native Rust." +--- + +## Summary + +- Use **data validation checks** to verify that account data matches an expected + value. Without appropriate data validation checks, unexpected accounts may be + used in an instruction handler. +- To implement data validation checks in Rust, simply compare the data stored on + an account to an expected value. + + ```rust + if ctx.accounts.user.key() != ctx.accounts.user_data.user { + return Err(ProgramError::InvalidAccountData.into()); + } + ``` + +- In Anchor, you can use a + [`constraint`](https://www.anchor-lang.com/docs/account-constraints) to check + whether the given expression evaluates to true. Alternatively, you can use + `has_one` to check that a target account field stored on the account matches + the key of an account in the `Accounts` struct. + +## Lesson + +Account data matching refers to data validation checks used to verify the data +stored on an account matches an expected value. Data validation checks provide a +way to include additional constraints to ensure the appropriate accounts are +passed into an instruction handler. + +This can be useful when accounts required by an instruction handler have +dependencies on values stored in other accounts or if an instruction handler is +dependent on the data stored in an account. + +### Missing data validation check + +The example below includes an `update_admin` instruction handler that updates +the `admin` field stored on an `admin_config` account. + +The instruction handler is missing a data validation check to verify the `admin` +account signing the transaction matches the `admin` stored on the `admin_config` +account. This means any account signing the transaction and passed into the +instruction handler as the `admin` account can update the `admin_config` +account. + +```rust +use anchor_lang::prelude::*; + +declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); + +#[program] +pub mod data_validation { + use super::*; + ... + pub fn update_admin(ctx: Context) -> Result<()> { + ctx.accounts.admin_config.admin = ctx.accounts.new_admin.key(); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct UpdateAdmin<'info> { + #[account(mut)] + pub admin_config: Account<'info, AdminConfig>, + #[account(mut)] + pub admin: Signer<'info>, + /// CHECK: This account will not be checked by anchor + pub new_admin: UncheckedAccount<'info>, +} + +#[account] +pub struct AdminConfig { + admin: Pubkey, +} +``` + +### Add Data Validation Check + +The basic Rust approach to solve this problem is to simply compare the passed in +`admin` key to the `admin` key stored in the `admin_config` account, throwing an +error if they don't match. 
+ +```rust +if ctx.accounts.admin.key() != ctx.accounts.admin_config.admin { + return Err(ProgramError::InvalidAccountData.into()); +} +``` + +By adding a data validation check, the `update_admin` instruction handler would +only process if the `admin` signer of the transaction matched the `admin` stored +on the `admin_config` account. + +```rust +use anchor_lang::prelude::*; + +declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); + +#[program] +pub mod data_validation { + use super::*; + ... + pub fn update_admin(ctx: Context) -> Result<()> { + if ctx.accounts.admin.key() != ctx.accounts.admin_config.admin { + return Err(ProgramError::InvalidAccountData.into()); + } + ctx.accounts.admin_config.admin = ctx.accounts.new_admin.key(); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct UpdateAdmin<'info> { + #[account(mut)] + pub admin_config: Account<'info, AdminConfig>, + #[account(mut)] + pub admin: Signer<'info>, + /// CHECK: This account will not be checked by anchor + pub new_admin: UncheckedAccount<'info>, +} + +#[account] +pub struct AdminConfig { + admin: Pubkey, +} +``` + +### Use Anchor Constraints + +Anchor simplifies this with the `has_one` constraint. You can use the `has_one` +constraint to move the data validation check from the instruction handler logic +to the `UpdateAdmin` struct. + +In the example below, `has_one = admin` specifies that the `admin` account +signing the transaction must match the `admin` field stored on the +`admin_config` account. To use the `has_one` constraint, the naming convention +of the data field on the account must be consistent with the naming on the +account validation struct. + +```rust +use anchor_lang::prelude::*; + +declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); + +#[program] +pub mod data_validation { + use super::*; + ... + pub fn update_admin(ctx: Context) -> Result<()> { + ctx.accounts.admin_config.admin = ctx.accounts.new_admin.key(); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct UpdateAdmin<'info> { + #[account( + mut, + has_one = admin + )] + pub admin_config: Account<'info, AdminConfig>, + #[account(mut)] + pub admin: Signer<'info>, + /// CHECK: This account will not be checked by anchor + pub new_admin: UncheckedAccount<'info>, +} + +#[account] +pub struct AdminConfig { + admin: Pubkey, +} +``` + +Alternatively, you can use `constraint` to manually add an expression that must +evaluate to true in order for execution to continue. This is useful when for +some reason naming can't be consistent or when you need a more complex +expression to fully validate the incoming data. + +```rust +#[derive(Accounts)] +pub struct UpdateAdmin<'info> { + #[account( + mut, + constraint = admin_config.admin == admin.key() + )] + pub admin_config: Account<'info, AdminConfig>, + #[account(mut)] + pub admin: Signer<'info>, + /// CHECK: This account will not be checked by anchor + pub new_admin: UncheckedAccount<'info>, +} +``` + +## Lab + +For this lab, we'll create a simple “vault” program similar to the program we +used in the Signer Authorization lesson and the Owner Check lesson. Similar to +those labs, we'll show in this lab how a missing data validation check could +allow the vault to be drained. + +### 1. Starter + +To get started, download the starter code from the +[`starter` branch of this repository](https://github.com/solana-developers/account-data-matching/tree/starter). +The starter code includes a program with two instructions and the boilerplate +setup for the test file. 
+ +The `initialize_vault` instruction handler initializes a new `Vault` account and +a new `TokenAccount`. The `Vault` account will store the address of a token +account, the authority of the vault, and a withdraw destination token account. + +The authority of the new token account will be set as the `vault`, a PDA of the +program. This allows the `vault` account to sign for the transfer of tokens from +the token account. + +The `insecure_withdraw` instruction handler transfers all the tokens in the +`vault` account's token account to a `withdraw_destination` token account. + + + +Notice that this instruction handler **does** have a signer check for +`authority` and an owner check for `vault`. However, nowhere in the account +validation or instruction handler logic is there code that checks that the +`authority` account passed into the instruction handler matches the `authority` +account on the `vault`. + + + +```rust +use anchor_lang::prelude::*; +use anchor_spl::token::{self, Mint, Token, TokenAccount}; + +declare_id!("J89xWAprDsLAAwcTA6AhrK49UMSAYJJWdXvw4ZQK4suu"); + +pub const DISCRIMINATOR_SIZE: usize = 8; + +#[program] +pub mod account_data_matching { + use super::*; + + pub fn initialize_vault(ctx: Context) -> Result<()> { + ctx.accounts.vault.token_account = ctx.accounts.token_account.key(); + ctx.accounts.vault.authority = ctx.accounts.authority.key(); + ctx.accounts.vault.withdraw_destination = ctx.accounts.withdraw_destination.key(); + Ok(()) + } + + pub fn insecure_withdraw(ctx: Context) -> Result<()> { + let amount = ctx.accounts.token_account.amount; + + let seeds = &[b"vault".as_ref(), &[ctx.bumps.vault]]; + let signer = [&seeds[..]]; + + let cpi_ctx = CpiContext::new_with_signer( + ctx.accounts.token_program.to_account_info(), + token::Transfer { + from: ctx.accounts.token_account.to_account_info(), + authority: ctx.accounts.vault.to_account_info(), + to: ctx.accounts.withdraw_destination.to_account_info(), + }, + &signer, + ); + + token::transfer(cpi_ctx, amount)?; + Ok(()) + } +} + +#[derive(Accounts)] +pub struct InitializeVault<'info> { + #[account( + init, + payer = authority, + space = DISCRIMINATOR_SIZE + Vault::INIT_SPACE, + seeds = [b"vault"], + bump, + )] + pub vault: Account<'info, Vault>, + #[account( + init, + payer = authority, + token::mint = mint, + token::authority = vault, + seeds = [b"token"], + bump, + )] + pub token_account: Account<'info, TokenAccount>, + pub withdraw_destination: Account<'info, TokenAccount>, + pub mint: Account<'info, Mint>, + #[account(mut)] + pub authority: Signer<'info>, + pub token_program: Program<'info, Token>, + pub system_program: Program<'info, System>, + pub rent: Sysvar<'info, Rent>, +} + +#[derive(Accounts)] +pub struct InsecureWithdraw<'info> { + #[account( + seeds = [b"vault"], + bump, + )] + pub vault: Account<'info, Vault>, + #[account( + mut, + seeds = [b"token"], + bump, + )] + pub token_account: Account<'info, TokenAccount>, + #[account(mut)] + pub withdraw_destination: Account<'info, TokenAccount>, + pub token_program: Program<'info, Token>, + pub authority: Signer<'info>, +} + +#[account] +#[derive(Default, InitSpace)] +pub struct Vault { + token_account: Pubkey, + authority: Pubkey, + withdraw_destination: Pubkey, +} +``` + +### 2. Test insecure_withdraw Instruction Handler + +To prove that this is a problem, let's write a test where an account other than +the vault's `authority` tries to withdraw from the vault. 
+ +The test file includes the code to invoke the `initialize_vault` instruction +handler using the provider wallet as the `authority` and then mints 100 tokens +to the `vault` token account. + +Add a test to invoke the `insecure_withdraw` instruction handler. Use +`fakeWithdrawDestination` as the `withdrawDestination` account and `fakeWallet` +as the `authority`. Then send the transaction using `fakeWallet`. + +Since there are no checks the verify the `authority` account passed into the +instruction handler matches the values stored on the `vault` account initialized +in the first test, the instruction handler will process successfully and the +tokens will be transferred to the `fakeWithdrawDestination` account. + +```typescript +describe("Account Data Matching", () => { + ... + it("allows insecure withdrawal", async () => { + try { + const tx = await program.methods + .insecureWithdraw() + .accounts({ + vault: vaultPDA, + tokenAccount: tokenPDA, + withdrawDestination: fakeWithdrawDestination, + authority: fakeWallet.publicKey, + }) + .transaction(); + + await anchor.web3.sendAndConfirmTransaction(provider.connection, tx, [ + fakeWallet, + ]); + + const tokenAccount = await getAccount(provider.connection, tokenPDA); + expect(Number(tokenAccount.amount)).to.equal(0); + } catch (error) { + throw new Error( + `Insecure withdraw failed unexpectedly: ${error.message}`, + ); + } + }); +}) +``` + +Run `anchor test` to see that both transactions will complete successfully. + +```bash +Account Data Matching + ✔ initializes the vault and mints tokens (879ms) + ✔ allows insecure withdrawal (431ms) +``` + +### 3. Add secure_withdraw Instruction Handler + +Let's go implement a secure version of this instruction handler called +`secure_withdraw`. + +This instruction handler will be identical to the `insecure_withdraw` +instruction handler, except we'll use the `has_one` constraint in the account +validation struct (`SecureWithdraw`) to check that the `authority` account +passed into the instruction handler matches the `authority` account on the +`vault` account. That way only the correct authority account can withdraw the +vault's tokens. + +```rust +use anchor_lang::prelude::*; +use anchor_spl::token::{self, Mint, Token, TokenAccount}; + +declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); + +pub const DISCRIMINATOR_SIZE: usize = 8; + +#[program] +pub mod account_data_matching { + use super::*; + ... + pub fn secure_withdraw(ctx: Context) -> Result<()> { + let amount = ctx.accounts.token_account.amount; + + let seeds = &[b"vault".as_ref(), &[ctx.bumps.vault]]; + let signer = [&seeds[..]]; + + let cpi_ctx = CpiContext::new_with_signer( + ctx.accounts.token_program.to_account_info(), + token::Transfer { + from: ctx.accounts.token_account.to_account_info(), + authority: ctx.accounts.vault.to_account_info(), + to: ctx.accounts.withdraw_destination.to_account_info(), + }, + &signer, + ); + + token::transfer(cpi_ctx, amount)?; + Ok(()) + } +} + +#[derive(Accounts)] +pub struct SecureWithdraw<'info> { + #[account( + seeds = [b"vault"], + bump, + has_one = token_account, + has_one = authority, + has_one = withdraw_destination, + )] + pub vault: Account<'info, Vault>, + #[account( + mut, + seeds = [b"token"], + bump, + )] + pub token_account: Account<'info, TokenAccount>, + #[account(mut)] + pub withdraw_destination: Account<'info, TokenAccount>, + pub token_program: Program<'info, Token>, + pub authority: Signer<'info>, +} +``` + +### 4. 
Test secure_withdraw Instruction Handler + +Now let's test the `secure_withdraw` instruction handler with two tests: one +that uses `fakeWallet` as the authority and one that uses `wallet` as the +authority. We expect the first invocation to return an error and the second to +succeed. + +```typescript +describe("account-data-matching", () => { + ... + it("prevents unauthorized secure withdrawal", async () => { + try { + const tx = await program.methods + .secureWithdraw() + .accounts({ + vault: vaultPDA, + tokenAccount: tokenPDA, + withdrawDestination: fakeWithdrawDestination, + authority: fakeWallet.publicKey, + }) + .transaction(); + + await anchor.web3.sendAndConfirmTransaction(provider.connection, tx, [ + fakeWallet, + ]); + + throw new Error("Secure withdraw should have failed but didn't"); + } catch (error) { + expect(error).to.be.an("error"); + console.log("Expected error occurred:", error.message); + } + }); + + it("allows secure withdrawal by authorized user", async () => { + try { + await new Promise((resolve) => setTimeout(resolve, 1000)); + + await mintTo( + provider.connection, + wallet.payer, + mint, + tokenPDA, + wallet.payer, + 100, + ); + + await program.methods + .secureWithdraw() + .accounts({ + vault: vaultPDA, + tokenAccount: tokenPDA, + withdrawDestination, + authority: wallet.publicKey, + }) + .rpc(); + + const tokenAccount = await getAccount(provider.connection, tokenPDA); + expect(Number(tokenAccount.amount)).to.equal(0); + } catch (error) { + throw new Error(`Secure withdraw failed unexpectedly: ${error.message}`); + } + }); +}) +``` + +Run `anchor test` to see that the transaction using an incorrect authority +account will now return an Anchor Error while the transaction using the correct +accounts complete successfully. + +```bash +"Program J89xWAprDsLAAwcTA6AhrK49UMSAYJJWdXvw4ZQK4suu invoke [1]", +"Program log: Instruction: SecureWithdraw", +"Program log: AnchorError caused by account: vault. Error Code: ConstraintHasOne. Error Number: 2001. Error Message: A has one constraint was violated.", +"Program log: Left:", +"Program log: GprrWv9r8BMxQiWea9MrbCyK7ig7Mj8CcseEbJhDDZXM", +"Program log: Right:", +"Program log: 2jTDDwaPzbpG2oFnnqtuHJpiS9k9dDVqzzfA2ofcqfFS", +"Program J89xWAprDsLAAwcTA6AhrK49UMSAYJJWdXvw4ZQK4suu consumed 11790 of 200000 compute units", +"Program J89xWAprDsLAAwcTA6AhrK49UMSAYJJWdXvw4ZQK4suu failed: custom program error: 0x7d1" +``` + +Note that Anchor specifies in the logs the account that causes the error +(`AnchorError caused by account: vault`). + +```bash +✔ prevents unauthorized secure withdrawal +✔ allows secure withdrawal by authorized user (1713ms) +``` + +And just like that, you've closed up the security loophole. The theme across +most of these potential exploits is that they're quite simple. However, as your +programs grow in scope and complexity, it becomes increasingly easy to miss +possible exploits. It's great to get in a habit of writing tests that send +instructions that _shouldn't_ work. The more the better. That way you catch +problems before you deploy. + +If you want to take a look at the final solution code you can find it on the +[`solution` branch of the repository](https://github.com/solana-developers/account-data-matching/tree/solution). + +## Challenge + +Just as with other lessons in this unit, your opportunity to practice avoiding +this security exploit lies in auditing your own or other programs. 
+ +Take some time to review at least one program and ensure that proper data checks +are in place to avoid security exploits. + +Remember, if you find a bug or exploit in somebody else's program, please alert +them! If you find one in your own program, be sure to patch it right away. + + + +Push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=a107787e-ad33-42bb-96b3-0592efc1b92f)! + + diff --git a/content/courses/program-security/arbitrary-cpi.mdx b/content/courses/program-security/arbitrary-cpi.mdx new file mode 100644 index 000000000..dd239ac3c --- /dev/null +++ b/content/courses/program-security/arbitrary-cpi.mdx @@ -0,0 +1,476 @@ +--- +title: Arbitrary CPI +objectives: + - Explain the security risks associated with invoking a CPI to an unknown + program + - Showcase how Anchor's CPI module prevents this from happening when making a + CPI from one Anchor program to another + - Safely and securely make a CPI from an Anchor program to an arbitrary + non-anchor program +description: "How to safely invoke Solana programs from other Solana programs." +--- + +## Summary + +- To generate a CPI, the target program must be passed into the invoking + instruction handler as an account. This means that any target program could be + passed into the instruction handler. Your program should check for incorrect + or unexpected programs. +- Perform program checks in native programs by simply comparing the public key + of the passed-in program to the program you expected. +- If a program is written in Anchor, then it may have a publicly available CPI + module. This makes invoking the program from another Anchor program simple and + secure. The Anchor CPI module automatically checks that the address of the + program passed in matches the address of the program stored in the module. + +## Lesson + +A cross program invocation (CPI) is when one program invokes an instruction +handler on another program. An “arbitrary CPI” is when a program is structured +to issue a CPI to whatever program is passed into the instruction handler rather +than expecting to perform a CPI to one specific program. Given that the callers +of your program's instruction handler can pass any program they'd like into the +instruction's list of accounts, failing to verify the address of a passed-in +program results in your program performing CPIs to arbitrary programs. + +This lack of program checks creates an opportunity for a malicious user to pass +in a different program than expected, causing the original program to call an +instruction handler on this mystery program. There's no telling what the +consequences of this CPI could be. It depends on the program logic (both that of +the original program and the unexpected program), as well as what other accounts +are passed into the original instruction handler. + +### Missing Program Checks + +Take the following program as an example. The `cpi` instruction handler invokes +the `transfer` instruction handler on `token_program`, but there is no code that +checks whether or not the `token_program` account passed into the instruction +handler is, in fact, the SPL Token Program. 
+ +```rust +use anchor_lang::prelude::*; +use anchor_lang::solana_program; + +declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); + +#[program] +pub mod arbitrary_cpi_insecure { + use super::*; + + pub fn cpi(ctx: Context, amount: u64) -> ProgramResult { + solana_program::program::invoke( + &spl_token::instruction::transfer( + ctx.accounts.token_program.key, + ctx.accounts.source.key, + ctx.accounts.destination.key, + ctx.accounts.authority.key, + &[], + amount, + )?, + &[ + ctx.accounts.source.clone(), + ctx.accounts.destination.clone(), + ctx.accounts.authority.clone(), + ], + ) + } +} + +#[derive(Accounts)] +pub struct Cpi<'info> { + source: UncheckedAccount<'info>, + destination: UncheckedAccount<'info>, + authority: UncheckedAccount<'info>, + token_program: UncheckedAccount<'info>, +} +``` + +An attacker could easily call this instruction handler and pass in a duplicate +token program that they created and control. + +### Add Program Checks + +It's possible to fix this vulnerability by simply adding a few lines to the +`cpi` instruction handler to check whether or not `token_program`'s public key +is that of the SPL Token Program. + +```rust +pub fn cpi_secure(ctx: Context, amount: u64) -> ProgramResult { + if &spl_token::ID != ctx.accounts.token_program.key { + return Err(ProgramError::IncorrectProgramId); + } + solana_program::program::invoke( + &spl_token::instruction::transfer( + ctx.accounts.token_program.key, + ctx.accounts.source.key, + ctx.accounts.destination.key, + ctx.accounts.authority.key, + &[], + amount, + )?, + &[ + ctx.accounts.source.clone(), + ctx.accounts.destination.clone(), + ctx.accounts.authority.clone(), + ], + ) +} +``` + +Now, if an attacker passes in a different token program, the instruction handler +will return the `ProgramError::IncorrectProgramId` error. + +Depending on the program you're invoking with your CPI, you can either hard code +the address of the expected program ID or use the program's Rust crate to get +the address of the program, if available. In the example above, the `spl_token` +crate provides the address of the SPL Token Program. + +### Use an Anchor CPI Module + +A simpler way to manage program checks is to use +[Anchor CPI](https://book.anchor-lang.com/anchor_in_depth/CPIs.html) module. We +learned in a +[previous lesson of Anchor CPI](/developers/courses/onchain-development/anchor-cpi) +that Anchor can automatically generate CPI modules to make CPIs into the program +simpler. These modules also enhance security by verifying the public key of the +program that's passed into one of its public instructions. + +Every Anchor program uses the `declare_id()` macro to define the address of the +program. When a CPI module is generated for a specific program, it uses the +address passed into this macro as the "source of truth" and will automatically +verify that all CPIs made using its CPI module target this program id. + +While at the core no different than manual program checks, using CPI modules +avoids the possibility of forgetting to perform a program check or accidentally +typing in the wrong program ID when hard-coding it. + +The program below shows an example of using a CPI module for the SPL Token +Program to perform the transfer shown in the previous examples. 
+ +```rust +use anchor_lang::prelude::*; +use anchor_spl::token::{self, Token, TokenAccount}; + +declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); + +#[program] +pub mod arbitrary_cpi_recommended { + use super::*; + + pub fn cpi(ctx: Context, amount: u64) -> ProgramResult { + token::transfer(ctx.accounts.transfer_ctx(), amount) + } +} + +#[derive(Accounts)] +pub struct Cpi<'info> { + source: Account<'info, TokenAccount>, + destination: Account<'info, TokenAccount>, + authority: Signer<'info>, + token_program: Program<'info, Token>, +} + +impl<'info> Cpi<'info> { + pub fn transfer_ctx(&self) -> CpiContext<'_, '_, '_, 'info, token::Transfer<'info>> { + let program = self.token_program.to_account_info(); + let accounts = token::Transfer { + from: self.source.to_account_info(), + to: self.destination.to_account_info(), + authority: self.authority.to_account_info(), + }; + CpiContext::new(program, accounts) + } +} +``` + + + +Like the example above, Anchor has created a few +[wrappers for popular native programs](https://github.com/coral-xyz/anchor/tree/master/spl/src) +that allow you to issue CPIs into them as if they were Anchor programs. + + + +Additionally and depending on the program you're making the CPI to, you may be +able to use Anchor's +[`Program` account type](https://docs.rs/anchor-lang/latest/anchor_lang/accounts/program/struct.Program.html) +to validate the passed-in program in your account validation struct. Between +the [`anchor_lang`](https://docs.rs/anchor-lang/latest/anchor_lang) and [`anchor_spl`](https://docs.rs/anchor_spl/latest/) crates, +the following `Program` types are provided out of the box: + +- [`System`](https://docs.rs/anchor-lang/latest/anchor_lang/system_program/struct.System.html) +- [`AssociatedToken`](https://docs.rs/anchor-spl/latest/anchor_spl/associated_token/struct.AssociatedToken.html) +- [`Token`](https://docs.rs/anchor-spl/latest/anchor_spl/token/struct.Token.html) + +If you have access to an Anchor program's CPI module, you typically can import +its program type with the following, replacing the program name with the name of +the actual program: + +```rust +use other_program::program::OtherProgram; +``` + +## Lab + +To show the importance of checking with program you use for CPIs, we're going to +work with a simplified and somewhat contrived game. This game represents +characters with PDA accounts, and uses a separate "metadata" program to manage +character metadata and attributes like health and power. + +While this example is somewhat contrived, it's actually almost identical +architecture to how NFTs on Solana work: the SPL Token Program manages the token +mints, distribution, and transfers, and a separate metadata program is used to +assign metadata to tokens. So the vulnerability we go through here could also be +applied to real tokens. + +### 1. Setup + +We'll start with the +[`starter` branch of this repository](https://github.com/solana-developers/arbitrary-cpi/tree/starter). +Clone the repository and then open it on the `starter` branch. + +Notice that there are three programs: + +1. `gameplay` +2. `character-metadata` +3. `fake-metadata` + +Additionally, there is already a test in the `tests` directory. + +The first program, `gameplay`, is the one that our test directly uses. Take a +look at the program. It has two instructions: + +1. `create_character_insecure` - creates a new character and CPI's into the + metadata program to set up the character's initial attributes +2. 
`battle_insecure` - pits two characters against each other, assigning a "win" + to the character with the highest attributes + +The second program, `character-metadata`, is meant to be the "approved" program +for handling character metadata. Have a look at this program. It has a single +instruction handler for `create_metadata` that creates a new PDA and assigns a +pseudo-random value between 0 and 20 for the character's health and power. + +The last program, `fake-metadata` is a "fake" metadata program meant to +illustrate what an attacker might make to exploit our `gameplay` program. This +program is almost identical to the `character-metadata` program, only it assigns +a character's initial health and power to be the max allowed: 255. + +### 2. Test create_character_insecure Instruction Handler + +There is already a test in the `tests` directory for this. It's long, but take a +minute to look at it before we talk through it together: + +```typescript +it("Insecure instructions allow attacker to win every time successfully", async () => { + try { + // Initialize player one with real metadata program + await gameplayProgram.methods + .createCharacterInsecure() + .accounts({ + metadataProgram: metadataProgram.programId, + authority: playerOne.publicKey, + }) + .signers([playerOne]) + .rpc(); + + // Initialize attacker with fake metadata program + await gameplayProgram.methods + .createCharacterInsecure() + .accounts({ + metadataProgram: fakeMetadataProgram.programId, + authority: attacker.publicKey, + }) + .signers([attacker]) + .rpc(); + + // Fetch both player's metadata accounts + const [playerOneMetadataKey] = getMetadataKey( + playerOne.publicKey, + gameplayProgram.programId, + metadataProgram.programId, + ); + + const [attackerMetadataKey] = getMetadataKey( + attacker.publicKey, + gameplayProgram.programId, + fakeMetadataProgram.programId, + ); + + const playerOneMetadata = + await metadataProgram.account.metadata.fetch(playerOneMetadataKey); + + const attackerMetadata = + await fakeMetadataProgram.account.metadata.fetch(attackerMetadataKey); + // The regular player should have health and power between 0 and 20 + expect(playerOneMetadata.health).to.be.lessThan(20); + expect(playerOneMetadata.power).to.be.lessThan(20); + + // The attacker will have health and power of 255 + expect(attackerMetadata.health).to.equal(255); + expect(attackerMetadata.power).to.equal(255); + } catch (error) { + console.error("Test failed:", error); + throw error; + } +}); +``` + +This test walks through the scenario where a regular player and an attacker both +create their characters. Only the attacker passes in the program ID of the fake +metadata program rather than the actual metadata program. And since the +`create_character_insecure` instruction has no program checks, it still +executes. + +The result is that the regular character has the appropriate amount of health +and power: each a value between 0 and 20. But the attacker's health and power +are each 255, making the attacker unbeatable. + +If you haven't already, run `anchor test` to see that this test in fact behaves +as described. + +### 3. Create a create_character_secure Instruction Handler + +Let's fix this by creating a secure instruction handler for creating a new +character. This instruction handler should implement proper program checks and +use the `character-metadata` program's `cpi` crate to do the CPI rather than +just using `invoke`. + +If you want to test out your skills, try this on your own before moving ahead. 
+ +We'll start by updating our `use` statement at the top of the `gameplay` +programs `lib.rs` file. We're giving ourselves access to the program's type for +account validation, and the helper function for issuing the `create_metadata` +CPI. + +```rust +use character_metadata::{ + cpi::accounts::CreateMetadata, + cpi::create_metadata, + program::CharacterMetadata, +}; +``` + +Next let's create a new account validation struct called +`CreateCharacterSecure`. This time, we make `metadata_program` a `Program` type: + +```rust +#[derive(Accounts)] +pub struct CreateCharacterSecure<'info> { + #[account(mut)] + pub authority: Signer<'info>, + #[account( + init, + payer = authority, + space = DISCRIMINATOR_SIZE + Character::INIT_SPACE, + seeds = [authority.key().as_ref()], + bump + )] + pub character: Account<'info, Character>, + #[account( + mut, + seeds = [character.key().as_ref()], + seeds::program = metadata_program.key(), + bump, + )] + /// CHECK: This account will not be checked by anchor + pub metadata_account: AccountInfo<'info>, + pub metadata_program: Program<'info, CharacterMetadata>, + pub system_program: Program<'info, System>, +} +``` + +Lastly, we add the `create_character_secure` instruction handler. It will be the +same as before but will use the full functionality of Anchor CPIs rather than +using `invoke` directly: + +```rust +pub fn create_character_secure(ctx: Context) -> Result<()> { + // Initialize character data + let character = &mut ctx.accounts.character; + character.metadata = ctx.accounts.metadata_account.key(); + character.authority = ctx.accounts.authority.key(); + character.wins = 0; + + // Prepare CPI context + let cpi_context = CpiContext::new( + ctx.accounts.metadata_program.to_account_info(), + CreateMetadata { + character: ctx.accounts.character.to_account_info(), + metadata: ctx.accounts.metadata_account.to_owned(), + authority: ctx.accounts.authority.to_account_info(), + system_program: ctx.accounts.system_program.to_account_info(), + }, + ); + + // Perform CPI to create metadata + create_metadata(cpi_context)?; + + Ok(()) +} +``` + +### 4. Test create_character_secure Instruction Handler + +Now that we have a secure way of initializing a new character, let's create a +new test. This test just needs to attempt to initialize the attacker's character +and expect an error to be thrown. + +```typescript +it("prevents secure character creation with fake program", async () => { + try { + await gameplayProgram.methods + .createCharacterSecure() + .accounts({ + metadataProgram: fakeMetadataProgram.programId, + authority: attacker.publicKey, + }) + .signers([attacker]) + .rpc(); + + throw new Error("Expected createCharacterSecure to throw an error"); + } catch (error) { + expect(error).to.be.instanceOf(Error); + console.log(error); + } +}); +``` + +Run `anchor test` if you haven't already. Notice that an error was thrown as +expected, detailing that the program ID passed into the instruction handler is +not the expected program ID: + +```bash +'Program log: AnchorError caused by account: metadata_program. Error Code: InvalidProgramId. Error Number: 3008. Error Message: Program ID was not as expected.', +'Program log: Left:', +'Program log: HQqG7PxftCD5BB9WUWcYksrjDLUwCmbV8Smh1W8CEgQm', +'Program log: Right:', +'Program log: 4FgVd2dgsFnXbSHz8fj9twNbfx8KWcBJkHa6APicU6KS' +``` + +That's all you need to do to protect against arbitrary CPIs! + +There may be times where you want more flexibility in your program's CPIs. 
We +certainly won't stop you from architecting the program you need, but please take +every precaution possible to ensure no vulnerabilities in your program. + +If you want to take a look at the final solution code you can find it on the +[`solution` branch of the same repository](https://github.com/solana-developers/arbitrary-cpi/tree/solution). + +## Challenge + +Just as with other lessons in this unit, your opportunity to practice avoiding +this security exploit lies in auditing your own or other programs. + +Take some time to review at least one program and ensure that program checks are +in place for every program passed into the instruction handlers, particularly +those that are invoked via CPI. + +Remember, if you find a bug or exploit in somebody else's program, please alert +them! If you find one in your own program, be sure to patch it right away. + + + +Push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=5bcaf062-c356-4b58-80a0-12cca99c29b0)! + + diff --git a/content/courses/program-security/bump-seed-canonicalization.mdx b/content/courses/program-security/bump-seed-canonicalization.mdx new file mode 100644 index 000000000..9d9a99768 --- /dev/null +++ b/content/courses/program-security/bump-seed-canonicalization.mdx @@ -0,0 +1,713 @@ +--- +title: Bump Seed Canonicalization +objectives: + - Explain the vulnerabilities associated with using PDAs derived without the + canonical bump + - Initialize a PDA using Anchor's `seeds` and `bump` constraints to + automatically use the canonical bump + - Use Anchor's `seeds` and `bump` constraints to ensure the canonical bump is + always used in future instructions when deriving a PDA +description: + "Understand the need for consistent PDA calculation by storing and reusing the + canonical bump." +--- + +## Summary + +- The + [**`create_program_address`**](https://docs.rs/solana-program/latest/solana_program/pubkey/struct.Pubkey.html#method.create_program_address) + function derives a PDA but does so without searching for the canonical bump. + It allows multiple valid bumps to produce different addresses. While this can + still generate a valid PDA, it lacks determinism, as multiple bumps may yield + different addresses for the same set of seeds. +- Using + [**`find_program_address`**](https://docs.rs/solana-program/latest/solana_program/pubkey/struct.Pubkey.html#method.find_program_address) + ensures that the **highest valid bump**, often referred to as the **canonical + bump**, is used in the PDA derivation. This provides a deterministic way to + compute an address for a given set of seeds, ensuring consistency across the + program. +- In Anchor, you can specify the `seeds` and the `bump` to ensure that PDA + derivations in your account validation struct always align with the correct + canonical bump. +- Anchor also allows you to specify a bump directly in the validation struct + using the `bump = ` constraint. This ensures that the correct bump + is used when verifying the PDA. +- Using `find_program_address` can be computationally expensive due to the + process of searching for the highest valid bump. It's considered best practice + to store the derived bump in an account's data field upon initialization. This + allows the bump to be referenced in subsequent instruction handlers, avoiding + the need to repeatedly call `find_program_address` to re-derive the PDA. 
+ + ```rust + #[derive(Accounts)] + pub struct VerifyAddress<'info> { + #[account( + seeds = [DATA_PDA_SEED.as_bytes()], + bump = data.bump + )] + data: Account<'info, Data>, + } + ``` + +- In summary, while `create_program_address` can generate a PDA, + `find_program_address` ensures consistency and reliability by always producing + the canonical bump, which is critical for deterministic program execution. + This helps maintain integrity in onchain apps, especially when validating PDAs + across multiple instruction handlers. + +## Lesson + +Bump seeds are a number between 0 and 255, inclusive, used to ensure that an +address derived using +[`create_program_address`](https://docs.rs/solana-program/latest/solana_program/pubkey/struct.Pubkey.html#method.create_program_address) +is a valid PDA. The **canonical bump** is the highest bump value that produces a +valid PDA. The standard in Solana is to _always use the canonical bump_ when +deriving PDAs, both for security and convenience. + +### Insecure PDA Derivation using create_program_address + +Given a set of seeds, the `create_program_address` function will produce a valid +PDA about 50% of the time. The bump seed is an additional byte added as a seed +to "bump" the derived address into a valid territory. Since there are 256 +possible bump seeds and the function produces valid PDAs approximately 50% of +the time, there are many valid bumps for a given set of input seeds. + +You can imagine that this could cause confusion in locating accounts when using +seeds as a way of mapping between known pieces of information to accounts. Using +the canonical bump as the standard ensures that you can always find the right +account. More importantly, it avoids security exploits caused by the open-ended +nature of allowing multiple bumps. + +In the example below, the `set_value` instruction handler uses a `bump` that was +passed in as instruction data to derive a PDA. The instruction handler then +derives the PDA using `create_program_address` function and checks that the +`address` matches the public key of the `data` account. + +```rust +use anchor_lang::prelude::*; + +declare_id!("ABQaKhtpYQUUgZ9m2sAY7ZHxWv6KyNdhUJW8Dh8NQbkf"); + +#[program] +pub mod bump_seed_canonicalization_insecure { + use super::*; + + // Insecure PDA Derivation using create_program_address + pub fn set_value(ctx: Context, key: u64, new_value: u64, bump: u8) -> Result<()> { + let address = + Pubkey::create_program_address(&[key.to_le_bytes().as_ref(), &[bump]], ctx.program_id) + .unwrap(); + if address != ctx.accounts.data.key() { + return Err(ProgramError::InvalidArgument.into()); + } + + ctx.accounts.data.value = new_value; + + Ok(()) + } +} + +#[derive(Accounts)] +pub struct BumpSeed<'info> { + #[account(mut)] + pub data: Account<'info, Data>, +} + +#[account] +pub struct Data { + pub value: u64, +} +``` + +While the instruction handler derives the PDA and checks the passed-in account, +which is good, it allows the caller to pass in an arbitrary bump. Depending on +the context of your program, this could result in undesired behavior or +potential exploit. + +If the seed mapping was meant to enforce a one-to-one relationship between PDA +and user, for example, this program would not properly enforce that. A user +could call the program multiple times with many valid bumps, each producing a +different PDA. 
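+
+To see how loose this mapping is, you can enumerate every bump off-chain and
+count how many of them yield a valid PDA for the same seed. The following is a
+minimal standalone sketch (assuming the `solana-program` crate as a
+dependency); it reuses the program ID from the example above, and `key = 0` is
+just an arbitrary value for illustration:
+
+```rust
+use solana_program::pubkey::Pubkey;
+use std::str::FromStr;
+
+fn main() {
+    // Program ID from the insecure example above
+    let program_id =
+        Pubkey::from_str("ABQaKhtpYQUUgZ9m2sAY7ZHxWv6KyNdhUJW8Dh8NQbkf").unwrap();
+    let key: u64 = 0;
+
+    // Collect every (bump, PDA) pair that create_program_address accepts
+    let mut valid_pdas = Vec::new();
+    for bump in (0u8..=255).rev() {
+        if let Ok(pda) = Pubkey::create_program_address(
+            &[key.to_le_bytes().as_ref(), &[bump]],
+            &program_id,
+        ) {
+            valid_pdas.push((bump, pda));
+        }
+        // Roughly half of the bumps fail because the result lands on the curve
+    }
+
+    println!("{} valid PDAs share the seed {}", valid_pdas.len(), key);
+    if let Some((bump, pda)) = valid_pdas.first() {
+        println!("canonical (highest) bump {} -> {}", bump, pda);
+    }
+}
+```
+
+Since roughly half of all 256 bumps are valid, a run of this sketch should
+report on the order of a hundred-plus distinct addresses for a single seed,
+which is exactly why a program that accepts any caller-supplied bump cannot
+treat the seed as a unique identifier.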
+ +### Recommended Derivation using find_program_address + +A simple way around this problem is to have the program expect only the +canonical bump and use `find_program_address` to derive the PDA. + +The +[`find_program_address`](https://docs.rs/solana-program/latest/solana_program/pubkey/struct.Pubkey.html#method.find_program_address) +_always uses the canonical bump_. This function iterates by calling +`create_program_address`, starting with a bump of 255 and decrementing the bump +by one with each iteration. As soon as a valid address is found, the function +returns both the derived PDA and the canonical bump used to derive it. + +This ensures a one-to-one mapping between your input seeds and the address they +produce. + +```rust +pub fn set_value_secure( + ctx: Context, + key: u64, + new_value: u64, + bump: u8, +) -> Result<()> { + let (address, expected_bump) = + Pubkey::find_program_address(&[key.to_le_bytes().as_ref()], ctx.program_id); + + if address != ctx.accounts.data.key() { + return Err(ProgramError::InvalidArgument.into()); + } + if expected_bump != bump { + return Err(ProgramError::InvalidArgument.into()); + } + + ctx.accounts.data.value = new_value; + Ok(()) +} +``` + +### Use Anchor's seeds and bump Constraints + +Anchor provides a convenient way to derive PDAs in the account validation struct +using the `seeds` and `bump` constraints. These can even be combined with the +`init` constraint to initialize the account at the intended address. To protect +the program from the vulnerability we've been discussing throughout this lesson, +Anchor does not even allow you to initialize an account at a PDA using anything +but the canonical bump. Instead, it uses `find_program_address` to derive the +PDA and subsequently performs the initialization. + +```rust +use anchor_lang::prelude::*; + +declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); + +pub const DISCRIMINATOR_SIZE: usize = 8; + +#[program] +pub mod bump_seed_canonicalization_recommended { + use super::*; + + pub fn set_value(ctx: Context, _key: u64, new_value: u64) -> Result<()> { + ctx.accounts.data.value = new_value; + Ok(()) + } +} +// Initialize account at PDA +#[derive(Accounts)] +#[instruction(key: u64)] +pub struct BumpSeed<'info> { + #[account(mut)] + pub payer: Signer<'info>, + #[account( + init, + seeds = [key.to_le_bytes().as_ref()], + // Derives the PDA using the canonical bump + bump, + payer = payer, + space = DISCRIMINATOR_SIZE + Data::INIT_SPACE + )] + pub data: Account<'info, Data>, + + pub system_program: Program<'info, System>, +} + +#[account] +#[derive(InitSpace)] +pub struct Data { + pub value: u64, +} +``` + +If you aren't initializing an account, you can still validate PDAs with the +`seeds` and `bump` constraints. This simply rederives the PDA and compares the +derived address with the address of the account passed in. + +In this scenario, Anchor _does_ allow you to specify the bump to use to derive +the PDA with `bump = `. The intent here is not for you to use +arbitrary bumps, but rather to let you optimize your program. The iterative +nature of `find_program_address` makes it expensive, so best practice is to +store the canonical bump in the PDA account's data upon initializing a PDA, +allowing you to reference the bump stored when validating the PDA in subsequent +instruction handlers. + +When you specify the bump to use, Anchor uses `create_program_address` with the +provided bump instead of `find_program_address`. 
This pattern of storing the +bump in the account data ensures that your program always uses the canonical +bump without degrading performance. + +```rust +use anchor_lang::prelude::*; + +declare_id!("CVwV9RoebTbmzsGg1uqU1s4a3LvTKseewZKmaNLSxTqc"); + +// Constant for account space calculation +pub const DISCRIMINATOR_SIZE: usize = 8; + +#[program] +pub mod bump_seed_canonicalization_recommended { + use super::*; + + // Instruction handler to set a value and store the bump + pub fn set_value(ctx: Context, _key: u64, new_value: u64) -> Result<()> { + ctx.accounts.data.value = new_value; + + // Store the canonical bump on the account + // This bump is automatically derived by Anchor + ctx.accounts.data.bump = ctx.bumps.data; + + Ok(()) + } + + // Instruction handler to verify the PDA address + pub fn verify_address(ctx: Context, _key: u64) -> Result<()> { + msg!("PDA confirmed to be derived with canonical bump: {}", ctx.accounts.data.key()); + Ok(()) + } +} + +// Account validation struct for initializing the PDA account +#[derive(Accounts)] +#[instruction(key: u64)] +pub struct BumpSeed<'info> { + #[account(mut)] + pub payer: Signer<'info>, + + #[account( + init, + seeds = [key.to_le_bytes().as_ref()], + bump, // Anchor automatically uses the canonical bump + payer = payer, + space = DISCRIMINATOR_SIZE + Data::INIT_SPACE + )] + pub data: Account<'info, Data>, + + pub system_program: Program<'info, System> +} + +// Account validation struct for verifying the PDA address +#[derive(Accounts)] +#[instruction(key: u64)] +pub struct VerifyAddress<'info> { + #[account( + seeds = [key.to_le_bytes().as_ref()], + bump = data.bump // Use the stored bump, guaranteed to be canonical + )] + pub data: Account<'info, Data>, +} + +// Data structure for the PDA account +#[account] +#[derive(InitSpace)] +pub struct Data { + pub value: u64, + pub bump: u8 // Stores the canonical bump +} +``` + +If you don't specify the bump on the `bump` constraint, Anchor will still use +`find_program_address` to derive the PDA using the canonical bump. As a +consequence, your instruction handler will incur a variable amount of compute +budget. Programs that are already at risk of exceeding their compute budget +should use this with care since there is a chance that the program's budget may +be occasionally and unpredictably exceeded. + +On the other hand, if you only need to verify the address of a PDA passed in +without initializing an account, you'll be forced to either let Anchor derive +the canonical bump or expose your program to unnecessary risks. In that case, +please use the canonical bump despite the slight mark against performance. + +## Lab + +To demonstrate the security exploits possible when you don't check for the +canonical bump, let's work with a program that lets each program user "claim" +rewards on time. + +### 1. Setup + +Start by getting the code on the +[`starter` branch of this repository](https://github.com/solana-developers/bump-seed-canonicalization/tree/starter). + +Notice that there are two instruction handlers on the program and a single test +in the `tests` directory. + +The instruction handlers on the program are: + +1. `create_user_insecure` +2. `claim_insecure` + +The `create_user_insecure` instruction handler simply creates a new account at a +PDA derived using the signer's public key and a passed-in bump. + +The `claim_insecure` instruction handler mints 10 tokens to the user and then +marks the account's rewards as claimed so that they can't claim again. 
+ +However, the program doesn't explicitly check that the PDAs in question are +using the canonical bump. + +Have a look at the program to understand what it does before proceeding. + +### 2. Test Insecure Instruction Handlers + +Since the instruction handlers don't explicitly require the `user` PDA to use +the canonical bump, an attacker can create multiple accounts per wallet and +claim more rewards than should be allowed. + +The test in the `tests` directory creates a new keypair called `attacker` to +represent an attacker. It then loops through all possible bumps and calls +`create_user_insecure` and `claim_insecure`. By the end, the test expects that +the attacker has been able to claim rewards multiple times and has earned more +than the 10 tokens allotted per user. + +```typescript +it("allows attacker to claim more than reward limit with insecure instruction handlers", async () => { + try { + const attacker = Keypair.generate(); + await airdropIfRequired( + connection, + attacker.publicKey, + 1 * LAMPORTS_PER_SOL, + 0.5 * LAMPORTS_PER_SOL, + ); + const ataKey = await getAssociatedTokenAddress(mint, attacker.publicKey); + + let successfulClaimCount = 0; + + for (let i = 0; i < 256; i++) { + try { + const pda = anchor.web3.PublicKey.createProgramAddressSync( + [attacker.publicKey.toBuffer(), Buffer.from([i])], + program.programId, + ); + await program.methods + .createUserInsecure(i) + .accounts({ + user: pda, + payer: attacker.publicKey, + }) + .signers([attacker]) + .rpc(); + await program.methods + .claimInsecure(i) + .accounts({ + user: pda, + mint, + payer: attacker.publicKey, + userAta: ataKey, + mintAuthority, + tokenProgram: anchor.utils.token.TOKEN_PROGRAM_ID, + associatedTokenProgram: anchor.utils.token.ASSOCIATED_PROGRAM_ID, + systemProgram: anchor.web3.SystemProgram.programId, + rent: anchor.web3.SYSVAR_RENT_PUBKEY, + }) + .signers([attacker]) + .rpc(); + + successfulClaimCount += 1; + } catch (error) { + if ( + error instanceof Error && + !error.message.includes( + "Invalid seeds, address must fall off the curve", + ) + ) { + console.error(error); + } + } + } + + const ata = await getAccount(connection, ataKey); + + console.log( + `Attacker claimed ${successfulClaimCount} times and got ${Number( + ata.amount, + )} tokens`, + ); + + expect(successfulClaimCount).to.be.greaterThan(1); + expect(Number(ata.amount)).to.be.greaterThan(10); + } catch (error) { + throw new Error(`Test failed: ${error.message}`); + } +}); +``` + +Run `anchor test` to see that this test passes, showing that the attacker is +successful. Since the test calls the instruction handlers for every valid bump, +it takes a bit to run, so be patient. + +```bash + Bump seed canonicalization +Attacker claimed 121 times and got 1210 tokens + ✔ allows attacker to claim more than reward limit with insecure instructions (119994ms) +``` + +### 3. Create Secure Instruction Handler + +Let's demonstrate patching the vulnerability by creating two new instruction +handlers: + +1. `create_user_secure` +2. `claim_secure` + +Before we write the account validation or instruction handler logic, let's +create a new user type, `UserSecure`. This new type will add the canonical bump +as a field on the struct. + +```rust +// Secure user account structure +#[account] +#[derive(InitSpace)] +pub struct UserSecure { + pub auth: Pubkey, + pub bump: u8, + pub rewards_claimed: bool, +} +``` + +Next, let's create account validation structs for each of the new instruction +handlers. 
They'll be very similar to the insecure versions but will let Anchor +handle the derivation and deserialization of the PDAs. + +```rust +// Account validation struct for securely creating a user account +#[derive(Accounts)] +pub struct CreateUserSecure<'info> { + #[account(mut)] + pub payer: Signer<'info>, + #[account( + init, + payer = payer, + space = DISCRIMINATOR_SIZE + UserSecure::INIT_SPACE, + seeds = [payer.key().as_ref()], + bump + )] + pub user: Account<'info, UserSecure>, + pub system_program: Program<'info, System>, +} + +// Account validation struct for secure claiming of rewards +#[derive(Accounts)] +pub struct SecureClaim<'info> { + #[account( + mut, + seeds = [payer.key().as_ref()], + bump = user.bump, + constraint = !user.rewards_claimed @ ClaimError::AlreadyClaimed, + constraint = user.auth == payer.key() + )] + pub user: Account<'info, UserSecure>, + #[account(mut)] + pub payer: Signer<'info>, + #[account( + init_if_needed, + payer = payer, + associated_token::mint = mint, + associated_token::authority = payer + )] + pub user_ata: Account<'info, TokenAccount>, + #[account(mut)] + pub mint: Account<'info, Mint>, + /// CHECK: This is the mint authority PDA, checked by seeds constraint + #[account(seeds = [b"mint"], bump)] + pub mint_authority: UncheckedAccount<'info>, + pub token_program: Program<'info, Token>, + pub associated_token_program: Program<'info, AssociatedToken>, + pub system_program: Program<'info, System>, + pub rent: Sysvar<'info, Rent>, +} +``` + +Finally, let's implement the instruction handler logic for the two new +instruction handlers. The `create_user_secure` instruction handler simply needs +to set the `auth`, `bump` and `rewards_claimed` fields on the `user` account +data. + +```rust +// Secure instruction to create a user account +pub fn create_user_secure(ctx: Context) -> Result<()> { + ctx.accounts.user.set_inner(UserSecure { + auth: ctx.accounts.payer.key(), + bump: ctx.bumps.user, + rewards_claimed: false, + }); + Ok(()) +} +``` + +The `claim_secure` instruction handler needs to mint 10 tokens to the user and +set the `user` account's `rewards_claimed` field to `true`. + +```rust +// Secure instruction to claim rewards +pub fn claim_secure(ctx: Context) -> Result<()> { + // Mint tokens to the user's associated token account + token::mint_to( + CpiContext::new_with_signer( + ctx.accounts.token_program.to_account_info(), + MintTo { + mint: ctx.accounts.mint.to_account_info(), + to: ctx.accounts.user_ata.to_account_info(), + authority: ctx.accounts.mint_authority.to_account_info(), + }, + &[&[b"mint", &[ctx.bumps.mint_authority]]], + ), + 10, + )?; + + // Mark rewards as claimed + ctx.accounts.user.rewards_claimed = true; + + Ok(()) +} +``` + +### 4. Test Secure Instruction Handlers + +Let's go ahead and write a test to show that the attacker can no longer claim +more than once using the new instruction handlers. + +Notice that if you start to loop through using multiple PDAs like the old test, +you can't even pass the non-canonical bump to the instruction handlers. However, +you can still loop through using the various PDAs and at the end check that only +1 claim happened for a total of 10 tokens. 
Your final test will look something +like this: + +```typescript +it("allows attacker to claim only once with secure instruction handlers", async () => { + try { + const attacker = Keypair.generate(); + await airdropIfRequired( + connection, + attacker.publicKey, + 1 * LAMPORTS_PER_SOL, + 0.5 * LAMPORTS_PER_SOL, + ); + const ataKey = await getAssociatedTokenAddress(mint, attacker.publicKey); + const [userPDA] = anchor.web3.PublicKey.findProgramAddressSync( + [attacker.publicKey.toBuffer()], + program.programId, + ); + + await program.methods + .createUserSecure() + .accounts({ + payer: attacker.publicKey, + user: userPDA, + systemProgram: anchor.web3.SystemProgram.programId, + }) + .signers([attacker]) + .rpc(); + + await program.methods + .claimSecure() + .accounts({ + payer: attacker.publicKey, + user: userPDA, + userAta: ataKey, + mint, + mintAuthority, + tokenProgram: anchor.utils.token.TOKEN_PROGRAM_ID, + associatedTokenProgram: anchor.utils.token.ASSOCIATED_PROGRAM_ID, + systemProgram: anchor.web3.SystemProgram.programId, + rent: anchor.web3.SYSVAR_RENT_PUBKEY, + }) + .signers([attacker]) + .rpc(); + + let successfulClaimCount = 1; + + for (let i = 0; i < 256; i++) { + try { + const pda = anchor.web3.PublicKey.createProgramAddressSync( + [attacker.publicKey.toBuffer(), Buffer.from([i])], + program.programId, + ); + await program.methods + .createUserSecure() + .accounts({ + user: pda, + payer: attacker.publicKey, + systemProgram: anchor.web3.SystemProgram.programId, + }) + .signers([attacker]) + .rpc(); + + await program.methods + .claimSecure() + .accounts({ + payer: attacker.publicKey, + user: pda, + userAta: ataKey, + mint, + mintAuthority, + tokenProgram: anchor.utils.token.TOKEN_PROGRAM_ID, + associatedTokenProgram: anchor.utils.token.ASSOCIATED_PROGRAM_ID, + systemProgram: anchor.web3.SystemProgram.programId, + rent: anchor.web3.SYSVAR_RENT_PUBKEY, + }) + .signers([attacker]) + .rpc(); + + successfulClaimCount += 1; + } catch (error) { + if ( + error instanceof Error && + !error.message.includes("Error Number: 2006") && + !error.message.includes( + "Invalid seeds, address must fall off the curve", + ) + ) { + // Comment console error logs to see the test outputs properly + console.error(error); + } + } + } + + const ata = await getAccount(connection, ataKey); + + console.log( + `Attacker claimed ${successfulClaimCount} times and got ${Number( + ata.amount, + )} tokens`, + ); + + expect(Number(ata.amount)).to.equal(10); + expect(successfulClaimCount).to.equal(1); + } catch (error) { + throw new Error(`Test failed: ${error.message}`); + } +}); +``` + +```bash + Bump seed canonicalization +Attacker claimed 119 times and got 1190 tokens + ✔ allows attacker to claim more than reward limit with insecure instruction handlers (117370ms) +Attacker claimed 1 times and got 10 tokens + ✔ allows attacker to claim only once with secure instruction handlers (16362ms) +``` + +If you use Anchor for all of the PDA derivations, this particular exploit is +pretty simple to avoid. However, if you end up doing anything "non-standard," be +careful to design your program to explicitly use the canonical bump! + +If you want to take a look at the final solution code you can find it on the +[`solution` branch of the same repository](https://github.com/solana-developers/bump-seed-canonicalization/tree/solution). + +## Challenge + +Just as with other lessons in this unit, your opportunity to practice avoiding +this security exploit lies in auditing your own or other programs. 
+ +Take some time to review at least one program and ensure that all PDA +derivations and checks are using the canonical bump. + +Remember, if you find a bug or exploit in somebody else's program, please alert +them! If you find one in your own program, be sure to patch it right away. + + + +Push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=d3f6ca7a-11c8-421f-b7a3-d6c08ef1aa8b)! + + diff --git a/content/courses/program-security/closing-accounts.mdx b/content/courses/program-security/closing-accounts.mdx new file mode 100644 index 000000000..daba085db --- /dev/null +++ b/content/courses/program-security/closing-accounts.mdx @@ -0,0 +1,571 @@ +--- +title: Closing Accounts and Revival Attacks +objectives: + - Explain the various security vulnerabilities associated with closing program + accounts incorrectly + - Close program accounts safely and securely using native Rust + - Close program accounts safely and securely using the Anchor `close` + constraint +description: + "How to close program accounts safely and securely in Anchor and native Rust." +--- + +## Summary + +- **Closing an account** improperly creates an opportunity for + reinitialization/revival attacks +- The Solana runtime **garbage collects accounts** when they are no longer rent + exempt. Closing accounts involves transferring the lamports stored in the + account for rent exemption to another account of your choosing. +- You can use the Anchor `#[account(close = )]` + constraint to securely close accounts and set the account discriminator to the + `CLOSED_ACCOUNT_DISCRIMINATOR` + ```rust + #[account(mut, close = receiver)] + pub data_account: Account<'info, MyData>, + #[account(mut)] + pub receiver: SystemAccount<'info> + ``` + +## Lesson + +While it sounds simple, closing accounts properly can be tricky. There are a +number of ways an attacker could circumvent having the account closed if you +don't follow specific steps. + +To get a better understanding of these attack vectors, let's explore each of +these scenarios in depth. + +### Insecure account closing + +At its core, closing an account involves transferring its lamports to a separate +account, thus triggering the Solana runtime to garbage collect the first +account. This resets the owner from the owning program to the system program. + +Take a look at the example below. The instruction requires two accounts: + +1. `account_to_close` - the account to be closed +2. `destination` - the account that should receive the closed account's lamports + +The program logic is intended to close an account by simply increasing the +`destination` account's lamports by the amount stored in the `account_to_close` +and setting the `account_to_close` lamports to 0. With this program, after a +full transaction is processed, the `account_to_close` will be garbage collected +by the runtime. 
+ +```rust +use anchor_lang::prelude::*; + +declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); + +#[program] +pub mod closing_accounts_insecure { + use super::*; + + pub fn close(ctx: Context) -> ProgramResult { + let dest_starting_lamports = ctx.accounts.destination.lamports(); + + **ctx.accounts.destination.lamports.borrow_mut() = dest_starting_lamports + .checked_add(ctx.accounts.account_to_close.to_account_info().lamports()) + .unwrap(); + **ctx.accounts.account_to_close.to_account_info().lamports.borrow_mut() = 0; + + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Close<'info> { + account_to_close: Account<'info, Data>, + destination: AccountInfo<'info>, +} + +#[account] +pub struct Data { + data: u64, +} +``` + +However, the garbage collection doesn't occur until the transaction completes. +And since there can be multiple instructions in a transaction, this creates an +opportunity for an attacker to invoke the instruction to close the account but +also include in the transaction a transfer to refund the account's rent +exemption lamports. The result is that the account _will not_ be garbage +collected, opening up a path for the attacker to cause unintended behavior in +the program and even drain a protocol. + +### Secure account closing + +The two most important things you can do to close this loophole are to zero out +the account data and add an account discriminator that represents the account +has been closed. You need _both_ of these things to avoid unintended program +behavior. + +An account with zeroed out data can still be used for some things, especially if +it's a PDA whose address derivation is used within the program for verification +purposes. However, the damage may be potentially limited if the attacker can't +access the previously-stored data. + +To further secure the program, however, closed accounts should be given an +account discriminator that designates it as "closed," and all instructions +should perform checks on all passed-in accounts that return an error if the +account is marked closed. + +Look at the example below. This program transfers the lamports out of an +account, zeroes out the account data, and sets an account discriminator in a +single instruction in hopes of preventing a subsequent instruction from +utilizing this account again before it has been garbage collected. Failing to do +any one of these things would result in a security vulnerability. 
+ +```rust +use anchor_lang::prelude::*; +use std::io::Write; +use std::ops::DerefMut; + +declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); + +#[program] +pub mod closing_accounts_insecure_still_still { + use super::*; + + pub fn close(ctx: Context) -> ProgramResult { + let account = ctx.accounts.account.to_account_info(); + + let dest_starting_lamports = ctx.accounts.destination.lamports(); + + **ctx.accounts.destination.lamports.borrow_mut() = dest_starting_lamports + .checked_add(account.lamports()) + .unwrap(); + **account.lamports.borrow_mut() = 0; + + let mut data = account.try_borrow_mut_data()?; + for byte in data.deref_mut().iter_mut() { + *byte = 0; + } + + let dst: &mut [u8] = &mut data; + let mut cursor = std::io::Cursor::new(dst); + cursor + .write_all(&anchor_lang::__private::CLOSED_ACCOUNT_DISCRIMINATOR) + .unwrap(); + + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Close<'info> { + account: Account<'info, Data>, + destination: AccountInfo<'info>, +} + +#[account] +pub struct Data { + data: u64, +} +``` + +Note that the example above is using Anchor's `CLOSED_ACCOUNT_DISCRIMINATOR`. +This is simply an account discriminator where each byte is `255`. The +discriminator doesn't have any inherent meaning, but if you couple it with +account validation checks that return errors any time an account with this +discriminator is passed to an instruction, you'll stop your program from +unintentionally processing an instruction with a closed account. + +#### Manual Force Defund + +There is still one small issue. While the practice of zeroing out account data +and adding a "closed" account discriminator will stop your program from being +exploited, a user can still keep an account from being garbage collected by +refunding the account's lamports before the end of an instruction. This results +in one or potentially many accounts existing in a limbo state where they cannot +be used but also cannot be garbage collected. + +To handle this edge case, you may consider adding an instruction that will allow +_anyone_ to defund accounts tagged with the "closed" account discriminator. The +only account validation this instruction would perform is to ensure that the +account being defunded is marked as closed. It may look something like this: + +```rust +use anchor_lang::__private::CLOSED_ACCOUNT_DISCRIMINATOR; +use anchor_lang::prelude::*; +use std::io::{Cursor, Write}; +use std::ops::DerefMut; + +... + + pub fn force_defund(ctx: Context) -> ProgramResult { + let account = &ctx.accounts.account; + + let data = account.try_borrow_data()?; + assert!(data.len() > 8); + + let mut discriminator = [0u8; 8]; + discriminator.copy_from_slice(&data[0..8]); + if discriminator != CLOSED_ACCOUNT_DISCRIMINATOR { + return Err(ProgramError::InvalidAccountData); + } + + let dest_starting_lamports = ctx.accounts.destination.lamports(); + + **ctx.accounts.destination.lamports.borrow_mut() = dest_starting_lamports + .checked_add(account.lamports()) + .unwrap(); + **account.lamports.borrow_mut() = 0; + + Ok(()) + } + +... + +#[derive(Accounts)] +pub struct ForceDefund<'info> { + account: AccountInfo<'info>, + destination: AccountInfo<'info>, +} +``` + +Since anyone can call this instruction, this can act as a deterrent to attempted +revival attacks since the attacker is paying for account rent exemption but +anyone else can claim the lamports in a refunded account for themselves. + +While not necessary, this can help eliminate the waste of space and lamports +associated with these "limbo" accounts. 
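+
+Complementing this, every other instruction handler should refuse to operate on
+an account that already carries the closed discriminator. A rough sketch of
+such a guard is shown below; the helper name and the specific error returned
+are placeholders rather than part of the example program, and as the lab later
+shows, Anchor's own account deserialization performs a comparable discriminator
+check for you.
+
+```rust
+use anchor_lang::__private::CLOSED_ACCOUNT_DISCRIMINATOR;
+use anchor_lang::prelude::*;
+
+// Hypothetical guard to call at the top of other instruction handlers.
+pub fn assert_not_closed(account: &AccountInfo) -> ProgramResult {
+    let data = account.try_borrow_data()?;
+
+    // Accounts too small to hold a discriminator can't have been marked
+    // closed by the instruction above.
+    if data.len() < 8 {
+        return Ok(());
+    }
+
+    let mut discriminator = [0u8; 8];
+    discriminator.copy_from_slice(&data[0..8]);
+
+    if discriminator == CLOSED_ACCOUNT_DISCRIMINATOR {
+        // Reject accounts that have been tagged as closed.
+        return Err(ProgramError::InvalidAccountData);
+    }
+
+    Ok(())
+}
+```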
+ +### Use the Anchor `close` constraint + +Fortunately, Anchor makes all of this much simpler with the +`#[account(close = )]` constraint. This constraint handles +everything required to securely close an account: + +1. Transfers the account's lamports to the given `` +2. Zeroes out the account data +3. Sets the account discriminator to the `CLOSED_ACCOUNT_DISCRIMINATOR` variant + +All you have to do is add it in the account validation struct to the account you +want closed: + +```rust +#[derive(Accounts)] +pub struct CloseAccount { + #[account( + mut, + close = receiver + )] + pub data_account: Account<'info, MyData>, + #[account(mut)] + pub receiver: SystemAccount<'info> +} +``` + +The `force_defund` instruction is an optional addition that you'll have to +implement on your own if you'd like to utilize it. + +## Lab + +To clarify how an attacker might take advantage of a revival attack, let's work +with a simple lottery program that uses program account state to manage a user's +participation in the lottery. + +### 1. Setup + +Start by getting the code on the `starter` branch from the +[following repo](https://github.com/Unboxed-Software/solana-closing-accounts/tree/starter). + +The code has two instructions on the program and two tests in the `tests` +directory. + +The program instructions are: + +1. `enter_lottery` +2. `redeem_rewards_insecure` + +When a user calls `enter_lottery`, the program will initialize an account to +store some state about the user's lottery entry. + +Since this is a simplified example rather than a fully-fledge lottery program, +once a user has entered the lottery they can call the `redeem_rewards_insecure` +instruction at any time. This instruction will mint the user an amount of Reward +tokens proportional to the amount of times the user has entered the lottery. +After minting the rewards, the program closes the user's lottery entry. + +Take a minute to familiarize yourself with the program code. The `enter_lottery` +instruction simply creates an account at a PDA mapped to the user and +initializes some state on it. + +The `redeem_rewards_insecure` instruction performs some account and data +validation, mints tokens to the given token account, then closes the lottery +account by removing its lamports. + +However, notice the `redeem_rewards_insecure` instruction _only_ transfers out +the account's lamports, leaving the account open to revival attacks. + +### 2. Test Insecure Program + +An attacker that successfully keeps their account from closing can then call +`redeem_rewards_insecure` multiple times, claiming more rewards than they are +owed. + +Some starter tests have already been written that showcase this vulnerability. +Take a look at the `closing-accounts.ts` file in the `tests` directory. There is +some setup in the `before` function, then a test that simply creates a new +lottery entry for `attacker`. + +Finally, there's a test that demonstrates how an attacker can keep the account +alive even after claiming rewards and then claim rewards again. 
That test looks +like this: + +```typescript +it("attacker can close + refund lottery acct + claim multiple rewards", async () => { + // claim multiple times + for (let i = 0; i < 2; i++) { + const tx = new Transaction(); + // instruction claims rewards, program will try to close account + tx.add( + await program.methods + .redeemWinningsInsecure() + .accounts({ + lotteryEntry: attackerLotteryEntry, + user: attacker.publicKey, + userAta: attackerAta, + rewardMint: rewardMint, + mintAuth: mintAuth, + tokenProgram: TOKEN_PROGRAM_ID, + }) + .instruction(), + ); + + // user adds instruction to refund dataAccount lamports + const rentExemptLamports = + await provider.connection.getMinimumBalanceForRentExemption( + 82, + "confirmed", + ); + tx.add( + SystemProgram.transfer({ + fromPubkey: attacker.publicKey, + toPubkey: attackerLotteryEntry, + lamports: rentExemptLamports, + }), + ); + // send tx + await sendAndConfirmTransaction(provider.connection, tx, [attacker]); + await new Promise(x => setTimeout(x, 5000)); + } + + const ata = await getAccount(provider.connection, attackerAta); + const lotteryEntry = + await program.account.lotteryAccount.fetch(attackerLotteryEntry); + + expect(Number(ata.amount)).to.equal( + lotteryEntry.timestamp.toNumber() * 10 * 2, + ); +}); +``` + +This test does the following: + +1. Calls `redeem_rewards_insecure` to redeem the user's rewards +2. In the same transaction, adds an instruction to refund the user's + `lottery_entry` before it can actually be closed +3. Successfully repeats steps 1 and 2, redeeming rewards for a second time. + +You can theoretically repeat steps 1-2 infinitely until either a) the program +has no more rewards to give or b) someone notices and patches the exploit. This +would obviously be a severe problem in any real program as it allows a malicious +attacker to drain an entire rewards pool. + +### 3. Create a `redeem_rewards_secure` instruction + +To prevent this from happening we're going to create a new instruction that +closes the lottery account securely using the Anchor `close` constraint. Feel +free to try this out on your own if you'd like. + +The new account validation struct called `RedeemWinningsSecure` should look like +this: + +```rust +#[derive(Accounts)] +pub struct RedeemWinningsSecure<'info> { + // program expects this account to be initialized + #[account( + mut, + seeds = [user.key().as_ref()], + bump = lottery_entry.bump, + has_one = user, + close = user + )] + pub lottery_entry: Account<'info, LotteryAccount>, + #[account(mut)] + pub user: Signer<'info>, + #[account( + mut, + constraint = user_ata.key() == lottery_entry.user_ata + )] + pub user_ata: Account<'info, TokenAccount>, + #[account( + mut, + constraint = reward_mint.key() == user_ata.mint + )] + pub reward_mint: Account<'info, Mint>, + ///CHECK: mint authority + #[account( + seeds = [MINT_SEED.as_bytes()], + bump + )] + pub mint_auth: AccountInfo<'info>, + pub token_program: Program<'info, Token> +} +``` + +It should be the exact same as the original `RedeemWinnings` account validation +struct, except there is an additional `close = user` constraint on the +`lottery_entry` account. This will tell Anchor to close the account by zeroing +out the data, transferring its lamports to the `user` account, and setting the +account discriminator to the `CLOSED_ACCOUNT_DISCRIMINATOR`. This last step is +what will prevent the account from being used again if the program has attempted +to close it already. 
+ +Then, we can create a `mint_ctx` method on the new `RedeemWinningsSecure` struct +to help with the minting CPI to the token program. + +```Rust +impl<'info> RedeemWinningsSecure <'info> { + pub fn mint_ctx(&self) -> CpiContext<'_, '_, '_, 'info, MintTo<'info>> { + let cpi_program = self.token_program.to_account_info(); + let cpi_accounts = MintTo { + mint: self.reward_mint.to_account_info(), + to: self.user_ata.to_account_info(), + authority: self.mint_auth.to_account_info() + }; + + CpiContext::new(cpi_program, cpi_accounts) + } +} +``` + +Finally, the logic for the new secure instruction should look like this: + +```rust +pub fn redeem_winnings_secure(ctx: Context) -> Result<()> { + + msg!("Calculating winnings"); + let amount = ctx.accounts.lottery_entry.timestamp as u64 * 10; + + msg!("Minting {} tokens in rewards", amount); + // program signer seeds + let auth_bump = *ctx.bumps.get("mint_auth").unwrap(); + let auth_seeds = &[MINT_SEED.as_bytes(), &[auth_bump]]; + let signer = &[&auth_seeds[..]]; + + // redeem rewards by minting to user + mint_to(ctx.accounts.mint_ctx().with_signer(signer), amount)?; + + Ok(()) +} +``` + +This logic simply calculates the rewards for the claiming user and transfers the +rewards. However, because of the `close` constraint in the account validation +struct, the attacker shouldn't be able to call this instruction multiple times. + +### 4. Test the Program + +To test our new secure instruction, let's create a new test that tries to call +`redeemingWinningsSecure` twice. We expect the second call to throw an error. + +```typescript +it("attacker cannot claim multiple rewards with secure claim", async () => { + const tx = new Transaction(); + // instruction claims rewards, program will try to close account + tx.add( + await program.methods + .redeemWinningsSecure() + .accounts({ + lotteryEntry: attackerLotteryEntry, + user: attacker.publicKey, + userAta: attackerAta, + rewardMint: rewardMint, + mintAuth: mintAuth, + tokenProgram: TOKEN_PROGRAM_ID, + }) + .instruction(), + ); + + // user adds instruction to refund dataAccount lamports + const rentExemptLamports = + await provider.connection.getMinimumBalanceForRentExemption( + 82, + "confirmed", + ); + tx.add( + SystemProgram.transfer({ + fromPubkey: attacker.publicKey, + toPubkey: attackerLotteryEntry, + lamports: rentExemptLamports, + }), + ); + // send tx + await sendAndConfirmTransaction(provider.connection, tx, [attacker]); + + try { + await program.methods + .redeemWinningsSecure() + .accounts({ + lotteryEntry: attackerLotteryEntry, + user: attacker.publicKey, + userAta: attackerAta, + rewardMint: rewardMint, + mintAuth: mintAuth, + tokenProgram: TOKEN_PROGRAM_ID, + }) + .signers([attacker]) + .rpc(); + } catch (error) { + console.log(error.message); + expect(error); + } +}); +``` + +Run `anchor test` to see that the test passes. The output will look something +like this: + +```bash + closing-accounts + ✔ Enter lottery (451ms) + ✔ attacker can close + refund lottery acct + claim multiple rewards (18760ms) +AnchorError caused by account: lottery_entry. Error Code: AccountDiscriminatorMismatch. Error Number: 3002. Error Message: 8 byte discriminator did not match what was expected. + ✔ attacker cannot claim multiple rewards with secure claim (414ms) +``` + +Note, this does not prevent the malicious user from refunding their account +altogether - it just protects our program from accidentally re-using the account +when it should be closed. 
We haven't implemented a `force_defund` instruction so +far, but we could. If you're feeling up for it, give it a try yourself! + +The simplest and most secure way to close accounts is using Anchor's `close` +constraint. If you ever need more custom behavior and can't use this constraint, +make sure to replicate its functionality to ensure your program is secure. + +If you want to take a look at the final solution code you can find it on the +`solution` branch of +[the same repository](https://github.com/Unboxed-Software/solana-closing-accounts/tree/solution). + +## Challenge + +Just as with other lessons in this unit, your opportunity to practice avoiding +this security exploit lies in auditing your own or other programs. + +Take some time to review at least one program and ensure that when accounts are +closed they're not susceptible to revival attacks. + +Remember, if you find a bug or exploit in somebody else's program, please alert +them! If you find one in your own program, be sure to patch it right away. + + + +Push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=e6b99d4b-35ed-4fb2-b9cd-73eefc875a0f)! + + diff --git a/content/courses/program-security/duplicate-mutable-accounts.mdx b/content/courses/program-security/duplicate-mutable-accounts.mdx new file mode 100644 index 000000000..4ec27388c --- /dev/null +++ b/content/courses/program-security/duplicate-mutable-accounts.mdx @@ -0,0 +1,439 @@ +--- +title: Duplicate Mutable Accounts +objectives: + - Explain the security risks associated with instructions that require two + mutable accounts of the same type and how to avoid them + - Implement a check for duplicate mutable accounts using native Rust + - Implement a check for duplicate mutable accounts using Anchor constraints +description: + "Under vulnerabilities that can occur with instruction handlers that handle + two mutable accounts, and how to mitigate them." +--- + +## Summary + +- When an instruction requires two mutable accounts of the same type, an + attacker can pass in the same account twice, leading to unintended mutations. +- To check for duplicate mutable accounts in Rust, simply compare the public + keys of the two accounts and throw an error if they are the same. + +### Checking for Duplicate Mutable Accounts in Rust + +In Rust, you can simply compare the public keys of the accounts and return an +error if they are identical: + +```rust +if ctx.accounts.account_one.key() == ctx.accounts.account_two.key() { + return Err(ProgramError::InvalidArgument) +} +``` + +### Using Constraints in Anchor + +In Anchor, you can add an explicit `constraint` to an account, ensuring it is +not the same as another account. + +## Lesson + +**Duplicate Mutable Accounts** occur when an instruction requires two mutable +accounts of the same type. If the same account is passed twice, it can be +mutated in unintended ways, potentially causing security vulnerabilities. + +### No check + +Consider a program that updates a data field for `user_a` and `user_b` in a +single instruction. If the same account is passed for both `user_a` and +`user_b`, the program will overwrite the data field with the second value, +potentially leading to unintended side effects. 
+ +```rust +use anchor_lang::prelude::*; + +declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); + +#[program] +pub mod duplicate_mutable_accounts_insecure { + use super::*; + + pub fn update(ctx: Context, a: u64, b: u64) -> Result<()> { + ctx.accounts.user_a.data = a; + ctx.accounts.user_b.data = b; + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Update<'info> { + #[account(mut)] + pub user_a: Account<'info, User>, + #[account(mut)] + pub user_b: Account<'info, User>, +} + +#[account] +#[derive(Default)] +pub struct User { + pub data: u64, +} +``` + +#### Adding a check in Rust + +To avoid this, add a check in the instruction logic to ensure the accounts are +different: + +```rust +if ctx.accounts.user_a.key() == ctx.accounts.user_b.key() { + return Err(ProgramError::InvalidArgument) +} +``` + +This check ensures that `user_a` and `user_b` are not the same account. + +```rust +use anchor_lang::prelude::*; + +declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); + +#[program] +pub mod duplicate_mutable_accounts_secure { + use super::*; + + pub fn update(ctx: Context, a: u64, b: u64) -> Result<()> { + if ctx.accounts.user_a.key() == ctx.accounts.user_b.key() { + return Err(ProgramError::InvalidArgument) + } + ctx.accounts.user_a.data = a; + ctx.accounts.user_b.data = b; + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Update<'info> { + #[account(mut)] + pub user_a: Account<'info, User>, + #[account(mut)] + pub user_b: Account<'info, User>, +} + +#[account] +#[derive(Default)] +pub struct User { + pub data: u64, +} +``` + +#### Using Anchor Constraint + +An even better solution in Anchor is to use +[the `constraint` keyword](https://www.anchor-lang.com/docs/account-constraints) +in the account validation struct. + +You can use the #[account(..)] attribute macro and the constraint keyword to add +a manual constraint to an account. The constraint keyword will check whether the +expression that follows evaluates to true or false, returning an error if the +expression evaluates to false. + +This ensures the check is performed automatically during account validation: + +```rust +use anchor_lang::prelude::*; + +declare_id!("AjBhRphs24vC1V8zZM25PTuLJhJJXFnYbimsZF8jpJAS"); + +#[program] +pub mod duplicate_mutable_accounts_recommended { + use super::*; + + pub fn update(ctx: Context, a: u64, b: u64) -> Result<()> { + ctx.accounts.user_a.data = a; + ctx.accounts.user_b.data = b; + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Update<'info> { + #[account( + mut, + constraint = user_a.key() != user_b.key())] + pub user_a: Account<'info, User>, + #[account(mut)] + pub user_b: Account<'info, User>, +} + +#[account] +#[derive(Default)] +pub struct User { + pub data: u64, +} +``` + +## Lab + +Let's practice by creating a simple Rock Paper Scissors program to demonstrate +how failing to check for duplicate mutable accounts can cause undefined behavior +within your program. + +This program will initialize “player” accounts and have a separate instruction +that requires two player accounts to represent starting a game of rock, paper +and scissors. 
+ +- An `initialize` instruction to initialize a `PlayerState` account +- A `rock_paper_scissors_shoot_insecure` instruction that requires two + `PlayerState` accounts, but does not check that the accounts passed into the + instruction are different +- A `rock_paper_scissors_shoot_secure` instruction that is the same as the + `rock_paper_scissors_shoot_insecure` instruction but adds a constraint that + ensures the two player accounts are different + +### Starter + +To get started, download the starter code on the `starter` branch +of [this repository](https://github.com/solana-developers/duplicate-mutable-accounts/tree/starter). +The starter code includes a program with two instructions and the boilerplate +setup for the test file. + +The `initialize` instruction initializes a new `PlayerState` account that stores +the public key of a player and a `choice` field that is set to `None`. + +The `rock_paper_scissors_shoot_insecure` instruction requires two `PlayerState` +accounts and requires a choice from the `RockPaperScissors` enum for each +player, but does not check that the accounts passed into the instruction are +different. This means a single account can be used for both `PlayerState` +accounts in the instruction. + +```rust title="constants.rs" +pub const DISCRIMINATOR_SIZE: usize = 8; +``` + +```rust title="lib.rs" +use anchor_lang::prelude::*; + +mod constants; +use constants::DISCRIMINATOR_SIZE; + +declare_id!("Lo5sj2wWy4BHbe8kCSUvgdhzFbv9c6CEERfgAXusBj9"); + +#[program] +pub mod duplicate_mutable_accounts { + use super::*; + + pub fn initialize(ctx: Context) -> Result<()> { + ctx.accounts.new_player.player = ctx.accounts.payer.key(); + ctx.accounts.new_player.choice = None; + Ok(()) + } + + pub fn rock_paper_scissors_shoot_insecure( + ctx: Context, + player_one_choice: RockPaperScissors, + player_two_choice: RockPaperScissors, + ) -> Result<()> { + ctx.accounts.player_one.choice = Some(player_one_choice); + ctx.accounts.player_two.choice = Some(player_two_choice); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Initialize<'info> { + #[account( + init, + payer = payer, + space = DISCRIMINATOR_SIZE + PlayerState::INIT_SPACE + )] + pub new_player: Account<'info, PlayerState>, + #[account(mut)] + pub payer: Signer<'info>, + pub system_program: Program<'info, System>, +} + +#[derive(Accounts)] +pub struct RockPaperScissorsInsecure<'info> { + #[account(mut)] + pub player_one: Account<'info, PlayerState>, + #[account(mut)] + pub player_two: Account<'info, PlayerState>, +} + +#[account] +#[derive(Default, InitSpace)] +pub struct PlayerState { + pub player: Pubkey, + pub choice: Option, +} + +#[derive(AnchorSerialize, AnchorDeserialize, Clone, Copy, PartialEq, Eq, InitSpace)] +pub enum RockPaperScissors { + Rock, + Paper, + Scissors, +} +``` + +### Test rock_paper_scissors_shoot_insecure instruction + +The test file includes the code to invoke the `initialize` instruction twice to +create two player accounts. + +Add a test to invoke the `rock_paper_scissors_shoot_insecure` instruction by +passing in the `playerOne.publicKey` for as both `playerOne` and `playerTwo`. + +```typescript +describe("duplicate-mutable-accounts", () => { + ... 
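+  // The `...` above is the existing starter setup, including the code that
+  // invokes `initialize` twice to create the playerOne and playerTwo accounts.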
+ it("Invokes insecure instruction", async () => { + await program.methods + .rockPaperScissorsShootInsecure({ rock: {} }, { scissors: {} }) + .accounts({ + playerOne: playerOne.publicKey, + playerTwo: playerOne.publicKey, + }) + .rpc() + + const p1 = await program.account.playerState.fetch(playerOne.publicKey) + assert.equal(JSON.stringify(p1.choice), JSON.stringify({ scissors: {} })) + assert.notEqual(JSON.stringify(p1.choice), JSON.stringify({ rock: {} })) + }) +}) +``` + +Run `anchor test` to see that the transactions are completed successfully, even +though the same account is used as two accounts in the instruction. Since the +`playerOne` account is used as both players in the instruction, note the +`choice` stored on the `playerOne` account is also overridden and set +incorrectly as `scissors`. + +```bash +duplicate-mutable-accounts + ✔ Initialized Player One (461ms) + ✔ Initialized Player Two (404ms) + ✔ Invoke insecure instruction (406ms) +``` + +Not only does allowing duplicate accounts do not make a whole lot of sense for +the game, but it also causes undefined behavior. If we were to build out this +program further, the program only has one chosen option and therefore can't be +compared against a second option. The game would end in a draw every time. It's +also unclear to a human whether `playerOne`'s choice should be rock or scissors, +so the program behavior is strange. + +### Add rock_paper_scissors_shoot_secure instruction + +Next, return to `lib.rs` and add a `rock_paper_scissors_shoot_secure` +instruction that uses the `#[account(...)]` macro to add an additional +`constraint` to check that `player_one` and `player_two` are different accounts. + +```rust +#[program] +pub mod duplicate_mutable_accounts { + use super::*; + ... + pub fn rock_paper_scissors_shoot_secure( + ctx: Context, + player_one_choice: RockPaperScissors, + player_two_choice: RockPaperScissors, + ) -> Result<()> { + ctx.accounts.player_one.choice = Some(player_one_choice); + ctx.accounts.player_two.choice = Some(player_two_choice); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct RockPaperScissorsSecure<'info> { + #[account( + mut, + constraint = player_one.key() != player_two.key() + )] + pub player_one: Account<'info, PlayerState>, + #[account(mut)] + pub player_two: Account<'info, PlayerState>, +} +``` + +### Test rock_paper_scissors_shoot_secure instruction + +To test the `rock_paper_scissors_shoot_secure` instruction, we'll invoke the +instruction twice. First, we'll invoke the instruction using two different +player accounts to check that the instruction works as intended. Then, we'll +invoke the instruction using the `playerOne.publicKey` as both player accounts, +which we expect to fail. + +```typescript +describe("duplicate-mutable-accounts", () => { + ... 
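+  // The `...` above elides the earlier setup and tests; the secure tests
+  // below reuse the playerOne and playerTwo accounts initialized previously.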
+ it("Invokes secure instruction", async () => { + await program.methods + .rockPaperScissorsShootSecure({ rock: {} }, { scissors: {} }) + .accounts({ + playerOne: playerOne.publicKey, + playerTwo: playerTwo.publicKey, + }) + .rpc() + + const p1 = await program.account.playerState.fetch(playerOne.publicKey) + const p2 = await program.account.playerState.fetch(playerTwo.publicKey) + assert.equal(JSON.stringify(p1.choice), JSON.stringify({ rock: {} })) + assert.equal(JSON.stringify(p2.choice), JSON.stringify({ scissors: {} })) + }) + + it("Invoke secure instruction - expect error", async () => { + try { + await program.methods + .rockPaperScissorsShootSecure({ rock: {} }, { scissors: {} }) + .accounts({ + playerOne: playerOne.publicKey, + playerTwo: playerOne.publicKey, + }) + .rpc() + } catch (err) { + expect(err) + console.log(err) + } + }) +}) +``` + +Run `anchor test` to see that the instruction works as intended and using the +`playerOne` account twice returns the expected error. + +```bash +'Program Lo5sj2wWy4BHbe8kCSUvgdhzFbv9c6CEERfgAXusBj9 invoke [1]', +'Program log: Instruction: RockPaperScissorsShootSecure', +'Program log: AnchorError caused by account: player_one. Error Code: ConstraintRaw. Error Number: 2003. Error Message: A raw constraint was violated.', +'Program Lo5sj2wWy4BHbe8kCSUvgdhzFbv9c6CEERfgAXusBj9 consumed 3414 of 200000 compute units', +'Program Lo5sj2wWy4BHbe8kCSUvgdhzFbv9c6CEERfgAXusBj9 failed: custom program error: 0x7d3' +``` + +The simple constraint is all it takes to close this loophole. While somewhat +contrived, this example illustrates the odd behavior that can occur if you write +your program under the assumption that two same-typed accounts will be different +instances of an account but don't explicitly write that constraint into your +program. Always think about the behavior you're expecting from the program and +whether that is explicit. + +If you want to take a look at the final solution code you can find it on the +`solution` branch of +[the repository](https://github.com/solana-developers/duplicate-mutable-accounts/tree/solution). + +## Challenge + +Just as with other lessons in this unit, your opportunity to practice avoiding +this security exploit lies in auditing your own or other programs. + +Take some time to review at least one program and ensure that any instructions +with two same-typed mutable accounts are properly constrained to avoid +duplicates. + +Remember, if you find a bug or exploit in somebody else's program, please alert +them! If you find one in your own program, be sure to patch it right away. + + + +Push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=9b759e39-7a06-4694-ab6d-e3e7ac266ea7)! + + diff --git a/content/courses/program-security/index.mdx b/content/courses/program-security/index.mdx new file mode 100644 index 000000000..ce0beb6bc --- /dev/null +++ b/content/courses/program-security/index.mdx @@ -0,0 +1,5 @@ +--- +author: unboxed +title: Program Security +description: Understand common security pitfalls and how to build secure programs. 
+--- diff --git a/content/courses/program-security/meta.json b/content/courses/program-security/meta.json new file mode 100644 index 000000000..de038cbf5 --- /dev/null +++ b/content/courses/program-security/meta.json @@ -0,0 +1,15 @@ +{ + "pages": [ + "security-intro", + "signer-auth", + "owner-checks", + "account-data-matching", + "reinitialization-attacks", + "duplicate-mutable-accounts", + "type-cosplay", + "arbitrary-cpi", + "bump-seed-canonicalization", + "closing-accounts", + "pda-sharing" + ] +} diff --git a/content/courses/program-security/owner-checks.mdx b/content/courses/program-security/owner-checks.mdx new file mode 100644 index 000000000..dd0bc4685 --- /dev/null +++ b/content/courses/program-security/owner-checks.mdx @@ -0,0 +1,671 @@ +--- +title: Owner Checks +objectives: + - Explain the security risks associated with not performing appropriate owner + checks + - Use Anchor's `Account<'info, T>` wrapper and an account type to automate + owner checks + - Use Anchor's `#[account(owner = )]` constraint to explicitly define an + external program that should own an account + - Implement owner checks using native Rust +description: + "Understand the use of account owner checks when processing incoming + instructions." +--- + +## Summary + +- **Owner checks** ensure that accounts are owned by the expected program. + Without owner checks, accounts owned by other programs can be used in an + instruction handler. +- Anchor program account types implement the `Owner` trait, allowing + `Account<'info, T>` to automatically verify program ownership. +- You can also use Anchor's + [`#[account(owner = )]`](https://www.anchor-lang.com/docs/account-constraints) + constraint to define an account's owner when it's external to the current + program. +- To implement an owner check in native Rust, verify that the account's owner + matches the expected program ID. + +```rust +if ctx.accounts.account.owner != ctx.program_id { + return Err(ProgramError::IncorrectProgramId.into()); +} +``` + +## Lesson + +Owner checks are used to verify that an account passed into an instruction +handler is owned by the expected program, preventing exploitation by accounts +from different programs. + +The `AccountInfo` struct contains several fields, including the `owner`, which +represents the **program** that owns the account. Owner checks ensure that this +`owner` field in the `AccountInfo` matches the expected program ID. + +```rust +/// Account information +#[derive(Clone)] +pub struct AccountInfo<'a> { + /// Public key of the account + pub key: &'a Pubkey, + /// Was the transaction signed by this account's public key? + pub is_signer: bool, + /// Is the account writable? + pub is_writable: bool, + /// The lamports in the account. Modifiable by programs. + pub lamports: Rc>, + /// The data held in this account. Modifiable by programs. + pub data: Rc>, + /// Program that owns this account + pub owner: &'a Pubkey, + /// This account's data contains a loaded program (and is now read-only) + pub executable: bool, + /// The epoch at which this account will next owe rent + pub rent_epoch: Epoch, +} +``` + +### Missing owner check + +In the following example, an `admin_instruction` is intended to be restricted to +an `admin` account stored in the `admin_config` account. However, it fails to +check whether the program owns the `admin_config` account. Without this check, +an attacker can spoof the account. 
+ +```rust +use anchor_lang::prelude::*; + +declare_id!("Cft4eTTrt4sJU4Ar35rUQHx6PSXfJju3dixmvApzhWws"); + +#[program] +pub mod owner_check { + use super::*; + ... + + pub fn admin_instruction(ctx: Context) -> Result<()> { + let account_data = ctx.accounts.admin_config.try_borrow_data()?; + let mut account_data_slice: &[u8] = &account_data; + let account_state = AdminConfig::try_deserialize(&mut account_data_slice)?; + + if account_state.admin != ctx.accounts.admin.key() { + return Err(ProgramError::InvalidArgument.into()); + } + msg!("Admin: {}", account_state.admin.to_string()); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Unchecked<'info> { + /// CHECK: This account will not be checked by Anchor + admin_config: UncheckedAccount<'info>, + admin: Signer<'info>, +} + +#[account] +pub struct AdminConfig { + admin: Pubkey, +} +``` + +### Add owner check + +To resolve this issue in native Rust, compare the `owner` field with the program +ID: + +```rust +if ctx.accounts.admin_config.owner != ctx.program_id { + return Err(ProgramError::IncorrectProgramId.into()); +} +``` + +Adding an `owner` check ensures that accounts from other programs cannot be +passed into the instruction handler. + +```rust +use anchor_lang::prelude::*; + +declare_id!("Cft4eTTrt4sJU4Ar35rUQHx6PSXfJju3dixmvApzhWws"); + +#[program] +pub mod owner_check { + use super::*; + ... + pub fn admin_instruction(ctx: Context) -> Result<()> { + if ctx.accounts.admin_config.owner != ctx.program_id { + return Err(ProgramError::IncorrectProgramId.into()); + } + + let account_data = ctx.accounts.admin_config.try_borrow_data()?; + let mut account_data_slice: &[u8] = &account_data; + let account_state = AdminConfig::try_deserialize(&mut account_data_slice)?; + + if account_state.admin != ctx.accounts.admin.key() { + return Err(ProgramError::InvalidArgument.into()); + } + msg!("Admin: {}", account_state.admin.to_string()); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Unchecked<'info> { + /// CHECK: This account will not be checked by Anchor + admin_config: UncheckedAccount<'info>, + admin: Signer<'info>, +} + +#[account] +pub struct AdminConfig { + admin: Pubkey, +} +``` + +### Use Anchor's `Account<'info, T>` + +Anchor simplifies owner checks with the `Account` type, which wraps +`AccountInfo` and automatically verifies ownership. + +In the following example, `Account<'info, AdminConfig>` validates the +`admin_config` account, and the `has_one` constraint checks that the admin +account matches the `admin` field in `admin_config`. + +```rust +use anchor_lang::prelude::*; + +declare_id!("Cft4eTTrt4sJU4Ar35rUQHx6PSXfJju3dixmvApzhWws"); + +#[program] +pub mod owner_check { + use super::*; + ... + pub fn admin_instruction(ctx: Context) -> Result<()> { + msg!("Admin: {}", ctx.accounts.admin_config.admin.to_string()); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Checked<'info> { + #[account( + has_one = admin, + )] + admin_config: Account<'info, AdminConfig>, + admin: Signer<'info>, +} + +#[account] +pub struct AdminConfig { + admin: Pubkey, +} +``` + +### Use Anchor's `#[account(owner = )]` constraint + +In addition to the `Account` type, you can use the Anchor's +[`owner` constraint](https://www.anchor-lang.com/docs/account-constraints) to +specify the program that should own an account when it differs from the +executing program. This is particularly useful when an instruction handler +expects an account to be a PDA created by another program. 
By using the `seeds` +and `bump` constraints along with the `owner`, you can properly derive and +verify the account's address. + +To apply the `owner` constraint, you need access to the public key of the +program expected to own the account. This can be provided either as an +additional account or by hard-coding the public key within your program. + +```rust +use anchor_lang::prelude::*; + +declare_id!("Cft4eTTrt4sJU4Ar35rUQHx6PSXfJju3dixmvApzhWws"); + +#[program] +pub mod owner_check { + use super::*; + ... + pub fn admin_instruction(ctx: Context) -> Result<()> { + msg!("Admin: {}", ctx.accounts.admin_config.admin.to_string()); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Checked<'info> { + #[account( + has_one = admin, + )] + admin_config: Account<'info, AdminConfig>, + admin: Signer<'info>, + #[account( + seeds = b"test-seed", + bump, + owner = token_program.key() + )] + pda_derived_from_another_program: AccountInfo<'info>, + token_program: Program<'info, Token> +} + +#[account] +pub struct AdminConfig { + admin: Pubkey, +} +``` + +## Lab + +In this lab, we'll demonstrate how the absence of an owner check can allow a +malicious actor to drain tokens from a simplified token vault. This is similar +to the lab from the +[Signer Authorization lesson](/developers/courses/program-security/signer-auth). + +We'll use two programs to illustrate this: + +1. One program lacks an owner check on the vault account it withdraws tokens + from. +2. The second program is a clone created by a malicious user to mimic the first + program's vault account. + +Without the owner check, the malicious user can pass in their vault account +owned by a fake program, and the original program will still execute the +withdrawal. + +### 1. Starter + +Begin by downloading the starter code from the +[`starter` branch of this repository](https://github.com/solana-developers/owner-checks/tree/starter). +The starter code includes two programs: `clone` and `owner_check`, and the setup +for the test file. + +The `owner_check` program includes two instruction handlers: + +- `initialize_vault`: Initializes a simplified vault account storing the + addresses of a token account and an authority account. +- `insecure_withdraw`: Withdraws tokens from the token account but lacks an + owner check for the vault account. 
+ +```rust +use anchor_lang::prelude::*; +use anchor_spl::token::{self, Mint, Token, TokenAccount}; + +declare_id!("3uF3yaymq1YBmDDHpRPwifiaBf4eK8M2jLgaMcCTg9n9"); + +pub const DISCRIMINATOR_SIZE: usize = 8; + +#[program] +pub mod owner_check { + use super::*; + + pub fn initialize_vault(ctx: Context) -> Result<()> { + ctx.accounts.vault.token_account = ctx.accounts.token_account.key(); + ctx.accounts.vault.authority = ctx.accounts.authority.key(); + Ok(()) + } + + pub fn insecure_withdraw(ctx: Context) -> Result<()> { + let account_data = ctx.accounts.vault.try_borrow_data()?; + let mut account_data_slice: &[u8] = &account_data; + let account_state = Vault::try_deserialize(&mut account_data_slice)?; + + if account_state.authority != ctx.accounts.authority.key() { + return Err(ProgramError::InvalidArgument.into()); + } + + let amount = ctx.accounts.token_account.amount; + + let seeds = &[ + b"token".as_ref(), + &[ctx.bumps.token_account], + ]; + let signer = [&seeds[..]]; + + let cpi_ctx = CpiContext::new_with_signer( + ctx.accounts.token_program.to_account_info(), + token::Transfer { + from: ctx.accounts.token_account.to_account_info(), + authority: ctx.accounts.token_account.to_account_info(), + to: ctx.accounts.withdraw_destination.to_account_info(), + }, + &signer, + ); + + token::transfer(cpi_ctx, amount)?; + Ok(()) + } +} + +#[derive(Accounts)] +pub struct InitializeVault<'info> { + #[account( + init, + payer = authority, + space = DISCRIMINATOR_SIZE + Vault::INIT_SPACE, + )] + pub vault: Account<'info, Vault>, + #[account( + init, + payer = authority, + token::mint = mint, + token::authority = token_account, + seeds = [b"token"], + bump, + )] + pub token_account: Account<'info, TokenAccount>, + pub mint: Account<'info, Mint>, + #[account(mut)] + pub authority: Signer<'info>, + pub token_program: Program<'info, Token>, + pub system_program: Program<'info, System>, + pub rent: Sysvar<'info, Rent>, +} + +#[derive(Accounts)] +pub struct InsecureWithdraw<'info> { + /// CHECK: This account will not be checked by anchor + pub vault: UncheckedAccount<'info>, + #[account( + mut, + seeds = [b"token"], + bump, + )] + pub token_account: Account<'info, TokenAccount>, + #[account(mut)] + pub withdraw_destination: Account<'info, TokenAccount>, + pub token_program: Program<'info, Token>, + pub authority: Signer<'info>, +} + +#[account] +#[derive(Default, InitSpace)] +pub struct Vault { + token_account: Pubkey, + authority: Pubkey, +} +``` + +The `clone` program includes a single instruction handler: + +- `initialize_vault`: Initializes a fake vault account that mimics the vault + account of the `owner_check` program, allowing the malicious user to set their + own authority. 
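+
+The clone only works because both programs declare an account struct with the
+same name. Anchor's default 8-byte account discriminator is derived from the
+string `account:<StructName>`, so the two `Vault` types end up with identical
+discriminators, and data written by the clone later deserializes cleanly inside
+`owner_check`. A standalone sketch of that derivation, using Node's built-in
+`crypto` module (for illustration only, not part of the lab code):
+
+```typescript
+import { createHash } from "crypto";
+
+// Anchor's default account discriminator: the first 8 bytes of
+// sha256("account:<StructName>").
+function accountDiscriminator(structName: string): Buffer {
+  return createHash("sha256")
+    .update(`account:${structName}`)
+    .digest()
+    .subarray(0, 8);
+}
+
+// Both programs define a struct named `Vault`, so these bytes match, and only
+// an owner check can tell the two accounts apart.
+console.log(accountDiscriminator("Vault").toString("hex"));
+```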
+ +```rust +use anchor_lang::prelude::*; +use anchor_spl::token::TokenAccount; + +declare_id!("2Gn5MFGMvRjd548z6vhreh84UiL7L5TFzV5kKGmk4Fga"); + +pub const DISCRIMINATOR_SIZE: usize = 8; + +#[program] +pub mod clone { + use super::*; + + pub fn initialize_vault(ctx: Context) -> Result<()> { + ctx.accounts.vault.token_account = ctx.accounts.token_account.key(); + ctx.accounts.vault.authority = ctx.accounts.authority.key(); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct InitializeVault<'info> { + #[account( + init, + payer = authority, + space = DISCRIMINATOR_SIZE + Vault::INIT_SPACE, + )] + pub vault: Account<'info, Vault>, + pub token_account: Account<'info, TokenAccount>, + #[account(mut)] + pub authority: Signer<'info>, + pub system_program: Program<'info, System>, +} + +#[account] +#[derive(Default, InitSpace)] +pub struct Vault { + token_account: Pubkey, + authority: Pubkey, +} +``` + +### 2. Test insecure_withdraw Instruction Handler + +The test file contains tests that initialize a vault in both programs. We'll add +a test to invoke the `insecure_withdraw` instruction handler, showing how the +lack of an owner check allows token withdrawal from the original program's +vault. + +```typescript +describe("Owner Check", () => { + ... + it("performs insecure withdraw", async () => { + try { + const transaction = await program.methods + .insecureWithdraw() + .accounts({ + vault: vaultCloneAccount.publicKey, + tokenAccount: tokenPDA, + withdrawDestination: unauthorizedWithdrawDestination, + authority: unauthorizedWallet.publicKey, + }) + .transaction(); + + await anchor.web3.sendAndConfirmTransaction(connection, transaction, [ + unauthorizedWallet, + ]); + + const tokenAccountInfo = await getAccount(connection, tokenPDA); + expect(Number(tokenAccountInfo.amount)).to.equal(0); + } catch (error) { + console.error("Insecure withdraw failed:", error); + throw error; + } + }); +}) +``` + +Run an `anchor test` to verify that the `insecure_withdraw` is complete +successfully. + +```bash +owner-check + ✔ initializes vault (866ms) + ✔ initializes fake vault (443ms) + ✔ performs insecure withdraw (444ms) +``` + + + +The `vaultCloneAccount` deserializes successfully due to both programs using the +same discriminator, derived from the identical `Vault` struct name. + + + +```rust +#[account] +#[derive(Default, InitSpace)] +pub struct Vault { + token_account: Pubkey, + authority: Pubkey, +} +``` + +### 3. Add secure_withdraw Instruction Handler + +We'll now close the security loophole by adding a `secure_withdraw` instruction +handler with an `Account<'info, Vault>` type to ensure an owner check is +performed. + +In the `lib.rs` file of the `owner_check` program, add a `secure_withdraw` +instruction handler and a `SecureWithdraw` accounts struct. The `has_one` +constraint will be used to ensure that the `token_account` and `authority` +passed into the instruction handler match the values stored in the `vault` +account. + +```rust +#[program] +pub mod owner_check { + use super::*; + ... 
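+
+    // Note: unlike insecure_withdraw, this handler leans on the SecureWithdraw
+    // struct below. Account<'info, Vault> has Anchor verify the vault's owner and
+    // discriminator, and has_one checks the stored token_account and authority.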
+ + pub fn secure_withdraw(ctx: Context) -> Result<()> { + let amount = ctx.accounts.token_account.amount; + + let seeds = &[ + b"token".as_ref(), + &[*ctx.bumps.get("token_account").unwrap()], + ]; + let signer = [&seeds[..]]; + + let cpi_ctx = CpiContext::new_with_signer( + ctx.accounts.token_program.to_account_info(), + token::Transfer { + from: ctx.accounts.token_account.to_account_info(), + authority: ctx.accounts.token_account.to_account_info(), + to: ctx.accounts.withdraw_destination.to_account_info(), + }, + &signer, + ); + + token::transfer(cpi_ctx, amount)?; + Ok(()) + } +} +... + +#[derive(Accounts)] +pub struct SecureWithdraw<'info> { + #[account( + has_one = token_account, + has_one = authority + )] + pub vault: Account<'info, Vault>, + #[account( + mut, + seeds = [b"token"], + bump, + )] + pub token_account: Account<'info, TokenAccount>, + #[account(mut)] + pub withdraw_destination: Account<'info, TokenAccount>, + pub token_program: Program<'info, Token>, + pub authority: Signer<'info>, +} +``` + +### 4. Test secure_withdraw Instruction Handler + +To test the `secure_withdraw` instruction handler, we'll invoke it twice. First, +we'll use the `vaultCloneAccount` account, expecting it to fail. Then, we'll +invoke the instruction handler with the correct `vaultAccount` account to verify +the instruction handler works as intended. + +```typescript +describe("Owner Check", () => { + ... + it("fails secure withdraw with incorrect authority", async () => { + try { + const transaction = await program.methods + .secureWithdraw() + .accounts({ + vault: vaultCloneAccount.publicKey, + tokenAccount: tokenPDA, + withdrawDestination: unauthorizedWithdrawDestination, + authority: unauthorizedWallet.publicKey, + }) + .transaction(); + + await anchor.web3.sendAndConfirmTransaction(connection, transaction, [ + unauthorizedWallet, + ]); + throw new Error("Expected transaction to fail, but it succeeded"); + } catch (error) { + expect(error).to.be.an("error"); + console.log("Error message:", error.message); + } + }); + + it("performs secure withdraw successfully", async () => { + try { + await mintTo( + connection, + walletAuthority.payer, + tokenMint, + tokenPDA, + walletAuthority.payer, + INITIAL_TOKEN_AMOUNT + ); + + await program.methods + .secureWithdraw() + .accounts({ + vault: vaultAccount.publicKey, + tokenAccount: tokenPDA, + withdrawDestination: authorizedWithdrawDestination, + authority: walletAuthority.publicKey, + }) + .rpc(); + + const tokenAccountInfo = await getAccount(connection, tokenPDA); + expect(Number(tokenAccountInfo.amount)).to.equal(0); + } catch (error) { + console.error("Secure withdraw failed:", error); + throw error; + } + }); +}) +``` + +Running `anchor test` will show that the transaction using the +`vaultCloneAccount` account fails, while the transaction using the +`vaultAccount` account withdraws successfully. + +```bash +"Program 3uF3yaymq1YBmDDHpRPwifiaBf4eK8M2jLgaMcCTg9n9 invoke [1]", +"Program log: Instruction: SecureWithdraw", +"Program log: AnchorError caused by account: vault. Error Code: AccountOwnedByWrongProgram. Error Number: 3007. 
Error Message: The given account is owned by a different program than expected.", +"Program log: Left:", +"Program log: 2Gn5MFGMvRjd548z6vhreh84UiL7L5TFzV5kKGmk4Fga", +"Program log: Right:", +"Program log: 3uF3yaymq1YBmDDHpRPwifiaBf4eK8M2jLgaMcCTg9n9", +"Program 3uF3yaymq1YBmDDHpRPwifiaBf4eK8M2jLgaMcCTg9n9 consumed 4449 of 200000 compute units", +"Program 3uF3yaymq1YBmDDHpRPwifiaBf4eK8M2jLgaMcCTg9n9 failed: custom program error: 0xbbf" +``` + +Here we see how using Anchor's `Account<'info, T>` type simplifies the account +validation process by automating ownership checks. Additionally, Anchor errors +provide specific details, such as which account caused the error. For example, +the log indicates `AnchorError caused by account: vault`, which aids in +debugging. + +```bash +✔ fails secure withdraw with incorrect authority +✔ performs secure withdraw successfully (847ms) +``` + +Ensuring account ownership checks is critical to avoid security vulnerabilities. +This example demonstrates how simple it is to implement proper validation, but +it's vital to always verify which accounts are owned by specific programs. + +If you'd like to review the final solution code, it's available on the +[`solution` branch of the repository](https://github.com/solana-developers/owner-checks/tree/solution). + +## Challenge + +As with other lessons in this unit, practice preventing security exploits by +auditing your own or other programs. + +Take time to review at least one program to confirm that ownership checks are +properly enforced on all accounts passed into each instruction handler. + +If you find a bug or exploit in another program, notify the developer. If you +find one in your own program, patch it immediately. + + + +Push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=e3069010-3038-4984-b9d3-2dc6585147b1)! + + diff --git a/content/courses/program-security/pda-sharing.mdx b/content/courses/program-security/pda-sharing.mdx new file mode 100644 index 000000000..f02e4aa9f --- /dev/null +++ b/content/courses/program-security/pda-sharing.mdx @@ -0,0 +1,585 @@ +--- +title: PDA Sharing +objectives: + - Explain the security risks associated with PDA sharing + - Derive PDAs that have discrete authority domains + - Use Anchor's `seeds` and `bump` constraints to validate PDA accounts +description: + "Understand the potential problems of reusing PDAs by using user and domain + specific PDAs." +--- + +## Summary + +- Using the same PDA for multiple authority domains opens your program up to the + possibility of users accessing data and funds that don't belong to them +- Prevent the same PDA from being used for multiple accounts by using seeds that + are user and/or domain-specific +- Use Anchor's `seeds` and `bump` constraints to validate that a PDA is derived + using the expected seeds and bump + +## Lesson + +PDA sharing refers to using the same PDA as a signer across multiple users or +domains. Especially when using PDAs for signing, it may seem appropriate to use +a global PDA to represent the program. However, this opens up the possibility of +account validation passing but a user being able to access funds, transfers, or +data not belonging to them. + +### Insecure Global PDA + +In the example below, the `authority` of the `vault` account is a PDA derived +using the `mint` address stored on the `pool` account. 
This PDA is passed into +the instruction handler as the `authority` account to sign for the transfer of +tokens from the `vault` to the `withdraw_destination`. + +Using the `mint` address as a seed to derive the PDA to sign for the `vault` is +insecure because multiple `pool` accounts could be created for the same `vault` +token account, but with different `withdraw_destination` accounts. By using the +`mint` as a `seed` to derive the PDA for signing token transfers, any `pool` +account could sign for the transfer of tokens from a `vault` token account to an +arbitrary `withdraw_destination`. + +```rust +use anchor_lang::prelude::*; +use anchor_spl::token::{self, Token, TokenAccount}; + +declare_id!("ABQaKhtpYQUUgZ9m2sAY7ZHxWv6KyNdhUJW8Dh8NQbkf"); + +#[program] +pub mod pda_sharing_insecure { + use super::*; + + pub fn withdraw_tokens(ctx: Context) -> Result<()> { + let amount = ctx.accounts.vault.amount; + let seeds = &[ctx.accounts.pool.mint.as_ref(), &[ctx.accounts.pool.bump]]; + token::transfer(get_transfer_ctx(&ctx.accounts).with_signer(&[seeds]), amount) + } +} + +#[derive(Accounts)] +pub struct WithdrawTokens<'info> { + #[account(has_one = vault, has_one = withdraw_destination)] + pool: Account<'info, TokenPool>, + vault: Account<'info, TokenAccount>, + withdraw_destination: Account<'info, TokenAccount>, + /// CHECK: This is the PDA that signs for the transfer + authority: UncheckedAccount<'info>, + token_program: Program<'info, Token>, +} + +pub fn get_transfer_ctx<'accounts, 'remaining, 'cpi_code, 'info>( + accounts: &'accounts WithdrawTokens<'info>, +) -> CpiContext<'accounts, 'remaining, 'cpi_code, 'info, token::Transfer<'info>> { + CpiContext::new( + accounts.token_program.to_account_info(), + token::Transfer { + from: accounts.vault.to_account_info(), + to: accounts.withdraw_destination.to_account_info(), + authority: accounts.authority.to_account_info(), + }, + ) +} + +#[account] +#[derive(InitSpace)] +pub struct TokenPool { + pub vault: Pubkey, + pub mint: Pubkey, + pub withdraw_destination: Pubkey, + pub bump: u8, +} +``` + +### Secure account specific PDA + +One approach to create an account specific PDA is to use the +`withdraw_destination` as a seed to derive the PDA used as the authority of the +`vault` token account. This ensures the PDA signing for the CPI in the +`withdraw_tokens` instruction handler is derived using the intended +`withdraw_destination` token account. In other words, tokens from a `vault` +token account can only be withdrawn to the `withdraw_destination` that was +originally initialized with the `pool` account. 
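+
+Seen from the client, the difference is simply which seed the signing PDA is
+derived from. The sketch below is for illustration only and uses placeholder
+`programId`, `mint`, and `withdrawDestination` values:
+
+```typescript
+import { PublicKey } from "@solana/web3.js";
+
+// Placeholder addresses, for illustration only.
+const programId = new PublicKey("ABQaKhtpYQUUgZ9m2sAY7ZHxWv6KyNdhUJW8Dh8NQbkf");
+const mint = new PublicKey("So11111111111111111111111111111111111111112");
+const withdrawDestination = new PublicKey("11111111111111111111111111111111");
+
+// Insecure: every pool for this mint shares one signing authority.
+const [sharedAuthority] = PublicKey.findProgramAddressSync(
+  [mint.toBuffer()],
+  programId,
+);
+
+// Secure: the signing authority is unique to a single withdraw destination.
+const [scopedAuthority] = PublicKey.findProgramAddressSync(
+  [withdrawDestination.toBuffer()],
+  programId,
+);
+
+console.log({
+  sharedAuthority: sharedAuthority.toBase58(),
+  scopedAuthority: scopedAuthority.toBase58(),
+});
+```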
+ +```rust +use anchor_lang::prelude::*; +use anchor_spl::token::{self, Token, TokenAccount}; + +declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); + +#[program] +pub mod pda_sharing_secure { + use super::*; + + pub fn withdraw_tokens(ctx: Context) -> Result<()> { + let amount = ctx.accounts.vault.amount; + let seeds = &[ + ctx.accounts.pool.withdraw_destination.as_ref(), + &[ctx.accounts.pool.bump], + ]; + token::transfer(get_transfer_ctx(&ctx.accounts).with_signer(&[seeds]), amount) + } +} + +#[derive(Accounts)] +pub struct WithdrawTokens<'info> { + #[account(has_one = vault, has_one = withdraw_destination)] + pool: Account<'info, TokenPool>, + vault: Account<'info, TokenAccount>, + withdraw_destination: Account<'info, TokenAccount>, + /// CHECK: This is the PDA that signs for the transfer + authority: UncheckedAccount<'info>, + token_program: Program<'info, Token>, +} + +pub fn get_transfer_ctx<'accounts, 'remaining, 'cpi_code, 'info>( + accounts: &'accounts WithdrawTokens<'info>, +) -> CpiContext<'accounts, 'remaining, 'cpi_code, 'info, token::Transfer<'info>> { + CpiContext::new( + accounts.token_program.to_account_info(), + token::Transfer { + from: accounts.vault.to_account_info(), + to: accounts.withdraw_destination.to_account_info(), + authority: accounts.authority.to_account_info(), + }, + ) +} + +#[account] +#[derive(InitSpace)] +pub struct TokenPool { + pub vault: Pubkey, + pub mint: Pubkey, + pub withdraw_destination: Pubkey, + pub bump: u8, +} +``` + +### Anchor's seeds and bump Constraints + +PDAs can be used as both the address of an account and allow programs to sign +for the PDAs they own. + +The example below uses a PDA derived using the `withdraw_destination` as both +the address of the `pool` account and the owner of the `vault` token account. +This means that only the `pool` account associated with the correct `vault` and +`withdraw_destination` can be used in the `withdraw_tokens` instruction handler. + +You can use Anchor's `seeds` and `bump` constraints with the +[`#[account(...)]`](https://www.anchor-lang.com/docs/account-constraints) +attribute to validate the `pool` account PDA. Anchor derives a PDA using the +`seeds` and `bump` specified and compares it against the account passed into the +instruction handler as the `pool` account. The `has_one` constraint is used to +further ensure that only the correct accounts stored on the `pool` account are +passed into the instruction handler. 
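+
+Roughly speaking, the `seeds` and `bump = pool.bump` combination makes Anchor
+recompute the address from the declared seeds plus the stored bump and reject
+the instruction if it doesn't match the supplied `pool` account. A client-side
+sketch of that same check, assuming hypothetical `programId` and
+`withdrawDestination` values:
+
+```typescript
+import { PublicKey } from "@solana/web3.js";
+
+// Placeholder addresses, for illustration only.
+const programId = new PublicKey("ABQaKhtpYQUUgZ9m2sAY7ZHxWv6KyNdhUJW8Dh8NQbkf");
+const withdrawDestination = new PublicKey("11111111111111111111111111111111");
+
+// At initialization time, the canonical bump is found and stored on the pool.
+const [poolAddress, poolBump] = PublicKey.findProgramAddressSync(
+  [withdrawDestination.toBuffer()],
+  programId,
+);
+
+// The seeds + bump constraints amount to rebuilding the address from the
+// declared seeds and the stored bump, then comparing it to the passed account.
+const expectedPool = PublicKey.createProgramAddressSync(
+  [withdrawDestination.toBuffer(), Buffer.from([poolBump])],
+  programId,
+);
+console.log("pool matches expected PDA:", poolAddress.equals(expectedPool));
+```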
+ +```rust +use anchor_lang::prelude::*; +use anchor_spl::token::{self, Token, TokenAccount}; + +declare_id!("ABQaKhtpYQUUgZ9m2sAY7ZHxWv6KyNdhUJW8Dh8NQbkf"); + +#[program] +pub mod pda_sharing_recommended { + use super::*; + + pub fn withdraw_tokens(ctx: Context) -> Result<()> { + let amount = ctx.accounts.vault.amount; + let seeds = &[ + ctx.accounts.pool.withdraw_destination.as_ref(), + &[ctx.accounts.pool.bump], + ]; + token::transfer(get_transfer_ctx(&ctx.accounts).with_signer(&[seeds]), amount) + } +} + +#[derive(Accounts)] +pub struct WithdrawTokens<'info> { + #[account( + seeds = [withdraw_destination.key().as_ref()], + bump = pool.bump, + has_one = vault, + has_one = withdraw_destination, + )] + pool: Account<'info, TokenPool>, + #[account(mut)] + vault: Account<'info, TokenAccount>, + #[account(mut)] + withdraw_destination: Account<'info, TokenAccount>, + token_program: Program<'info, Token>, +} + +pub fn get_transfer_ctx<'accounts, 'remaining, 'cpi_code, 'info>( + accounts: &'accounts WithdrawTokens<'info>, +) -> CpiContext<'accounts, 'remaining, 'cpi_code, 'info, token::Transfer<'info>> { + CpiContext::new( + accounts.token_program.to_account_info(), + token::Transfer { + from: accounts.vault.to_account_info(), + to: accounts.withdraw_destination.to_account_info(), + authority: accounts.pool.to_account_info(), + }, + ) +} + +#[account] +#[derive(InitSpace)] +pub struct TokenPool { + pub vault: Pubkey, + pub mint: Pubkey, + pub withdraw_destination: Pubkey, + pub bump: u8, +} +``` + +## Lab + +Let's practice by creating a simple program to demonstrate how PDA sharing can +allow an attacker to withdraw tokens that don't belong to them. This lab expands +on the examples above by including the instruction handlers to initialize the +required program accounts. + +### 1. Starter + +To get started, download the starter code on the +[`starter` branch of this repository](https://github.com/solana-developers/pda-sharing/tree/starter). +The starter code includes a program with two instruction handlers and the +boilerplate setup for the test file. + +The `initialize_pool` instruction handler initializes a new `TokenPool` that +stores a `vault`, `mint`, `withdraw_destination`, and `bump`. The `vault` is a +token account where the authority is set as a PDA derived using the `mint` +address. + +The `withdraw_insecure` instruction handler will transfer tokens in the `vault` +token account to a `withdraw_destination` token account. + +However, as written the seeds used for signing are not specific to the vault's +withdrawal destination, thus opening up the program to security exploits. Take a +minute to familiarize yourself with the code before continuing on. + +### 2. Test withdraw_insecure Instruction Handler + +The test file includes the code to invoke the `initialize_pool` instruction +handler and then mint 100 tokens to the `vault` token account. It also includes +a test to invoke the `withdraw_insecure` using the intended +`withdraw_destination`. This shows that the instruction handlers can be used as +intended. + +After that, there are two more tests to show how the instruction handlers are +vulnerable to exploit. + +The first test invokes the `initialize_pool` instruction handler to create a +"fake" `pool` account using the same `vault` token account, but a different +`withdraw_destination`. + +The second test withdraws from this pool, stealing funds from the vault. 
+ +```typescript +it("allows insecure initialization with incorrect vault", async () => { + try { + await program.methods + .initializePool(insecureAuthorityBump) + .accounts({ + pool: insecurePoolFake.publicKey, + mint: tokenMint, + vault: insecureVault.address, + withdrawDestination: fakeWithdrawDestination, + }) + .signers([insecurePoolFake]) + .rpc(); + + await mintTo( + connection, + wallet.payer, + tokenMint, + insecureVault.address, + wallet.payer, + INITIAL_MINT_AMOUNT, + ); + + const vaultAccount = await getAccount(connection, insecureVault.address); + expect(Number(vaultAccount.amount)).to.equal(INITIAL_MINT_AMOUNT); + } catch (error) { + throw new Error(`Test failed: ${error.message}`); + } +}); + +it("allows insecure withdrawal to incorrect destination", async () => { + try { + await program.methods + .withdrawInsecure() + .accounts({ + pool: insecurePoolFake.publicKey, + authority: insecureAuthority, + }) + .rpc(); + + const vaultAccount = await getAccount(connection, insecureVault.address); + expect(Number(vaultAccount.amount)).to.equal(0); + } catch (error) { + throw new Error(`Test failed: ${error.message}`); + } +}); +``` + +Run `anchor test` to see that the transactions complete successfully and the +`withdraw_instrucure` instruction handler allows the `vault` token account to be +drained to a fake withdraw destination stored on the fake `pool` account. + +### 3. Add initialize_pool_secure Instruction Handler + +Now let's add a new instruction handler to the program for securely initializing +a pool. + +This new `initialize_pool_secure` instruction handler will initialize a `pool` +account as a PDA derived using the `withdraw_destination`. It will also +initialize a `vault` token account with the authority set as the `pool` PDA. + +```rust +pub fn initialize_pool_secure(ctx: Context) -> Result<()> { + ctx.accounts.pool.vault = ctx.accounts.vault.key(); + ctx.accounts.pool.mint = ctx.accounts.mint.key(); + ctx.accounts.pool.withdraw_destination = ctx.accounts.withdraw_destination.key(); + ctx.accounts.pool.bump = ctx.bumps.pool; + Ok(()) +} +... + +#[derive(Accounts)] +pub struct InitializePoolSecure<'info> { + #[account( + init, + payer = payer, + space = DISCRIMINATOR_SIZE + TokenPool::INIT_SPACE, + seeds = [withdraw_destination.key().as_ref()], + bump + )] + pub pool: Account<'info, TokenPool>, + pub mint: Account<'info, Mint>, + #[account( + init, + payer = payer, + token::mint = mint, + token::authority = pool, + )] + pub vault: Account<'info, TokenAccount>, + pub withdraw_destination: Account<'info, TokenAccount>, + #[account(mut)] + pub payer: Signer<'info>, + pub system_program: Program<'info, System>, + pub token_program: Program<'info, Token>, + pub rent: Sysvar<'info, Rent>, +} +``` + +### 4. Add withdraw_secure Instruction Handler + +Next, add a `withdraw_secure` instruction handler. This instruction handler will +withdraw tokens from the `vault` token account to the `withdraw_destination`. +The `pool` account is validated using the `seeds` and `bump` constraints to +ensure the correct PDA account is provided. The `has_one` constraints check that +the correct `vault` and `withdraw_destination` token accounts are provided. + +```rust +pub fn withdraw_secure(ctx: Context) -> Result<()> { + let amount = ctx.accounts.vault.amount; + let seeds = &[ + ctx.accounts.pool.withdraw_destination.as_ref(), + &[ctx.accounts.pool.bump], + ]; + token::transfer( + get_secure_transfer_ctx(&ctx.accounts).with_signer(&[seeds]), + amount, + ) +} + +... 
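+
+// Validation note: the seeds and bump constraints below rebuild the pool PDA from
+// the supplied withdraw_destination, so a pool initialized with a different
+// destination can never pass, and has_one ties vault and destination to that pool.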
+ +#[derive(Accounts)] +pub struct WithdrawTokensSecure<'info> { + #[account( + has_one = vault, + has_one = withdraw_destination, + seeds = [withdraw_destination.key().as_ref()], + bump = pool.bump, + )] + pub pool: Account<'info, TokenPool>, + #[account(mut)] + pub vault: Account<'info, TokenAccount>, + #[account(mut)] + pub withdraw_destination: Account<'info, TokenAccount>, + pub token_program: Program<'info, Token>, +} + +pub fn get_secure_transfer_ctx<'accounts, 'remaining, 'cpi_code, 'info>( + accounts: &'accounts WithdrawTokensSecure<'info>, +) -> CpiContext<'accounts, 'remaining, 'cpi_code, 'info, token::Transfer<'info>> { + CpiContext::new( + accounts.token_program.to_account_info(), + token::Transfer { + from: accounts.vault.to_account_info(), + to: accounts.withdraw_destination.to_account_info(), + authority: accounts.pool.to_account_info(), + }, + ) +} +``` + +### 5. Test withdraw_secure Instruction Handler + +Finally, return to the test file to test the `withdraw_secure` instruction +handler and show that by narrowing the scope of our PDA signing authority, we've +removed the vulnerability. + +Before we write a test showing the vulnerability has been patched let's write a +test that simply shows that the initialization and withdraw instruction handlers +work as expected: + +```typescript +it("performs secure pool initialization and withdrawal correctly", async () => { + try { + const initialWithdrawBalance = await getAccount( + connection, + withdrawDestination, + ); + + await program.methods + .initializePoolSecure() + .accounts({ + mint: tokenMint, + vault: recommendedVault.publicKey, + withdrawDestination: withdrawDestination, + }) + .signers([recommendedVault]) + .rpc(); + + await new Promise(resolve => setTimeout(resolve, 1000)); + + await mintTo( + connection, + wallet.payer, + tokenMint, + recommendedVault.publicKey, + wallet.payer, + INITIAL_MINT_AMOUNT, + ); + + await program.methods + .withdrawSecure() + .accounts({ + vault: recommendedVault.publicKey, + withdrawDestination: withdrawDestination, + }) + .rpc(); + + const finalWithdrawBalance = await getAccount( + connection, + withdrawDestination, + ); + + expect( + Number(finalWithdrawBalance.amount) - + Number(initialWithdrawBalance.amount), + ).to.equal(INITIAL_MINT_AMOUNT); + } catch (error) { + throw new Error(`Test failed: ${error.message}`); + } +}); +``` + +Now, we'll test that the exploit no longer works. Since the `vault` authority is +the `pool` PDA derived using the intended `withdraw_destination` token account, +there should no longer be a way to withdraw to an account other than the +intended `withdraw_destination`. + +Add a test that shows you can't call `withdraw_secure` with the wrong withdrawal +destination. It can use the pool and vault created in the previous test. + +```typescript +it("prevents secure withdrawal to incorrect destination", async () => { + try { + await program.methods + .withdrawSecure() + .accounts({ + vault: recommendedVault.publicKey, + withdrawDestination: fakeWithdrawDestination, + }) + .signers([recommendedVault]) + .rpc(); + + throw new Error("Expected an error but withdrawal succeeded"); + } catch (error) { + expect(error).to.exist; + console.log("Error message:", error.message); + } +}); +``` + +Lastly, since the `pool` account is a PDA derived using the +`withdraw_destination` token account, we can't create a fake `pool` account +using the same PDA. 
Add one more test showing that the new +`initialize_pool_secure` instruction handler won't let an attacker put in the +wrong vault. + +```typescript +it("prevents secure pool initialization with incorrect vault", async () => { + try { + await program.methods + .initializePoolSecure() + .accounts({ + mint: tokenMint, + vault: insecureVault.address, + withdrawDestination: withdrawDestination, + }) + .signers([recommendedVault]) + .rpc(); + + throw new Error("Expected an error but initialization succeeded"); + } catch (error) { + expect(error).to.exist; + console.log("Error message:", error.message); + } +}); +``` + +Run `anchor test` to see that the new instruction handlers don't allow an +attacker to withdraw from a vault that isn't theirs. + +```bash + PDA sharing + ✔ allows insecure initialization with incorrect vault (852ms) + ✔ allows insecure withdrawal to incorrect destination (425ms) + ✔ performs secure pool initialization and withdrawal correctly (2150ms) +Error message: unknown signer: BpaG3NbsvLUqyFLZo9kWPwda3iPM8abJYkBfwBsASsgi + ✔ prevents secure withdrawal to incorrect destination +Error message: unknown signer: BpaG3NbsvLUqyFLZo9kWPwda3iPM8abJYkBfwBsASsgi + ✔ prevents secure pool initialization with incorrect vault +``` + +And that's it! Unlike some of the other security vulnerabilities we've +discussed, this one is more conceptual and can't be fixed by simply using a +particular Anchor type. You'll need to think through the architecture of your +program and ensure that you aren't sharing PDAs across different domains. + +If you want to take a look at the final solution code you can find it on the +[`solution` branch of the same repository](https://github.com/solana-developers/pda-sharing/tree/solution). + +## Challenge + +Just as with other lessons in this unit, your opportunity to practice avoiding +this security exploit lies in auditing your own or other programs. + +Take some time to review at least one program and look for potential +vulnerabilities in its PDA structure. PDAs used for signing should be narrow and +focused on a single domain as much as possible. + +Remember, if you find a bug or exploit in somebody else's program, please alert +them! If you find one in your own program, be sure to patch it right away. + + + +Push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=5744079f-9473-4485-9a14-9be4d31b40d1)! + + diff --git a/content/courses/program-security/reinitialization-attacks.mdx b/content/courses/program-security/reinitialization-attacks.mdx new file mode 100644 index 000000000..db445169f --- /dev/null +++ b/content/courses/program-security/reinitialization-attacks.mdx @@ -0,0 +1,525 @@ +--- +title: Reinitialization Attacks +objectives: + - Explain security risks associated with a reinitialization vulnerability + - Using Anchor's `init` constraint to initialize accounts, which automatically + sets an account discriminator that is checked to prevent the + reinitialization of an account + - Use native Rust to check if an account has already been initialized +description: + "Understand the security risks of account reinitialized attacks being used to + override data, and how to prevent them." +--- + +## Summary + +- **Prevent Account Reinitialization:** Use an account discriminator or + initialization flag to prevent an account from being reinitialized and + overwriting existing data. 
+- **Anchor Approach:** Simplify this by using Anchor's `init` constraint to + create an account via a CPI to the system program, automatically setting its + discriminator. +- **Native Rust Approach:** In native Rust, set an is_initialized flag during + account initialization and check it before reinitializing: + + ```rust + if account.is_initialized { + return Err(ProgramError::AccountAlreadyInitialized.into()); + } + ``` + +## Lesson + +Initialization sets the data of a new account for the first time. It's essential +to check if an account has already been initialized to prevent overwriting +existing data. Note that creating and initializing an account are separate +actions. Creating an account involves invoking the `create_account` instruction +handler on the System Program, which allocates space, rent in lamports, and +assigns the program owner. Initialization sets the account data. These steps can +be combined into a single transaction. + +### Missing Initialization Check + +In the example below, there's no check on the `user` account. The `initialize` +instruction handler sets the `authority` field on the `User` account type and +serializes the data. Without checks, an attacker could reinitialize the account, +overwriting the existing `authority`. + +```rust +use anchor_lang::prelude::*; + +declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); + +#[program] +pub mod initialization_insecure { + use super::*; + + pub fn initialize(ctx: Context) -> Result<()> { + ctx.accounts.user.authority = ctx.accounts.authority.key(); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Initialize<'info> { + #[account(mut)] + pub user: Account<'info, User>, + #[account(mut)] + pub authority: Signer<'info>, + pub system_program: Program<'info, System>, +} + +#[account] +#[derive(InitSpace)] +pub struct User { + pub authority: Pubkey, +} +``` + +### Add is_initialized Check + +To fix this, add an `is_initialized` field to the User account type and check it +before reinitializing: + +```rust +if user.is_initialized { + return Err(ProgramError::AccountAlreadyInitialized.into()); +} +``` + +This ensures the `user` account is only initialized once. If `is_initialized` is +true, the transaction fails, preventing an attacker from changing the account +authority. + +```rust +use anchor_lang::prelude::*; +use anchor_lang::solana_program::program_error::ProgramError; + +declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); + +#[program] +pub mod initialization_secure { + use super::*; + + pub fn initialize(ctx: Context) -> Result<()> { + let user = &mut ctx.accounts.user; + + if user.is_initialized { + return Err(ProgramError::AccountAlreadyInitialized.into()); + } + + user.is_initialized = true; + user.authority = ctx.accounts.authority.key(); + + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Initialize<'info> { + #[account(mut)] + pub user: Account<'info, User>, + #[account(mut)] + pub authority: Signer<'info>, + pub system_program: Program<'info, System>, +} + +#[account] +#[derive(InitSpace)] +pub struct User { + pub is_initialized: bool, + pub authority: Pubkey, +} +``` + +### Use Anchor's init Constraint + +[Anchor's `init` constraint](https://www.anchor-lang.com/docs/account-constraints), +used with the `#[account(...)]` attribute, initializes an account, sets the +account discriminator, and ensures that the instruction handler can only be +called once per account. 
The `init` constraint must be used with `payer` and +`space` constraints to specify the account paying for initialization and the +amount of space required. + +```rust +use anchor_lang::prelude::*; + +declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); + +const DISCRIMINATOR_SIZE: usize = 8; + +#[program] +pub mod initialization_recommended { + use super::*; + + pub fn initialize(ctx: Context) -> Result<()> { + msg!("GM"); + ctx.accounts.user.authority = ctx.accounts.authority.key(); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Initialize<'info> { + #[account( + init, + payer = authority, + space = DISCRIMINATOR_SIZE + User::INIT_SPACE + )] + pub user: Account<'info, User>, + #[account(mut)] + pub authority: Signer<'info>, + pub system_program: Program<'info, System>, +} + +#[account] +#[derive(InitSpace)] +pub struct User { + pub authority: Pubkey, +} +``` + +#### Anchor's init_if_needed Constraint + + + +[Anchor's `init_if_needed` constraint](https://www.anchor-lang.com/docs/account-constraints), +guarded by a feature flag, should be used with caution.It initializes an account +only if it hasn't been initialized yet. If the account is already initialized, +the instruction handler will still execute, so it's **extremely** important to +include checks in your instruction handler to prevent resetting the account to +its initial state. + + + +For example, if the `authority` field is set in the instruction handler, ensure +that your instruction handler includes checks to prevent an attacker from +reinitializing it after it's already been set. Typically, it's safer to have a +separate instruction handler for initializing account data. + +## Lab + +In this lab, we'll create a simple Solana program with two instruction handlers: + +- `insecure_initialization` - Initializes an account without checks, allowing + reinitialization. +- `recommended_initialization` - Initializes an account using Anchor's `init` + constraint, preventing reinitialization. + +### 1. Starter + +To get started, download the starter code from the +[`starter` branch of this repository](https://github.com/solana-developers/reinitialization-attacks/tree/starter). +The starter code includes a program with one instruction handler and the +boilerplate setup for the test file. + +The `insecure_initialization` instruction handler initializes a new `user` +account that stores the public key of an `authority`. The account is expected to +be allocated client-side and then passed into the program instruction. However, +there are no checks to verify if the `user` account's initial state has already +been set. This means the same account can be passed in a second time, allowing +the `authority` to be overwritten. + +```rust +use anchor_lang::prelude::*; + +declare_id!("HLhxJzFYjtXCET4HxnSzv27SpXg16FWNDi2LvrNmSvzH"); + +#[program] +pub mod initialization { + use super::*; + + pub fn insecure_initialization(ctx: Context) -> Result<()> { + let user = &mut ctx.accounts.user; + let mut user_data = User::try_from_slice(&user.data.borrow())?; + user_data.authority = ctx.accounts.authority.key(); + user_data.serialize(&mut *user.data.borrow_mut())?; + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Unchecked<'info> { + #[account(mut)] + /// CHECK: This account will be initialized in the instruction + pub user: UncheckedAccount<'info>, + pub authority: Signer<'info>, +} + +#[account] +#[derive(InitSpace)] +pub struct User { + pub authority: Pubkey, +} +``` + +### 2. 
Test insecure_initialization Instruction Handler + +The test file includes the setup to create an account by invoking the system +program and then invokes the `insecure_initialization` instruction handler twice +using the same account. + +Since there are no checks in the `insecure_initialization` instruction handler +to verify that the account data has not already been initialized, this +instruction handler will execute successfully both times, even with a +_different_ authority account. + +```typescript +import * as anchor from "@coral-xyz/anchor"; +import { Program } from "@coral-xyz/anchor"; +import { Initialization } from "../target/types/initialization"; +import { + Keypair, + LAMPORTS_PER_SOL, + SystemProgram, + Transaction, + SendTransactionError, +} from "@solana/web3.js"; +import { expect } from "chai"; +import { airdropIfRequired } from "@solana-developers/helpers"; + +describe("Initialization", () => { + const provider = anchor.AnchorProvider.env(); + anchor.setProvider(provider); + + const program = anchor.workspace.Initialization as Program; + + const walletAuthority = provider.wallet as anchor.Wallet; + const secondWallet = Keypair.generate(); + + const insecureUserAccount = Keypair.generate(); + const recommendedUserAccount = Keypair.generate(); + + const ACCOUNT_SPACE = 32; + const AIRDROP_AMOUNT = 1 * LAMPORTS_PER_SOL; + const MINIMUM_BALANCE_FOR_RENT_EXEMPTION = 1 * LAMPORTS_PER_SOL; + + before(async () => { + try { + const rentExemptionAmount = + await provider.connection.getMinimumBalanceForRentExemption( + ACCOUNT_SPACE, + ); + + const createAccountInstruction = SystemProgram.createAccount({ + fromPubkey: walletAuthority.publicKey, + newAccountPubkey: insecureUserAccount.publicKey, + space: ACCOUNT_SPACE, + lamports: rentExemptionAmount, + programId: program.programId, + }); + + const transaction = new Transaction().add(createAccountInstruction); + + await anchor.web3.sendAndConfirmTransaction( + provider.connection, + transaction, + [walletAuthority.payer, insecureUserAccount], + ); + + await airdropIfRequired( + provider.connection, + secondWallet.publicKey, + AIRDROP_AMOUNT, + MINIMUM_BALANCE_FOR_RENT_EXEMPTION, + ); + } catch (error) { + console.error("Setup failed:", error); + throw error; + } + }); + + it("performs insecure initialization", async () => { + try { + await program.methods + .insecureInitialization() + .accounts({ + user: insecureUserAccount.publicKey, + authority: walletAuthority.publicKey, + }) + .signers([walletAuthority.payer]) + .rpc(); + } catch (error) { + console.error("Insecure initialization failed:", error); + throw error; + } + }); + + it("re-invokes insecure initialization with different authority", async () => { + try { + const transaction = await program.methods + .insecureInitialization() + .accounts({ + user: insecureUserAccount.publicKey, + authority: secondWallet.publicKey, + }) + .signers([secondWallet]) + .transaction(); + + await anchor.web3.sendAndConfirmTransaction( + provider.connection, + transaction, + [secondWallet], + ); + } catch (error) { + console.error("Re-invocation of insecure initialization failed:", error); + throw error; + } + }); +}); +``` + +Run `anchor test` to verify that the `insecure_initialization` instruction +handler executes successfully in both invocations. + +```bash +Initialization + ✔ performs insecure initialization (420ms) + ✔ re-invokes insecure initialization with different authority (419ms) +``` + +### 3. 
Add recommended_initialization Instruction Handler + +Now, let's create a new instruction handler called `recommended_initialization` +that addresses the issue. Unlike the insecure instruction handler, this one will +handle both the creation and initialization of the user's account using Anchor's +`init` constraint. + +This constraint ensures the account is created via a CPI to the system program, +and the discriminator is set. This way, any subsequent invocation with the same +user account will fail, preventing reinitialization. + +```rust +use anchor_lang::prelude::*; +use borsh::{BorshDeserialize, BorshSerialize}; + +declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); + +#[program] +pub mod initialization { + use super::*; + ... + pub fn recommended_initialization(ctx: Context) -> Result<()> { + ctx.accounts.user.authority = ctx.accounts.authority.key(); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Checked<'info> { + #[account( + init, + payer = authority, + space = DISCRIMINATOR_SIZE + User::INIT_SPACE + )] + user: Account<'info, User>, + #[account(mut)] + authority: Signer<'info>, + system_program: Program<'info, System>, +} + +#[account] +#[derive(InitSpace)] +pub struct User { + pub authority: Pubkey, +} +``` + +### 4. Test recommended_initialization Instruction Handler + +To test the `recommended_initialization` instruction handler, invoke it twice as +before. This time, the transaction should fail when attempting to initialize the +same account a second time. + +```typescript +describe("Initialization", () => { + ... + it("performs recommended initialization", async () => { + try { + await program.methods + .recommendedInitialization() + .accounts({ + user: recommendedUserAccount.publicKey, + }) + .signers([recommendedUserAccount]) + .rpc(); + } catch (error) { + console.error("Recommended initialization failed:", error); + throw error; + } + }); + + it("fails to re-invoke recommended initialization with different authority", async () => { + try { + const transaction = await program.methods + .recommendedInitialization() + .accounts({ + user: recommendedUserAccount.publicKey, + authority: secondWallet.publicKey, + }) + .transaction(); + + await anchor.web3.sendAndConfirmTransaction( + provider.connection, + transaction, + [secondWallet, recommendedUserAccount], + { commitment: "confirmed" } + ); + + throw new Error("Re-invocation succeeded unexpectedly"); + } catch (error) { + if (error.message === "Re-invocation succeeded unexpectedly") { + throw error; + } + + if (error instanceof SendTransactionError) { + console.log("Transaction failed as expected"); + } else { + console.error("Unexpected error:", error); + } + console.log(error) + expect(error).to.exist; + } + }); +}); +``` + +Run `anchor test` to confirm that the second transaction fails with an error +indicating the account is already in use. + +```bash +'Program HLhxJzFYjtXCET4HxnSzv27SpXg16FWNDi2LvrNmSvzH invoke [1]', +'Program log: Instruction: RecommendedInitialization', +'Program 11111111111111111111111111111111 invoke [2]', +'Allocate: account Address { address: FcW7tG71GKuRgxEbgFuuNQNV3HVSMmVyKATo74iCK4yi, base: None } already in use', +'Program 11111111111111111111111111111111 failed: custom program error: 0x0', +'Program HLhxJzFYjtXCET4HxnSzv27SpXg16FWNDi2LvrNmSvzH consumed 3330 of 200000 compute units', +'Program HLhxJzFYjtXCET4HxnSzv27SpXg16FWNDi2LvrNmSvzH failed: custom program error: 0x0' +``` + +Using Anchor's `init` constraint is usually sufficient to protect against +reinitialization attacks. 
While the fix for these security exploits is +straightforward, it is crucial. Every time you initialize an account, ensure +that you're either using the `init` constraint or implementing another check to +prevent resetting an existing account's initial state. + +For the final solution code, refer to the +[`solution` branch of this repository](https://github.com/solana-developers/reinitialization-attacks/tree/solution). + +## Challenge + +Your challenge is to audit your own or other programs to practice avoiding this +security exploit. + +Take some time to review at least one program and confirm that instruction +handlers are adequately protected against reinitialization attacks. + +If you find a bug or exploit in another program, alert the developer. If you +find one in your own program, patch it immediately. + + + +Push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=652c68aa-18d9-464c-9522-e531fd8738d5)! + + diff --git a/content/courses/program-security/security-intro.mdx b/content/courses/program-security/security-intro.mdx new file mode 100644 index 000000000..604b0f5bf --- /dev/null +++ b/content/courses/program-security/security-intro.mdx @@ -0,0 +1,49 @@ +--- +title: How to Approach the Program Security Course +objectives: + - Understand how to approach the Program Security Course +description: + "Learn how to think intelligently about security for your onchain programs, + whether developing in Anchor or Native Rust." +--- + +## Overview + +This course aims to introduce you to a range of common security exploits unique +to Solana development. We've modeled this course heavily on Coral's +[Sealevel Attacks](https://github.com/coral-xyz/sealevel-attacks) repository. + +Program security is covered in our +[Anchor](/developers/courses/onchain-development) and +[Native Rust](/developers/courses/native-onchain-development) development +courses to ensure that anyone deploying programs to Mainnet has at least a basic +understanding of security. Those courses should help you avoid some common +Solana exploits on your own. + +This course builds on those courses with two main goals: + +1. Expand your awareness of the Solana programming model and highlight areas + where you need to focus to close security loopholes. +2. Introduce you to the tools provided by Anchor to help keep your programs + secure, and show native Rust users how to implement similar techniques on + their own. + +While the first few lessons in this course cover topics similar to those in the +[Anchor course](/developers/courses/onchain-development/intro-to-anchor) or +[Program Security lesson](/developers/courses/native-onchain-development/program-security) +in the [Native Course](/developers/courses/native-onchain-development), but as +you progress, you'll encounter new types of attacks. We encourage you to explore +all of them. + + + +Unlike the lessons in other courses, which are in order, you are welcome to +explore these lessons in whatever order suits you best. + + + +Even though each security vulnerability may seem "simple," there's a lot to +discuss. These lessons contain less prose and more code, ensuring you gain a +solid understanding of the security risks discussed. + +As always, your feedback is appreciated. Good luck as you delve into the course! 
diff --git a/content/courses/program-security/signer-auth.mdx b/content/courses/program-security/signer-auth.mdx new file mode 100644 index 000000000..13df877c3 --- /dev/null +++ b/content/courses/program-security/signer-auth.mdx @@ -0,0 +1,611 @@ +--- +title: Signer Authorization +objectives: + - Explain the security risks of not performing appropriate signer checks. + - Implement signer checks using native Rust + - Implement signer checks using Anchor's `Signer` type + - Implement signer checks using Anchor's `#[account(signer)]` constraint +description: + "Ensure instructions are only executed by authorized accounts by implementing + signer checks." +--- + +## Summary + +- **Signer Checks** are essential to verify that specific accounts have signed a + transaction. Without proper signer checks, unauthorized accounts may execute + instructions they shouldn't be allowed to perform. +- In Anchor, you can use the `Signer` account type in your account validation + struct to automatically perform a signer check on a given account. +- Anchor also provides the + [`#[account(signer)]`](https://www.anchor-lang.com/docs/account-constraints) + constraint, which automatically verifies that a specified account has signed + the transaction. +- In native Rust, implement a signer check by verifying that an account's + `is_signer` property is `true`: + + ```rust + if !ctx.accounts.authority.is_signer { + return Err(ProgramError::MissingRequiredSignature.into()); + } + ``` + +## Lesson + +**Signer checks** ensure that only authorized accounts can execute specific +instructions. Without these checks, any account might perform operations that +should be restricted, potentially leading to severe security vulnerabilities, +such as unauthorized access and control over program accounts. + +### Missing Signer Check + +Below is an oversimplified instruction handler that updates the `authority` +field on a program account. Notice that the `authority` field in the +`UpdateAuthority` account validation struct is of type `UncheckedAccount`. In +Anchor, the +[`UncheckedAccount`](https://docs.rs/anchor-lang/latest/anchor_lang/accounts/unchecked_account/struct.UncheckedAccount.html) +type indicates that no checks are performed on the account before executing the +instruction handler. + +Although the `has_one` constraint ensures that the `authority` account passed to +the instruction handler matches the `authority` field on the `vault` account, +there is no verification that the `authority` account actually authorized the +transaction. + +This omission allows an attacker to pass in the `authority` account's public key +and their own public key as the `new_authority` account, effectively reassigning +themselves as the new authority of the `vault` account. Once they have control, +they can interact with the program as the new authority. + +```rust +use anchor_lang::prelude::*; + +declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); + +#[program] +pub mod insecure_update{ + use super::*; + ... 
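+
+    // Note: nothing in this handler or in UpdateAuthority below verifies that
+    // `authority` actually signed the transaction; has_one only compares pubkeys.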
+ pub fn update_authority(ctx: Context) -> Result<()> { + ctx.accounts.vault.authority = ctx.accounts.new_authority.key(); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct UpdateAuthority<'info> { + #[account( + mut, + has_one = authority + )] + pub vault: Account<'info, Vault>, + /// CHECK: This account will not be checked by Anchor + pub new_authority: UncheckedAccount<'info>, + /// CHECK: This account will not be checked by Anchor + pub authority: UncheckedAccount<'info>, +} + +#[account] +pub struct Vault { + token_account: Pubkey, + authority: Pubkey, +} +``` + +### Adding Signer Authorization Checks + +To validate that the `authority` account signed the transaction, add a signer +check within the instruction handler: + +```rust +if !ctx.accounts.authority.is_signer { + return Err(ProgramError::MissingRequiredSignature.into()); +} +``` + +By adding this check, the instruction handler will only proceed if the +`authority` account has signed the transaction. If the account is not signed, +the transaction will fail. + +```rust +use anchor_lang::prelude::*; + +declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); + +#[program] +pub mod secure_update{ + use super::*; + ... + pub fn update_authority(ctx: Context) -> Result<()> { + if !ctx.accounts.authority.is_signer { + return Err(ProgramError::MissingRequiredSignature.into()); + } + + ctx.accounts.vault.authority = ctx.accounts.new_authority.key(); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct UpdateAuthority<'info> { + #[account( + mut, + has_one = authority + )] + pub vault: Account<'info, Vault>, + /// CHECK: This account will not be checked by Anchor + pub new_authority: UncheckedAccount<'info>, + /// CHECK: This account will not be checked by Anchor + pub authority: UncheckedAccount<'info>, +} + +#[account] +pub struct Vault { + token_account: Pubkey, + authority: Pubkey, +} +``` + +### Use Anchor's Signer Account Type + +Incorporating the +[`signer`](https://docs.rs/anchor-lang/latest/anchor_lang/accounts/signer/struct.Signer.html) +check directly within the instruction handler logic can blur the separation +between account validation and instruction handler execution. To maintain this +separation, use Anchor's `Signer` account type. By changing the `authority` +account's type to `Signer` in the validation struct, Anchor automatically checks +at runtime that the specified account signed the transaction. + +```rust +use anchor_lang::prelude::*; + +declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); + +#[program] +pub mod secure_update{ + use super::*; + ... + pub fn update_authority(ctx: Context) -> Result<()> { + ctx.accounts.vault.authority = ctx.accounts.new_authority.key(); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct UpdateAuthority<'info> { + #[account( + mut, + has_one = authority + )] + pub vault: Account<'info, Vault>, + /// CHECK: This account will not be checked by Anchor + pub new_authority: UncheckedAccount<'info>, + pub authority: Signer<'info>, +} + +#[account] +pub struct Vault { + token_account: Pubkey, + authority: Pubkey, +} +``` + + + +When you use the `Signer` type, no other ownership or type checks are +performed. + + + +### Using Anchor's `#[account(signer)]` Constraint + +While the `Signer` account type is useful, it doesn't perform other ownership or +type checks, limiting its use in instruction handler logic. 
+[Anchor's `#[account(signer)]`](https://www.anchor-lang.com/docs/account-constraints) +constraint addresses this by verifying that the account signed the transaction +while allowing access to its underlying data. + +For example, if you expect an account to be both a signer and a data source, +using the `Signer` type would require manual deserialization, and you wouldn't +benefit from automatic ownership and type checking. Instead, the +`#[account(signer)]` constraint allows you to access the data and ensure the +account signed the transaction. + +In this example, you can safely interact with the data stored in the `authority` +account while ensuring that it signed the transaction. + +```rust +use anchor_lang::prelude::*; + +declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); + +#[program] +pub mod secure_update{ + use super::*; + ... + pub fn update_authority(ctx: Context) -> Result<()> { + ctx.accounts.vault.authority = ctx.accounts.new_authority.key(); + + // access the data stored in authority + msg!("Total number of depositors: {}", ctx.accounts.authority.num_depositors); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct UpdateAuthority<'info> { + #[account( + mut, + has_one = authority + )] + pub vault: Account<'info, Vault>, + /// CHECK: This account will not be checked by Anchor + pub new_authority: UncheckedAccount<'info>, + #[account(signer)] + pub authority: Account<'info, AuthState> +} + +#[account] +pub struct Vault { + token_account: Pubkey, + authority: Pubkey, +} +#[account] +pub struct AuthState{ + amount: u64, + num_depositors: u64, + num_vaults: u64 +} +``` + +## Lab + +In this lab, we'll create a simple program to demonstrate how a missing signer +check can allow an attacker to withdraw tokens that don't belong to them. This +program initializes a simplified token `vault` account and shows how the absence +of a signer check could result in the vault being drained. + +### 1. Starter + +To get started, download the starter code from the +[`starter` branch of this repository](https://github.com/solana-developers/signer-auth/tree/starter). +The starter code includes a program with two instruction handlers and the +boilerplate setup for the test file. + +The `initialize_vault` instruction handler sets up two new accounts: `Vault` and +`TokenAccount`. The `Vault` account is initialized using a Program Derived +Address (PDA) and stores the address of a token account and the vault's +authority. The `vault` PDA will be the authority of the token account, enabling +the program to sign off on token transfers. + +The `insecure_withdraw` instruction handler transfers tokens from the `vault` +account's token account to a `withdraw_destination` token account. However, the +`authority` account in the `InsecureWithdraw` struct is of type +`UncheckedAccount`, a wrapper around `AccountInfo` that explicitly indicates the +account is unchecked. + +Without a signer check, anyone can provide the public key of the `authority` +account that matches the `authority` stored on the `vault` account, and the +`insecure_withdraw` instruction handler will continue processing. + +Although this example is somewhat contrived, as any DeFi program with a vault +would be more sophisticated, it effectively illustrates how the lack of a signer +check can lead to unauthorized token withdrawals. 
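+
+The exploit works because the Solana runtime only verifies signatures for
+accounts that the transaction marks as signers. An attacker can therefore list
+the real authority's public key with `isSigner: false` and sign with their own
+keypair. A rough sketch of that account meta, with hypothetical values and not
+taken from the lab's tests:
+
+```typescript
+import { AccountMeta, Keypair, PublicKey } from "@solana/web3.js";
+
+// Placeholder values, for illustration only.
+const victimAuthority = new PublicKey("11111111111111111111111111111111");
+const attacker = Keypair.generate();
+
+// The real authority is listed as a non-signer account meta...
+const authorityMeta: AccountMeta = {
+  pubkey: victimAuthority,
+  isSigner: false,
+  isWritable: false,
+};
+
+// ...while the attacker signs the transaction with their own key. Unless the
+// program checks `is_signer` (or uses Anchor's `Signer` type), nothing flags
+// the mismatch.
+console.log(authorityMeta.isSigner, attacker.publicKey.toBase58());
+```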
+ +```rust +use anchor_lang::prelude::*; +use anchor_spl::token::{self, Mint, Token, TokenAccount}; + +declare_id!("FeKh59XMh6BcN6UdekHnaFHsNH9NVE121GgDzSyYPKKS"); + +pub const DISCRIMINATOR_SIZE: usize = 8; + +#[program] +pub mod signer_authorization { + use super::*; + + pub fn initialize_vault(ctx: Context) -> Result<()> { + ctx.accounts.vault.token_account = ctx.accounts.token_account.key(); + ctx.accounts.vault.authority = ctx.accounts.authority.key(); + Ok(()) + } + + pub fn insecure_withdraw(ctx: Context) -> Result<()> { + let amount = ctx.accounts.token_account.amount; + + let seeds = &[b"vault".as_ref(), &[ctx.bumps.vault]]; + let signer = [&seeds[..]]; + + let cpi_ctx = CpiContext::new_with_signer( + ctx.accounts.token_program.to_account_info(), + token::Transfer { + from: ctx.accounts.token_account.to_account_info(), + authority: ctx.accounts.vault.to_account_info(), + to: ctx.accounts.withdraw_destination.to_account_info(), + }, + &signer, + ); + + token::transfer(cpi_ctx, amount)?; + Ok(()) + } +} + +#[derive(Accounts)] +pub struct InitializeVault<'info> { + #[account( + init, + payer = authority, + space = DISCRIMINATOR_SIZE + Vault::INIT_SPACE, + seeds = [b"vault"], + bump + )] + pub vault: Account<'info, Vault>, + #[account( + init, + payer = authority, + token::mint = mint, + token::authority = vault, + )] + pub token_account: Account<'info, TokenAccount>, + pub mint: Account<'info, Mint>, + #[account(mut)] + pub authority: Signer<'info>, + pub token_program: Program<'info, Token>, + pub system_program: Program<'info, System>, + pub rent: Sysvar<'info, Rent>, +} + +#[derive(Accounts)] +pub struct InsecureWithdraw<'info> { + #[account( + seeds = [b"vault"], + bump, + has_one = token_account, + has_one = authority + )] + pub vault: Account<'info, Vault>, + #[account(mut)] + pub token_account: Account<'info, TokenAccount>, + #[account(mut)] + pub withdraw_destination: Account<'info, TokenAccount>, + pub token_program: Program<'info, Token>, + /// CHECK: demo missing signer check + pub authority: UncheckedAccount<'info>, +} + +#[account] +#[derive(Default, InitSpace)] +pub struct Vault { + token_account: Pubkey, + authority: Pubkey, +} +``` + +### 2. Test insecure_withdraw Instruction Handler + +The test file includes code to invoke the `initialize_vault` instruction +handler, using `walletAuthority` as the `authority` on the vault. The code then +mints 100 tokens to the `vaultTokenAccount` token account. Ideally, only the +`walletAuthority` key should be able to withdraw these 100 tokens from the +vault. + +Next, we'll add a test to invoke `insecure_withdraw` on the program to +demonstrate that the current version allows a third party to withdraw those 100 +tokens. + +In the test, we'll use the `walletAuthority` public key as the `authority` +account but sign and send the transaction with a different keypair. + +```typescript +describe("Signer Authorization", () => { + ... 
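+    // Starter setup elided above: the suite has already invoked the
+    // initialize_vault instruction with walletAuthority as the vault's
+    // authority and minted 100 tokens to vaultTokenAccount.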
+ it("performs insecure withdraw", async () => { + try { + const transaction = await program.methods + .insecureWithdraw() + .accounts({ + vault: vaultPDA, + tokenAccount: vaultTokenAccount.publicKey, + withdrawDestination: unauthorizedWithdrawDestination, + authority: walletAuthority.publicKey, + }) + .transaction(); + + await anchor.web3.sendAndConfirmTransaction(connection, transaction, [ + unauthorizedWallet, + ]); + + const tokenAccountInfo = await getAccount( + connection, + vaultTokenAccount.publicKey + ); + expect(Number(tokenAccountInfo.amount)).to.equal(0); + } catch (error) { + console.error("Insecure withdraw failed:", error); + throw error; + } + }); +}) +``` + +Run `anchor test` to confirm that both transactions will be completed +successfully. + +```bash +Signer Authorization + ✔ initializes vault and mints tokens (882ms) + ✔ performs insecure withdraw (435ms) +``` + +The `insecure_withdraw` instruction handler demonstrates a security +vulnerability. Since there is no signer check for the `authority` account, this +handler will transfer tokens from the `vaultTokenAccount` to the +`unauthorizedWithdrawDestination`, as long as the public key of the `authority` +account matches the `walletAuthority.publicKey` stored in the `vault` account's +`authority` field. + +In the test, we use the `unauthorizedWallet` to sign the transaction, while +still specifying the `walletAuthority.publicKey` as the authority in the +instruction accounts. This mismatch between the signer and the specified +`authority` would normally cause a transaction to fail. However, due to the lack +of a proper signer check in the `insecure_withdraw` handler, the transaction +succeeds. + +### 3. Add secure_withdraw Instruction Handler + +To fix this issue, we'll create a new instruction handler called +`secure_withdraw`. This instruction handler will be identical to +`insecure_withdraw`, but we'll use the `Signer` type in the Accounts struct to +validate the authority account in the `SecureWithdraw` struct. If the +`authority` account isn't a signer on the transaction, the transaction should +fail with an error. + +```rust +use anchor_lang::prelude::*; +use anchor_spl::token::{self, Mint, Token, TokenAccount}; + +declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); + +#[program] +pub mod signer_authorization { + use super::*; + ... + pub fn secure_withdraw(ctx: Context) -> Result<()> { + let amount = ctx.accounts.token_account.amount; + + let seeds = &[b"vault".as_ref(), &[ctx.bumps.vault]]; + let signer = [&seeds[..]]; + + let cpi_ctx = CpiContext::new_with_signer( + ctx.accounts.token_program.to_account_info(), + token::Transfer { + from: ctx.accounts.token_account.to_account_info(), + authority: ctx.accounts.vault.to_account_info(), + to: ctx.accounts.withdraw_destination.to_account_info(), + }, + &signer, + ); + + token::transfer(cpi_ctx, amount)?; + Ok(()) + } +} + +#[derive(Accounts)] +pub struct SecureWithdraw<'info> { + #[account( + seeds = [b"vault"], + bump, + has_one = token_account, + has_one = authority + )] + pub vault: Account<'info, Vault>, + #[account(mut)] + pub token_account: Account<'info, TokenAccount>, + #[account(mut)] + pub withdraw_destination: Account<'info, TokenAccount>, + pub token_program: Program<'info, Token>, + pub authority: Signer<'info>, +} +``` + +### 4. Test secure_withdraw Instruction Handler + +With the new instruction handler in place, return to the test file to test the +`secureWithdraw` instruction handler. 
Invoke the `secureWithdraw` instruction +handler, using the `walletAuthority.publicKey` as the `authority` account, and +use the `unauthorizedWallet` keypair as the signer. Set the +`unauthorizedWithdrawDestination` as the withdraw destination. + +Since the `authority` account is validated using the `Signer` type, the +transaction should fail with a signature verification error. This is because the +`unauthorizedWallet` is attempting to sign the transaction, but it doesn't match +the `authority` specified in the instruction (which is +`walletAuthority.publicKey`). + +The test expects this transaction to fail, demonstrating that the secure +withdraw function properly validates the signer. If the transaction unexpectedly +succeeds, the test will throw an error indicating that the expected security +check did not occur. + +```typescript +describe("Signer Authorization", () => { + ... + it("fails to perform secure withdraw with incorrect signer", async () => { + try { + const transaction = await program.methods + .secureWithdraw() + .accounts({ + vault: vaultPDA, + tokenAccount: vaultTokenAccount.publicKey, + withdrawDestination: unauthorizedWithdrawDestination, + authority: walletAuthority.publicKey, + }) + .transaction(); + + await anchor.web3.sendAndConfirmTransaction(connection, transaction, [ + unauthorizedWallet, + ]); + throw new Error("Expected transaction to fail, but it succeeded"); + } catch (error) { + expect(error).to.be.an("error"); + console.log("Error message:", error.message); + } + }); +}) +``` + +Run `anchor test` to see that the transaction now returns a signature +verification error. + +```bash +signer-authorization +Error message: Signature verification failed. +Missing signature for public key [`GprrWv9r8BMxQiWea9MrbCyK7ig7Mj8CcseEbJhDDZXM`]. + ✔ fails to perform secure withdraw with incorrect signer +``` + +This example shows how important it is to think through who should authorize +instructions and ensure that each is a signer on the transaction. + +To review the final solution code, you can find it on the +[`solution` branch of the repository](https://github.com/solana-developers/signer-auth/tree/solution). + +## Challenge + +Now that you've worked through the labs and challenges in this course, it's time +to apply your knowledge in a practical setting. For this challenge and those +that follow on security vulnerabilities, audit your own programs for the +specific vulnerability discussed in each lesson. + +### Steps + +1. **Audit Your Program or Find an Open Source Project**: + + - Begin by auditing your own code for missing signer checks, or find an open + source Solana program to audit. A great place to start is with the + [program examples](https://github.com/solana-developers/program-examples) + repository. + +2. **Look for Signer Check Issues**: + + - Focus on instruction handlers where signer authorization is crucial, + especially those that transfer tokens or modify sensitive account data. + - Review the program for any `UncheckedAccount` types where signer validation + should be enforced. + - Ensure that any accounts that should require user authorization are defined + as `Signer` in the instruction handler. + +3. **Patch or Report**: + - If you find a bug in your own code, fix it by using the `Signer` type for + accounts that require signer validation. + - If the issue exists in an open source project, notify the project + maintainers or submit a pull request. 
+ + + +After completing the challenge, push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=26b3f41e-8241-416b-9cfa-05c5ab519d80)! + + diff --git a/content/courses/program-security/type-cosplay.mdx b/content/courses/program-security/type-cosplay.mdx new file mode 100644 index 000000000..c64ed9994 --- /dev/null +++ b/content/courses/program-security/type-cosplay.mdx @@ -0,0 +1,582 @@ +--- +title: Type Cosplay +objectives: + - Explain the security risks associated with not checking account types + - Implement an account type discriminator using native Rust + - Use Anchor's `init` constraint to initialize accounts + - Use Anchor's `Account` type for account validation +description: + "Understand the risks of using incorrect account types in instructions and how + to mitigate them with account type checks." +--- + +## Summary + +- **Discriminators** are 8-byte identifiers written to accounts that distinguish + between different account types, ensuring programs interact with the correct + data. +- **Implement a discriminator** in Rust by including a field in the account + struct to represent the account type. + + ```rust + #[derive(BorshSerialize, BorshDeserialize)] + pub struct User { + discriminant: AccountDiscriminant, + user: Pubkey, + } + + #[derive(BorshSerialize, BorshDeserialize, PartialEq)] + pub enum AccountDiscriminant { + User, + Admin, + } + ``` + +- **Check the discriminator** in Rust to verify that the deserialized account + data matches the expected value. + + ```rust + if user.discriminant != AccountDiscriminant::User { + return Err(ProgramError::InvalidAccountData.into()); + } + ``` + +- **In Anchor**, program account types automatically implement the + `Discriminator` trait, which creates an 8-byte unique identifier for a type. +- Use Anchor's `Account<'info, T>` type to automatically check the discriminator + when deserializing the account data. + +## Lesson + +"Type cosplay" refers to using an unexpected account type in place of an +expected one. Under the hood, account data is stored as an array of bytes that a +program deserializes into a custom account type. Without a method to distinguish +between account types explicitly, data from an unexpected account could result +in instructions being used in unintended ways. + +### Unchecked Account + +In the example below, both the `AdminConfig` and `UserConfig` account types +store a single public key. The `admin_instruction` deserializes the +`admin_config` account as an `AdminConfig` type and then performs an owner check +and data validation check. + +However, since the `AdminConfig` and `UserConfig` account types have the same +data structure, a `UserConfig` account type could be passed as the +`admin_config` account. As long as the public key stored on the account matches +the `admin` signing the transaction, the `admin_instruction` would process, even +if the signer isn't actually an admin. + +Note that the names of the fields stored on the account types (`admin` and +`user`) make no difference when deserializing account data. The data is +serialized and deserialized based on the order of fields rather than their +names. 
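+
+Because only the byte layout matters, bytes written for one type deserialize
+cleanly as the other. The short, standalone sketch below (not part of the
+example program; it assumes the `borsh` crate's 0.10-style `try_to_vec` API and
+uses plain `[u8; 32]` keys in place of `Pubkey`) demonstrates this:
+
+```rust
+use borsh::{BorshDeserialize, BorshSerialize};
+
+// Two unrelated types with an identical layout: a single 32-byte key.
+#[derive(BorshSerialize, BorshDeserialize)]
+struct AdminConfig {
+    admin: [u8; 32],
+}
+
+#[derive(BorshSerialize, BorshDeserialize)]
+struct UserConfig {
+    user: [u8; 32],
+}
+
+fn main() {
+    // Serialize a UserConfig...
+    let user_config = UserConfig { user: [7u8; 32] };
+    let bytes = user_config.try_to_vec().unwrap();
+
+    // ...and deserialize the exact same bytes as an AdminConfig. This succeeds
+    // because Borsh matches fields by order and size, never by name.
+    let fake_admin = AdminConfig::try_from_slice(&bytes).unwrap();
+    assert_eq!(fake_admin.admin, [7u8; 32]);
+}
+```
+
+The same swap works against the insecure program shown below.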
+ +```rust +use anchor_lang::prelude::*; + +declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); + +#[program] +pub mod type_cosplay_insecure { + use super::*; + + pub fn admin_instruction(ctx: Context) -> Result<()> { + let account_data = + AdminConfig::try_from_slice(&ctx.accounts.admin_config.data.borrow()).unwrap(); + if ctx.accounts.admin_config.owner != ctx.program_id { + return Err(ProgramError::IllegalOwner.into()); + } + if account_data.admin != ctx.accounts.admin.key() { + return Err(ProgramError::InvalidAccountData.into()); + } + msg!("Admin {}", account_data.admin); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct AdminInstruction<'info> { + /// CHECK: This account is not being validated by Anchor + admin_config: UncheckedAccount<'info>, + admin: Signer<'info>, +} + +#[derive(AnchorSerialize, AnchorDeserialize, InitSpace)] +pub struct AdminConfig { + admin: Pubkey, +} + +#[derive(AnchorSerialize, AnchorDeserialize, InitSpace)] +pub struct UserConfig { + user: Pubkey, +} +``` + +#### Add Account Discriminator + +To resolve this, add a discriminant field for each account type and set the +discriminant when initializing an account. + + + +While they sound similar, a +[Rust **discriminant**](https://doc.rust-lang.org/std/mem/fn.discriminant.html) +isn't the same thing as an +[Anchor **discriminator**](https://book.anchor-lang.com/anchor_bts/discriminator.html)! + +- **Rust discriminant**: This is an internal value that Rust uses to keep track + of which variant an enum currently represents. It's like a behind-the-scenes + label for enum variants. + +- **Anchor discriminator**: This is a unique 8-byte identifier that Anchor adds + to the beginning of each account's data. It helps Solana programs quickly + recognize what type of account they're dealing with. + +In simple terms: + +- Discriminants are Rust's way of organizing enum variants. +- Discriminators are Anchor's way of labeling different account types in Solana. + + + + +The example below updates the `AdminConfig` and `UserConfig` account types with +a `discriminant` field. The `admin_instruction` now includes an additional data +validation check for the `discriminant` field. + +```rust +if account_data.discriminant != AccountDiscriminant::Admin { + return Err(ProgramError::InvalidAccountData.into()); +} +``` + +If the `discriminant` field of the account passed into the instruction as the +`admin_config` account does not match the expected `AccountDiscriminant`, the +transaction will fail. Ensure that the appropriate value for `discriminant` is +set when initializing each account, and then include these checks in every +subsequent instruction. 
+ +```rust +use anchor_lang::prelude::*; + +declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); + +#[program] +pub mod type_cosplay_secure { + use super::*; + + pub fn admin_instruction(ctx: Context) -> Result<()> { + let account_data = + AdminConfig::try_from_slice(&ctx.accounts.admin_config.data.borrow()).unwrap(); + if ctx.accounts.admin_config.owner != ctx.program_id { + return Err(ProgramError::IllegalOwner.into()); + } + if account_data.admin != ctx.accounts.admin.key() { + return Err(ProgramError::InvalidAccountData.into()); + } + if account_data.discriminant != AccountDiscriminant::Admin { + return Err(ProgramError::InvalidAccountData.into()); + } + msg!("Admin {}", account_data.admin); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct AdminInstruction<'info> { + /// CHECK: This account is not being validated by Anchor + admin_config: UncheckedAccount<'info>, + admin: Signer<'info>, +} + +#[derive(AnchorSerialize, AnchorDeserialize, InitSpace)] +pub struct AdminConfig { + discriminant: AccountDiscriminant, + admin: Pubkey, +} + +#[derive(AnchorSerialize, AnchorDeserialize, InitSpace)] +pub struct UserConfig { + discriminant: AccountDiscriminant, + user: Pubkey, +} + +#[derive(AnchorSerialize, AnchorDeserialize, PartialEq, InitSpace)] +pub enum AccountDiscriminant { + Admin, + User, +} +``` + +### Use Anchor's Account Wrapper + +Implementing these checks for every account in every instruction can be tedious. +Fortunately, Anchor provides a `#[account]` attribute macro for automatically +implementing traits that every account should have. + +Structs marked with `#[account]` can then be used with `Account` to validate +that the passed-in account is indeed the type you expect. When initializing an +account whose struct representation has the `#[account]` attribute, the first 8 +bytes are automatically reserved for a discriminator unique to the account type. +When deserializing the account data, Anchor will automatically check if the +discriminator matches the expected account type and throw an error if it does +not. + +In the example below, `Account<'info, AdminConfig>` specifies that the +`admin_config` account should be of type `AdminConfig`. Anchor then +automatically checks that the first 8 bytes of account data match the +discriminator of the `AdminConfig` type. + +The data validation check for the `admin` field is also moved from the +instruction logic to the account validation struct using the `has_one` +constraint. `#[account(has_one = admin)]` specifies that the `admin_config` +account's `admin` field must match the `admin` account passed into the +instruction. Note that for the `has_one` constraint to work, the naming of the +account in the struct must match the naming of the field on the account you are +validating. + +```rust +use anchor_lang::prelude::*; + +declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); + +#[program] +pub mod type_cosplay_recommended { + use super::*; + + pub fn admin_instruction(ctx: Context) -> Result<()> { + msg!("Admin {}", ctx.accounts.admin_config.admin); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct AdminInstruction<'info> { + #[account(has_one = admin)] + admin_config: Account<'info, AdminConfig>, + admin: Signer<'info>, +} + +#[account] +#[derive(InitSpace)] +pub struct AdminConfig { + admin: Pubkey, +} + +#[account] +#[derive(InitSpace)] +pub struct UserConfig { + user: Pubkey, +} +``` + +This vulnerability is something you generally don't have to worry about when +using Anchor—that's the whole point! 
However, after exploring how this issue can +arise in native Rust programs, you should now have a better understanding of the +importance of the account discriminator in an Anchor account. Anchor's automatic +discriminator checks mean that developers can focus more on their product, but +it's still crucial to understand what Anchor is doing behind the scenes to build +robust Solana programs. + +## Lab + +In this lab, you'll create two programs to demonstrate a type cosplay +vulnerability: + +- The first program initializes accounts without a discriminator. +- The second program initializes accounts using Anchor's `init` constraint, + which automatically sets an account discriminator. + +### 1. Starter + +To get started, download the starter code from the starter branch of +[this repository](https://github.com/solana-developers/type-cosplay/tree/starter). +The starter code includes a program with three instructions and some tests. + +The three instructions are: + +1. `initialize_admin`- Initializes an admin account and sets the admin authority + of the program. +2. `initialize_user` - Initializes a standard user account. +3. `update_admin` - Allows the existing admin to update the admin authority of + the program. + +Review the instructions in the `lib.rs` file. The last instruction should only +be callable by the account matching the `admin` field on the admin account +initialized using the `initialize_admin` instruction. + +### 2. Test Insecure update_admin Instruction + +Both the `AdminConfig` and `User` account types have the same fields and field +types: + +```rust +#[derive(AnchorSerialize, AnchorDeserialize)] +pub struct AdminConfig { + admin: Pubkey, +} + +#[derive(AnchorSerialize, AnchorDeserialize)] +pub struct User { + user: Pubkey, +} +``` + +Because of this, it's possible to pass a `User` account in place of the `admin` +account in the `update_admin` instruction, bypassing the requirement that only +an admin can call this instruction. + +Take a look at the `solana-type-cosplay.ts` file in the `tests` directory. It +contains a basic setup and two tests: one initializes a user account, and the +other invokes `update_admin` with the user account instead of an admin account. + +Run `anchor test` to see that invoking `update_admin` completes successfully: + +```bash + type-cosplay + ✔ Initialize User Account (223ms) + ✔ Invoke update admin instruction with user account (442ms) +``` + +### 3. Create type-checked Program + +Next, create a new program called `type-checked` by running +`anchor new type-checked` from the root of the existing anchor program. + +Now, in your `programs` folder, you will have two programs. Run +`anchor keys list` to see the program ID for the new program. Add it to the +`lib.rs` file of the `type-checked` program and to the `Anchor.toml` file. 
+ +Update the test file's setup to include the new program and two new keypairs for +the accounts to be initialized: + +```typescript +import * as anchor from "@coral-xyz/anchor"; +import { Program } from "@coral-xyz/anchor"; +import { TypeCosplay } from "../target/types/type_cosplay"; +import { TypeChecked } from "../target/types/type_checked"; +import { expect } from "chai"; + +describe("type-cosplay", () => { + const provider = anchor.AnchorProvider.env(); + anchor.setProvider(provider); + + const program = anchor.workspace.TypeCosplay as Program; + const programChecked = anchor.workspace.TypeChecked as Program; + + const userAccount = anchor.web3.Keypair.generate(); + const newAdmin = anchor.web3.Keypair.generate(); + + const userAccountChecked = anchor.web3.Keypair.generate(); + const adminAccountChecked = anchor.web3.Keypair.generate(); +}); +``` + +### 4. Implement the type-checked Program + +In the `type_checked` program, add two instructions using the `init` constraint +to initialize an `AdminConfig` account and a `User` account. Anchor will +automatically set the first 8 bytes of account data as a unique discriminator +for the account type. + +Add an `update_admin` instruction that validates the `admin_config` account as +an `AdminConfig` account type using Anchor's `Account` wrapper. Anchor will +automatically check that the account discriminator matches the expected account +type: + +```rust +use anchor_lang::prelude::*; + +declare_id!("G36iNpB591wxFeaeq55qgTwHKJspBrETmgok94oyqgcc"); + +const DISCRIMINATOR_SIZE: usize = 8; + +#[program] +pub mod type_checked { + use super::*; + + pub fn initialize_admin(ctx: Context) -> Result<()> { + ctx.accounts.admin_config.admin = ctx.accounts.admin.key(); + Ok(()) + } + + pub fn initialize_user(ctx: Context) -> Result<()> { + ctx.accounts.user_account.user = ctx.accounts.user.key(); + Ok(()) + } + + pub fn update_admin(ctx: Context) -> Result<()> { + ctx.accounts.admin_config.admin = ctx.accounts.admin.key(); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct InitializeAdmin<'info> { + #[account( + init, + payer = admin, + space = DISCRIMINATOR_SIZE + AdminConfig::INIT_SPACE + )] + pub admin_config: Account<'info, AdminConfig>, + #[account(mut)] + pub admin: Signer<'info>, + pub system_program: Program<'info, System>, +} + +#[derive(Accounts)] +pub struct InitializeUser<'info> { + #[account( + init, + payer = user, + space = DISCRIMINATOR_SIZE + User::INIT_SPACE + )] + pub user_account: Account<'info, User>, + #[account(mut)] + pub user: Signer<'info>, + pub system_program: Program<'info, System>, +} + +#[derive(Accounts)] +pub struct UpdateAdmin<'info> { + #[account( + mut, + has_one = admin + )] + pub admin_config: Account<'info, AdminConfig>, + pub new_admin: SystemAccount<'info>, + #[account(mut)] + pub admin: Signer<'info>, +} + +#[account] +#[derive(InitSpace)] +pub struct AdminConfig { + admin: Pubkey, +} + +#[account] +#[derive(InitSpace)] +pub struct User { + user: Pubkey, +} +``` + +### 5. Test Secure update_admin Instruction + +In the test file, initialize an `AdminConfig` account and a `User` account from +the `type_checked` program. Then, invoke the `updateAdmin` instruction twice, +passing in the newly created accounts: + +```typescript +describe("type-cosplay", () => { + ... 
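+  // Setup elided here: these tests reuse the provider, program, programChecked,
+  // userAccountChecked, adminAccountChecked, and newAdmin values defined in the
+  // setup snippet shown earlier.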
+ + it("Initialize type checked AdminConfig Account", async () => { + try { + await programChecked.methods + .initializeAdmin() + .accounts({ + adminConfig: adminAccountChecked.publicKey, + }) + .signers([adminAccountChecked]) + .rpc(); + } catch (error) { + throw new Error( + `Initializing type checked AdminConfig Account failed: ${error.message}` + ); + } + }); + + it("Initialize type checked User Account", async () => { + try { + await programChecked.methods + .initializeUser() + .accounts({ + userAccount: userAccountChecked.publicKey, + user: provider.wallet.publicKey, + }) + .signers([userAccountChecked]) + .rpc(); + } catch (error) { + throw new Error( + `Initializing type checked User Account failed: ${error.message}` + ); + } + }); + + it("Invoke update instruction using User Account", async () => { + try { + await programChecked.methods + .updateAdmin() + .accounts({ + adminConfig: userAccountChecked.publicKey, + newAdmin: newAdmin.publicKey, + admin: provider.wallet.publicKey, + }) + .rpc(); + } catch (error) { + expect(error); + console.log(error); + } + }); + + it("Invoke update instruction using AdminConfig Account", async () => { + try { + await programChecked.methods + .updateAdmin() + .accounts({ + adminConfig: adminAccountChecked.publicKey, + newAdmin: newAdmin.publicKey, + admin: provider.wallet.publicKey, + }) + .rpc(); + } catch (error) { + throw new Error( + `Invoking update instruction using AdminConfig Account failed: ${error.message}` + ); + } + }); +}) +``` + +Run `anchor test`. For the transaction where we pass in the User account type, +we expect the instruction to return an Anchor Error due to the account not being +of type AdminConfig: + +```bash +'Program G36iNpB591wxFeaeq55qgTwHKJspBrETmgok94oyqgcc invoke [1]', +'Program log: Instruction: UpdateAdmin', +'Program log: AnchorError caused by account: admin_config. Error Code: AccountDiscriminatorMismatch. Error Number: 3002. Error Message: 8 byte discriminator did not match what was expected.', +'Program G36iNpB591wxFeaeq55qgTwHKJspBrETmgok94oyqgcc consumed 3506 of 200000 compute units', +'Program G36iNpB591wxFeaeq55qgTwHKJspBrETmgok94oyqgcc failed: custom program error: 0xbba' +``` + +Following Anchor's best practices ensures that your programs avoid this +vulnerability. Always use the `#[account]` attribute when creating account +structs, use the `init` constraint when initializing accounts, and use the +`Account` type in your account validation structs. + +For the final solution code, you can find it on the `solution` branch of +[the repository](https://github.com/solana-developers/type-cosplay/tree/solution). + +## Challenge + +As with other lessons in this unit, practice avoiding this security exploit by +auditing your own or other programs. + +Review at least one program and ensure that account types have a discriminator +and that these are checked for each account and instruction. Since standard +Anchor types handle this check automatically, you're more likely to find a +vulnerability in a native program. + +Remember, if you find a bug or exploit in somebody else's program, please alert +them. If you find one in your own program, patch it immediately. + + + +Push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=37ebccab-b19a-43c6-a96a-29fa7e80fdec)! 
+ + diff --git a/content/courses/solana-pay/index.mdx b/content/courses/solana-pay/index.mdx new file mode 100644 index 000000000..b52eb8467 --- /dev/null +++ b/content/courses/solana-pay/index.mdx @@ -0,0 +1,5 @@ +--- +author: unboxed +title: Solana Pay +description: Request payments and initiate transactions using links and QR codes. +--- diff --git a/content/courses/solana-pay/meta.json b/content/courses/solana-pay/meta.json new file mode 100644 index 000000000..1535fc608 --- /dev/null +++ b/content/courses/solana-pay/meta.json @@ -0,0 +1,3 @@ +{ + "pages": ["solana-pay"] +} diff --git a/content/courses/solana-pay/solana-pay.mdx b/content/courses/solana-pay/solana-pay.mdx new file mode 100644 index 000000000..26e81231a --- /dev/null +++ b/content/courses/solana-pay/solana-pay.mdx @@ -0,0 +1,853 @@ +--- +title: Solana Pay +objectives: + - Use the Solana Pay specification to build payment requests and initiate + transactions using URLs encoded as QR codes + - Use the `@solana/pay` library to create Solana Pay transaction requests + - Partially sign transactions and implement transaction gating based on + specific conditions +description: + "How to create Solana Pay payment requests using links and QR codes." +--- + +## Summary + +- **Solana Pay** is a specification for encoding Solana transaction requests + within URLs, enabling standardized transaction requests across different + Solana apps and wallets. +- **Partial signing** of transactions allows the creation of transactions that + require multiple signatures before they are submitted to the network. +- **Transaction gating** involves implementing rules that determine whether + certain transactions are allowed to be processed, based on specific conditions + or the presence of particular data in the transaction. + +## Lesson + +The Solana community is continually improving and expanding the network's +functionality. But that doesn't always mean developing brand-new technology. +Sometimes it means leveraging the network's existing features in new and +innovative ways. + +Solana Pay is a great example of this. Rather than adding new functionality to +the network, Solana Pay uses the network's existing signing features in a unique +way to enable merchants and applications to request transactions and build +gating mechanisms for specific transaction types. + +Throughout this lesson, you'll learn how to use Solana Pay to create transfer +and transaction requests, encode these requests as a QR code, partially sign +transactions, and gate transactions based on conditions you choose. Rather than +leaving it at that, we hope you'll see this as an example of leveraging existing +features in new and innovative ways, using it as a launching pad for your own +unique client-side network interactions. + +### Solana Pay + +The [Solana Pay specification](https://docs.solanapay.com/spec) is a set of +standards that allow users to request payments and initiate transactions using +URLs uniformly across various Solana apps and wallets. + +Request URLs are prefixed with `solana:` so that platforms can direct the link +to the appropriate application. For example, on mobile, a URL that starts with +`solana:` will be directed to wallet applications that support the Solana Pay +specification. From there, the wallet can use the remainder of the URL to +appropriately handle the request. + +There are two types of requests defined by the Solana Pay specification: + +1. Transfer Request: used for simple SOL or SPL Token transfers +2. 
Transaction Request: used to request any type of Solana transaction + +#### Transfer requests + +The transfer request specification describes a non-interactive request for SOL +or SPL token transfer. Transfer request URLs take the following format +`solana:?`. + +The value of `recipient` is required and must be a base58-encoded public key of +the account from which a transfer is being requested. Additionally, the +following optional query parameters are supported: + +- `amount` - a non-negative integer or decimal value indicating the amount of + tokens to transfer +- `spl-token` - a base58-encoded public key of an SPL Token mint account if the + transfer is of an SPL token and not SOL +- `reference` - optional reference values as base58-encoded 32 byte arrays. This + can be used by a client for identifying the transaction onchain since the + client will not have a transaction's signature. +- `label` - a URL-encoded UTF-8 string that describes the source of the transfer + request +- `message` - a URL-encoded UTF-8 string that describes the nature of the + transfer request +- `memo` - a URL-encoded UTF-8 string that must be included in the SPL memo + instruction in the payment transaction + +By way of example, here is a URL describing a transfer request for 1 SOL: + +```text +solana:mvines9iiHiQTysrwkJjGf2gb9Ex9jXJX8ns3qwf2kN?amount=1&label=Michael&message=Thanks%20for%20all%20the%20fish&memo=OrderId12345 +``` + +And here is a URL describing a transfer request for 0.1 USDC: + +```text +solana:mvines9iiHiQTysrwkJjGf2gb9Ex9jXJX8ns3qwf2kN?amount=0.01&spl-token=EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v +``` + +#### Transaction requests + +The Solana Pay transaction request is similar to a transfer request in that it +is simply a URL that can be consumed by a supporting wallet. However, this +request is interactive and the format is more open-ended: + +```text +solana: +``` + +The value of `link` should be a URL to which the consuming wallet can make an +HTTP request. Rather than containing all the information needed for a +transaction, a transaction request uses this URL to fetch the transaction that +should be presented to the user. + +When a wallet receives a transaction Request URL, four things happen: + +1. The wallet sends a GET request to the application at the provided `link` URL + to retrieve a label and icon image to display to the user. +2. The wallet then sends a POST request with the public key of the end user. +3. Using the public key of the end user (and any additional information provided + in `link`), the application then builds the transaction and responds with a + base64-encoded serialized transaction. +4. The wallet decodes and deserializes the transaction, then lets the user sign + and send the transaction. + +Given that transaction requests are more involved than transfer requests, the +remainder of this lesson will focus on creating transaction requests. + +### Create a transaction request + +#### Define the API endpoint + +The main thing you, the developer, need to do to make the transaction request +flow work is set up a REST API endpoint at the URL you plan to include in the +transaction request. In this lesson, we'll be using +[Next.js API Routes](https://nextjs.org/docs/api-routes/introduction) for our +endpoints, but you're welcome to use whatever stack and tools you're most +comfortable with. + +In Next.js, you do this by adding a file to the `pages/api` folder and exporting +a function that handles the request and response. 
+ +```typescript +import { NextApiRequest, NextApiResponse } from "next"; + +export default async function handler( + request: NextApiRequest, + response: NextApiResponse, +) { + // Handle the request +} +``` + +#### Handle a GET request + +The wallet consuming your transaction request URL will first issue a GET request +to this endpoint. You'll want your endpoint to return a JSON object with two +fields: + +1. `label` - a string that describes the source of the transaction request +2. `icon`- a URL to an image that can be displayed to the user + +Building on the empty endpoint from before, that may look like this: + +```typescript +import { NextApiRequest, NextApiResponse } from "next"; + +export default async function handler( + request: NextApiRequest, + response: NextApiResponse, +) { + if (request.method === "GET") { + return get(response); + } + return response.status(405).json({ error: "Method not allowed" }); +} + +function get(response: NextApiResponse) { + response.status(200).json({ + label: "Store Name", + icon: "https://solana.com/src/img/branding/solanaLogoMark.svg", + }); +} +``` + +When the wallet makes a GET request to the API endpoint, the `get` function is +called, returning a response with a status code of 200 and the JSON object +containing `label` and `icon`. + +#### Handle a POST request and build the transaction + +After issuing a GET request, the wallet will issue a POST request to the same +URL. Your endpoint should expect the POST request's `body` to contain a JSON +object with an `account` field provided by the requesting wallet. The value of +`account` will be a string representing the end user's public key. + +With this information and any additional parameters provided, you can build the +transaction and return it to the wallet for signing by: + +1. Connecting to the Solana network and getting the latest `blockhash`. +2. Creating a new transaction using the `blockhash`. +3. Adding instructions to the transaction +4. Serializing the transaction and returning it in a `PostResponse` object along + with a message for the user. 
+ +```typescript +import { NextApiRequest, NextApiResponse } from "next"; + +export default async function handler( + request: NextApiRequest, + response: NextApiResponse, +) { + if (request.method === "GET") { + return get(response); + } + if (request.method === "POST") { + return post(request, response); + } + return response.status(405).json({ error: "Method not allowed" }); +} + +function get(response: NextApiResponse) { + response.status(200).json({ + label: "Store Name", + icon: "https://solana.com/src/img/branding/solanaLogoMark.svg", + }); +} +async function post(request: NextApiRequest, response: NextApiResponse) { + const { account, reference } = request.body; + + const connection = new Connection(clusterApiUrl("devnet")); + + const { blockhash } = await connection.getLatestBlockhash(); + + const transaction = new Transaction({ + recentBlockhash: blockhash, + feePayer: account, + }); + + const instruction = SystemProgram.transfer({ + fromPubkey: new PublicKey(account), + toPubkey: Keypair.generate().publicKey, + lamports: 0.001 * LAMPORTS_PER_SOL, + }); + + instruction.keys.push({ + pubkey: reference, + isSigner: false, + isWritable: false, + }); + + transaction.add(instruction); + + const serializedTransaction = transaction.serialize({ + requireAllSignatures: false, + }); + const base64 = serializedTransaction.toString("base64"); + + const message = "Simple transfer of 0.001 SOL"; + + response.status(200).json({ + transaction: base64, + message, + }); +} +``` + +There is nothing too out of the ordinary here. It's the same transaction +construction you would use in a standard client-side application. The only +difference is that instead of signing and submitting to the network, you send +the transaction as a base64-encoded string back in the HTTP response. The wallet +that issued the request can then present the transaction to the user for +signing. + +#### Confirm transaction + +You may have noticed that the previous example assumed a `reference` was +provided as a query parameter. While this is _not_ a value provided by the +requesting wallet, it _is_ useful to set up your initial transaction request URL +to contain this query parameter. + +Since your application isn't the one submitting a transaction to the network, +your code won't have access to a transaction signature. This would typically be +how your app can locate a transaction on the network and see its status. + +To get around this, you can include a `reference` value as a query parameter for +each transaction request. This value should be a base58-encoded 32 byte array +that can be included as a non-signer key on the transaction. This allows your +app to then use the `getSignaturesForAddress` RPC method to locate the +transaction. Your app can then tailor its UI according to a transaction's +status. + +If you use the `@solana/pay` library, you can use the `findReference` helper +function instead of using `getSignaturesForAddress` directly. + +### Gated transactions + +We've mentioned before how Solana Pay is an example of being able to do cool new +things with the network by getting creative with existing functionality. Another +small example of doing this within the Solana Pay umbrella is to only make +certain transactions available once certain conditions are met. + +Since you control the endpoint building the transaction, you can determine what +criteria must be met before a transaction is built. 
For example, you can use the +`account` field provided in the POST request to check if the end user holds an +NFT from a particular collection or if that public key is on a predetermined +list of accounts who can make this particular transaction. + +```typescript +// retrieve array of nfts owned by the given wallet +const nfts = await metaplex.nfts().findAllByOwner({ owner: account }).run(); + +// iterate over the nfts array +for (let i = 0; i < nfts.length; i++) { + // check if the current nft has a collection field with the desired value + if (nfts[i].collection?.address.toString() == collection.toString()) { + // build transaction + } else { + // return an error + } +} +``` + +#### Partial Signing + +If you want certain transactions behind some kind of gating mechanism, that +functionality will have to be enforced onchain as well. Returning an error from +your Solana Pay endpoint makes it more difficult for end users to do the +transaction, but they could still build it manually. + +What this means is that the instruction(s) being called should require some type +of "admin" signature that only your application can provide. In doing that, +however, you'll have made it so that our previous examples don't work. The +transaction is built and sent to the requesting wallet for the end user's +signature, but the submitted transaction will fail without the admin signature. + +Fortunately, Solana enables signature composability with partial signing. + +Partially signing a multi-signature transaction allows signers to add their +signature before the transaction is broadcast on the network. This can be useful +in a number of situations, including: + +- Approving transactions that require the signature of multiple parties, such as + a merchant and a buyer who need to confirm the details of a payment. +- Invoking custom programs that require the signatures of both a user and an + administrator. This can help to limit access to the program instructions and + ensure that only authorized parties can execute them. + +```typescript +const { blockhash, lastValidBlockHeight } = await connection.getLatestBlockhash() + +const transaction = new Transaction({ + feePayer: account, + blockhash, + lastValidBlockHeight, +}) + +... + +transaction.partialSign(adminKeypair) +``` + +The `partialSign` function is used to add a signature to a transaction without +overriding any previous signatures on the transaction. If you are building a +transaction with multiple signers, it is important to remember that if you don't +specify a transaction's `feePayer`, the first signer will be used as the fee +payer for the transaction. To avoid any confusion or unexpected behavior, make +sure to explicitly set the fee payer when necessary. + +In our example of only allowing a transaction request to go through when the end +user has a specific NFT, you would simply add your admin signature to the +transaction using `partialSign` before encoding the transaction as a +base64-encoded string and issuing the HTTP response. + +### Solana Pay QR codes + +One of the standout features of Solana Pay is its easy integration with QR +codes. Since transfer and transaction requests are simply URLs, you can embed +them into QR codes that you make available in your application or elsewhere. + +The `@solana/pay` library simplifies this with the provided `createQR` helper +function. This function needs you to provide the following: + +- `url` - the url of the transaction request. +- `size` (optional) - the width and height of the QR code in pixels. 
Defaults + to 512. +- `background` (optional) - the background color. Defaults to white. +- `color` (optional) - the foreground color. Defaults to black. + +```typescript +const qr = createQR(url, 400, "transparent"); +``` + +## Lab + +Now that you've got a conceptual grasp on Solana Pay, let's put it into +practice. We'll use Solana Pay to generate a series of QR codes for a scavenger +hunt. Participants must visit each scavenger hunt location in order. At each +location, they'll use the provided QR code to submit the appropriate transaction +to the scavenger hunt's smart contract that keeps track of user progress. + +#### 1. Starter + +To get started, download the starter code on the `starter` branch of this +[repository](https://github.com/Unboxed-Software/solana-scavenger-hunt-app/tree/starter). +The starter code is a Next.js app that displays a Solana Pay QR code. Notice +that the menu bar lets you switch between different QR codes. The default option +is a simple SOL transfer for illustrative purposes. Throughout this lab, we'll +be adding functionality to the location options in the menu bar. + +![scavenger hunt app](/assets/courses/unboxed/scavenger-hunt-screenshot.png) + +To do this, we'll be creating a new endpoint for a transaction request that +builds a transaction for invoking an Anchor program on Devnet. This program has +been made specifically for this "scavenger hunt" app and has two instructions: +`initialize` and `check_in`. The `initialize` instruction is used to set up the +user's state, while the `check_in` instruction is used to record a check-in at a +location in the scavenger hunt. We won't be making any changes to the program in +this lab, but feel free to check out the +[source code](https://github.com/Unboxed-Software/anchor-scavenger-hunt) if +you'd like to familiarize yourself with the program. + +Before moving on, make sure you get familiar with the starter code for the +Scavenger Hunt app. Looking at `pages/index.tsx`, +`utils/createQrCode/simpleTransfer`, and `/utils/checkTransaction` will let you +see how the transaction request for sending SOL is set up. We'll be following a +similar pattern for the transaction request for checking in at a location. + +#### 2. Setup + +Before we move forward, let's make sure you can run the app locally. Start by +renaming the `.env.example` file in the frontend directory to `.env`. This file +contains a keypair that will be used in this lab to partially sign transactions. + +Next, install dependencies with `yarn`, then use `yarn dev` and open your +browser `localhost:3000` (or the port indicated in the console if 3000 was +already in use). + +Now, if you try to scan the QR code shown on the page from your mobile device, +you'll get an error. That's because the QR code is set up to send you to your +computer's `localhost:3000`, which isn't an address your phone can get to. +Further, Solana Pay needs to use an HTTPS URL to work. + +To get around this, you can use [ngrok](https://ngrok.com/). You'll need to +install it if you haven't used it before. Once it's installed, run the following +command in your terminal, replacing `3000` with whichever port you're using for +this project: + +```bash +ngrok http 3000 +``` + +This will provide you with a unique URL that you can use to access your local +server remotely. 
The output will look something like this: + +```bash +Session Status online +Account your_email@gmail.com (Plan: Free) +Update update available (version 3.1.0, Ctrl-U to update) +Version 3.0.6 +Region United States (us) +Latency 45ms +Web Interface http://127.0.0.1:4040 +Forwarding https://7761-24-28-107-82.ngrok.io -> http://localhost:3000 +``` + +Now, open the HTTPS ngrok URL shown in your console in the browser (e.g. +https://7761-24-28-107-82.ngrok.io). This will allow you to scan QR codes from +your mobile device while testing locally. + +At the time of writing, this lab works best with Solflare. Some wallets will +display an incorrect warning message when scanning a Solana Pay QR code. +Regardless of the wallet you use, make sure you switch to devnet in the wallet. +Then scan the QR code on the home page labeled “SOL Transfer”. This QR code is a +reference implementation for a transaction request that performs a simple SOL +transfer. It also calls the `requestAirdrop` function to fund your mobile wallet +with Devnet SOL since most people don't have Devnet SOL available for testing. + +If you were able to successfully execute the transaction using the QR code, +you're good to move on! + +#### 3. Create a check-in transaction request endpoint + +Now that you're up and running, it's time to create an endpoint that supports +transaction requests for location check-in using the Scavenger Hunt program. + +Start by opening the file at `pages/api/checkIn.ts`. Notice that it has a helper +function for initializing `eventOrganizer` from a secret key environment +variable. The first thing we'll do in this file is the following: + +1. Export a `handler` function to handle an arbitrary HTTP request +2. Add `get` and `post` functions for handling those HTTP methods +3. Add logic to the body of the `handler` function to either call `get`, `post`, + or return a 405 error based on the HTTP request method + +```typescript +import { NextApiRequest, NextApiResponse } from "next"; + +export default async function handler( + request: NextApiRequest, + response: NextApiResponse, +) { + if (request.method === "GET") { + return get(response); + } + if (request.method === "POST") { + return await post(request, response); + } + return response.status(405).json({ error: "Method not allowed" }); +} + +function get(response: NextApiResponse) {} + +async function post(request: NextApiRequest, response: NextApiResponse) {} +``` + +#### 4. Update `get` function + +Remember, the first request from a wallet will be a GET request expecting the +endpoint to return a label and icon. Update the `get` function to send a +response with a "Scavenger Hunt!" label and a Solana logo icon. + +```jsx +function get(response: NextApiResponse) { + response.status(200).json({ + label: "Scavenger Hunt!", + icon: "https://solana.com/src/img/branding/solanaLogoMark.svg", + }); +} +``` + +#### 5. Update `post` function + +After the GET request, a wallet will issue a POST request to the endpoint. The +request's `body` will contain a JSON object with an `account` field representing +the end user's public key. + +Additionally, the query parameters will contain whatever you encoded into the QR +code. If you take a look at `utils/createQrCode/checkIn.ts`, you'll notice that +this particular app includes parameters for `reference` and `id` as the +following: + +1. `reference` - a randomly generated public key used to identify the + transaction +2. 
`id` - the location id as an integer + +Go ahead and update the `post` function to extract `account`, `reference`, and +`id` from the request. You should respond with an error if any of these is +missing. + +Next, add a `try catch` statement where the `catch` block responds with an error +and the `try` block calls out to a new function `buildTransaction`. If +`buildTransaction` is successful, respond with a 200 and a JSON object with the +transaction and a message that the user has found the given location. Don't +worry about the logic for the `buildTransaction` function just yet - we'll do +that next. + +Note that you'll need to import `PublicKey` and `Transaction` from +`@solana/web3.js` here as well. + +```typescript +import { NextApiRequest, NextApiResponse } from "next" +import { PublicKey, Transaction } from "@solana/web3.js" +... + +async function post(request: NextApiRequest, response: NextApiResponse) { + const { account } = request.body; + const { reference, id } = request.query; + + if (!account || !reference || !id) { + response.status(400).json({ error: "Missing required parameter(s)" }); + return; + } + + try { + const transaction = await buildTransaction( + new PublicKey(account), + new PublicKey(reference), + id.toString(), + ); + + response.status(200).json({ + transaction: transaction, + message: `You've found location ${id}!`, + }); + } catch (error) { + console.log(error); + response.status(500).json({ transaction: "", message: error.message }); + return; + } +} + +async function buildTransaction( + account: PublicKey, + reference: PublicKey, + id: string +): Promise { + return new Transaction() +} +``` + +#### 6. Implement the `buildTransaction` function + +Next, let's implement the `buildTransaction` function. It should build, +partially sign, and return the check-in transaction. The sequence of items it +needs to perform is: + +1. Fetch the user state +2. Use the `locationAtIndex` helper function and the location id to get a + Location object +3. Verify that the user is at the correct location +4. Get the current blockhash and last valid block height from the connection +5. Create a new transaction object +6. Add an initialize instruction to the transaction if user state does not exist +7. Add a check-in instruction to the transaction +8. Add the `reference` public key to the check-in instruction +9. Partially sign the transaction with the event organizer's keypair +10. Serialize the transaction with base64 encoding and return the transaction + +While each of these steps is straightforward, it's a lot of steps. To simplify +the function, we're going to create empty helper functions that we'll fill in +later for steps 1, 3, 6, and 7-8. We'll call these `fetchUserState`, +`verifyCorrectLocation`, `createInitUserInstruction`, and +`createCheckInInstruction`, respectively. 
+ +We'll also add the following imports: + +```typescript +import { NextApiRequest, NextApiResponse } from "next"; +import { + PublicKey, + Transaction, + TransactionInstruction, +} from "@solana/web3.js"; +import { locationAtIndex, Location, locations } from "../../utils/locations"; +import { connection, gameId, program } from "../../utils/programSetup"; +``` + +Using the empty helper functions and the new imports, we can fill in the +`buildTransaction` function: + +```typescript +async function buildTransaction( + account: PublicKey, + reference: PublicKey, + id: string, +): Promise { + const userState = await fetchUserState(account); + + const currentLocation = locationAtIndex(new Number(id).valueOf()); + + if (!currentLocation) { + throw { message: "Invalid location id" }; + } + + if (!verifyCorrectLocation(userState, currentLocation)) { + throw { message: "You must visit each location in order!" }; + } + + const { blockhash, lastValidBlockHeight } = + await connection.getLatestBlockhash(); + + const transaction = new Transaction({ + feePayer: account, + blockhash, + lastValidBlockHeight, + }); + + if (!userState) { + transaction.add(await createInitUserInstruction(account)); + } + + transaction.add( + await createCheckInInstruction(account, reference, currentLocation), + ); + + transaction.partialSign(eventOrganizer); + + const serializedTransaction = transaction.serialize({ + requireAllSignatures: false, + }); + + const base64 = serializedTransaction.toString("base64"); + + return base64; +} + +interface UserState { + user: PublicKey; + gameId: PublicKey; + lastLocation: PublicKey; +} + +async function fetchUserState(account: PublicKey): Promise { + return null; +} + +function verifyCorrectLocation( + userState: UserState | null, + currentLocation: Location, +): boolean { + return false; +} + +async function createInitUserInstruction( + account: PublicKey, +): Promise { + throw ""; +} + +async function createCheckInInstruction( + account: PublicKey, + reference: PublicKey, + location: Location, +): Promise { + throw ""; +} +``` + +#### 7. Implement `fetchUserState` function + +With the `buildTransaction` function finished, we can start implementing the +empty helper functions we created, starting with `fetchUserState`. This function +uses the `gameId` and user's `account` to derive the user state PDA, then +fetches that account, returning null if it doesn't exist. + +```typescript +async function fetchUserState(account: PublicKey): Promise { + const userStatePDA = PublicKey.findProgramAddressSync( + [gameId.toBuffer(), account.toBuffer()], + program.programId, + )[0]; + + try { + return await program.account.userState.fetch(userStatePDA); + } catch { + return null; + } +} +``` + +#### 8. Implement `verifyCorrectLocation` function + +Next, let's implement the `verifyCorrectLocation` helper function. This function +is used to verify that a user is at the correct location in a scavenger hunt +game. + +If `userState` is `null`, that means the user should be visiting the first +location. Otherwise, the user should be visiting the location whose index is 1 +more than their last visited location. + +If these conditions are satisfied, the function will return true. Otherwise, +it'll return false. 
+ +```typescript +function verifyCorrectLocation( + userState: UserState | null, + currentLocation: Location, +): boolean { + if (!userState) { + return currentLocation.index === 1; + } + + const lastLocation = locations.find( + location => location.key.toString() === userState.lastLocation.toString(), + ); + + if (!lastLocation || currentLocation.index !== lastLocation.index + 1) { + return false; + } + return true; +} +``` + +#### 9. Implement the instruction creation functions + +Lastly, let's implement `createInitUserInstruction` and +`createCheckInInstruction`. These can use Anchor to generate and return the +corresponding instructions. The only catch is that `createCheckInInstruction` +needs to add `reference` to the instructions list of keys. + +```typescript +async function createInitUserInstruction( + account: PublicKey, +): Promise { + const initializeInstruction = await program.methods + .initialize(gameId) + .accounts({ user: account }) + .instruction(); + + return initializeInstruction; +} + +async function createCheckInInstruction( + account: PublicKey, + reference: PublicKey, + location: Location, +): Promise { + const checkInInstruction = await program.methods + .checkIn(gameId, location.key) + .accounts({ + user: account, + eventOrganizer: eventOrganizer.publicKey, + }) + .instruction(); + + checkInInstruction.keys.push({ + pubkey: reference, + isSigner: false, + isWritable: false, + }); + + return checkInInstruction; +} +``` + +#### 10. Test the app + +At this point your app should be working! Go ahead and test it using your mobile +wallet. Start by scanning the QR code for `Location 1`. Remember to make sure +your frontend is running using the ngrok URL rather than `localhost`. + +After scanning the QR code, you should see a message indicating that you are at +location 1. From there, scan the QR code on the `Location 2` page. You may need +to wait a few seconds for the previous transaction to finalize before +continuing. + +Congratulations, you have successfully finished the scavenger hunt demo using +Solana Pay! Depending on your background, this may not feel intuitive or +straightforward. If that's the case, feel free to go through the lab again or +make something on your own. Solana Pay opens a lot of doors for bridging the gap +between real life and onchain interaction. + +If you want to take a look at the final solution code you can find it on the +solution branch of +[the same repository](https://github.com/Unboxed-Software/solana-scavenger-hunt-app/tree/solution). + +## Challenge + +It's time to try this out on your own. Feel free to build out an idea of your +own using Solana Pay. Or, if you need some inspiration, you can use the prompt +below. + +Build out an app using Solana Pay (or modify the one from the lab) to mint an +NFT to users. To take it up a notch, only make the transaction possible if the +user meets one or more conditions (e.g. holds an NFT from a specific collection, +is already on a pre-determined list, etc.). + +Get creative with this! The Solana pay spec opens up a lot of doors for unique +use cases. + + + +Push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=3c7e5796-c433-4575-93e1-1429f718aa10)! 
+ + diff --git a/content/courses/state-compression/compressed-nfts.mdx b/content/courses/state-compression/compressed-nfts.mdx new file mode 100644 index 000000000..cc1d38f81 --- /dev/null +++ b/content/courses/state-compression/compressed-nfts.mdx @@ -0,0 +1,1146 @@ +--- +title: Compressed NFTs +objectives: + - Create a compressed NFT collection using Metaplex’s Bubblegum program + - Mint compressed NFTs using the Bubblegum program + - Transfer compressed NFTs using the Bubblegum program + - Read compressed NFT data using the Read API +description: + "How to mint, transfer and read large-scale NFT collections using Metaplex's + Bubblegum Program." +--- + +## Summary + +- **Compressed NFTs (cNFTs)** use **State Compression** to hash NFT data and + store the hash onchain in an account using a **concurrent Merkle tree** + structure. +- The cNFT data hash can’t be used to infer the cNFT data, but it can be used to + **verify** if the cNFT data you’re seeing is correct. +- Supporting RPC providers **index** cNFT data offchain when the cNFT is minted + so that you can use the **Read API** to access the data +- The **Metaplex Bubblegum program** is an abstraction on top of the **State + Compression** program that enables you to more simply create, mint, and manage + cNFT collections. + +## Lesson + +Compressed NFTs (cNFTs) are exactly what their name suggests: NFTs whose +structure takes up less account storage than traditional NFTs. Compressed NFTs +leverage a concept called **State Compression** to store data in a way that +drastically reduces costs. + +Solana's transaction costs are so cheap that most users never think about how +expensive minting NFTs can be at scale. The cost to set up and mint 1 million +traditional NFTs using the Token Metadata Program is approximately 24,000 SOL. +By comparison, cNFTs can be structured to where the same setup and mint costs 10 +SOL or less. That means anyone using NFTs at scale could cut costs by more than +1000x by using cNFTs over traditional NFTs. + +However, cNFTs can be tricky to work with. Eventually, the tooling required to +work with them will be sufficiently abstracted from the underlying technology +that the developer experience between traditional NFTs and cNFTs will be +negligible. But for now, you'll still need to understand the low level puzzle +pieces, so let's dig in! + +### A theoretical overview of cNFTs + +Most of the costs associated with traditional NFTs come down to account storage +space. Compressed NFTs use a concept called State Compression to store data in +the blockchain’s **ledger state**, only using the account state to store a +“fingerprint”, or **hash**, of the data. This hash allows you to +cryptographically verify that data has not been tampered with. + +To both store hashes and enable verification, we use a special binary tree +structure known as a **concurrent Merkle tree**. This tree structure lets us +hash data together in a deterministic way to compute a single, final hash that +gets stored onchain. This final hash is significantly smaller than all the +original data combined, hence the “compression.” The steps to this process are: + +1. Take any piece of data +2. Create a hash of this data +3. Store this hash as a “leaf” at the bottom of the tree +4. Each leaf pair is then hashed together, creating a “branch” +5. Each branch is then hashed together +6. Continually climb the tree and hash adjacent branches together +7. Once at the top of the tree, a final ”root hash” is produced +8. 
Store the root hash onchain as a verifiable proof of the data within each + leaf +9. Anyone wanting to verify that the data they have matches the “source of + truth” can go through the same process and compare the final hash without + having to store all the data onchain + +One problem not addressed in the above is how to make data available if it can't +be fetched from an account. Since this hashing process occurs onchain, all the +data exists in the ledger state and could theoretically be retrieved from the +original transaction by replaying the entire chain state from origin. However, +it's much more straightforward (though still complicated) to have an **indexer** +track and index this data as the transactions occur. This ensures there is an +offchain “cache” of the data that anyone can access and subsequently verify +against the onchain root hash. + +This process is _very complex_. We’ll cover some key concepts below but don’t +worry if you don’t understand it right away. We’ll talk more theory in the state +compression lesson and focus primarily on application to NFTs in this lesson. +You’ll be able to work with cNFTs by the end of this lesson even if you don’t +fully understand every piece of the state compression puzzle. + +#### Concurrent Merkle trees + +A **Merkle tree** is a binary tree structure represented by a single hash. Every +leaf node in the structure is a hash of its inner data while every branch is a +hash of its child leaf hashes. In turn, branches are also hashed together until +eventually one final root hash remains. + +Any modification to leaf data changes the root hash. This causes an issue when +multiple transactions in the same slot are attempting to modify leaf data. Since +these transactions must execute in series, all but the first will fail since the +root hash and proof passed in will have been invalidated by the first +transaction to be executed. + +A **concurrent Merkle tree** is a Merkle tree that stores a secure changelog of +the most recent changes along with their root hash and the proof to derive it. +When multiple transactions in the same slot try to modify leaf data, the +changelog can be used as a source of truth to allow for concurrent changes to be +made to the tree. + +When working with a concurrent Merkle tree, there are three variables that +determine the size of the tree, the cost to create the tree, and the number of +concurrent changes that can be made to the tree: + +1. Max depth +2. Max buffer size +3. Canopy depth + +The **max depth** is the maximum number of hops to get from any leaf to the root +of the tree. Since Merkle trees are binary trees, every leaf is connected only +to one other leaf. Max depth can then logically be used to calculate the number +of nodes for the tree with `2 ^ maxDepth`. + +The **max buffer size** is effectively the maximum number of concurrent changes +that you can make to a tree within a single slot with the root hash still being +valid. + +The **canopy depth** is the number of proof nodes that are stored onchain for +any given proof path. Verifying any leaf requires the complete proof path for +the tree. The complete proof path is made up of one proof node for every “layer” +of the tree, i.e. a max depth of 14 means there are 14 proof nodes. Every proof +node adds 32 bytes to a transaction, so large trees would quickly exceed the +maximum transaction size limit without caching proof nodes onchain. + +Each of these three values, max depth, max buffer size, and canopy depth, comes +with a tradeoff. 
Increasing any of these values increases the size of the +account used to store the tree, thus increasing the cost to create the tree. + +Choosing the max depth is fairly straightforward as it directly relates to the +number of leafs and therefore the amount of data you can store. If you need +1million cNFTs on a single tree, find the max depth that makes the following +expression true: `2^maxDepth > 1million`. The answer is 20. + +Choosing a max buffer size is effectively a question of throughput: how many +concurrent writes do you need. + +#### SPL State Compression and Noop Programs + +The SPL State Compression Program exists to make the above process repeatable +and composable throughout the Solana ecosystem. It provides instructions for +initializing Merkle trees, managing tree leafs (i.e. add, update, remove data), +and verifying leaf data. + +The State Compression Program also leverages a separate “no op” program whose +primary purpose is to make leaf data easier to index by logging it to the ledger +state. + +#### Use the Ledger State for storage + +The Solana ledger is a list of entries containing signed transactions. In +theory, this can be traced back to the genesis block. This effectively means any +data that has ever been put into a transaction exists in the ledger. + +When you want to store compressed data, you pass it to the State Compression +program where it gets hashed and emitted as an “event” to the Noop program. The +hash is then stored in the corresponding concurrent Merkle tree. Since the data +passed through a transaction and even exists on the Noop program logs, it will +forever exist on the ledger state. + +#### Index data for easy lookup + +Under normal conditions, you would typically access onchain data by fetching the +appropriate account. When using state compression, however, it's not so +straightforward. + +As mentioned above, the data now exists in the ledger state rather than in an +account. The easiest place to find the full data is in the logs of the Noop +instruction, but while this data will in a sense exist in the ledger state +forever, it will likely be inaccessible through validators after a certain +period of time. + +To save space and be more performant, validators don't retain every transaction +back to the genesis block. The specific amount of time you'll be able to access +the Noop instruction logs related to your data will vary based on the validator, +but eventually you'll lose access to it if you're relying directly on +instruction logs. + +Technically, you _can_ replay transaction state back to the genesis block, but +the average team isn’t going to do that, and it certainly won’t be performant. + +Instead, you should use an indexer that will observe the events sent to the Noop +program and store the relevant data off chain. That way you don't need to worry +about old data becoming inaccessible. + +### Create a cNFT Collection + +With the theoretical background out of the way, let's turn our attention to the +main point of this lesson: how to create a cNFT collection. + +Fortunately, you can use tools created by Solana Foundation, the Solana +developer community, and Metaplex to simplify the process. Specifically, we'll +be using the `@solana/spl-account-compression` SDK, the Metaplex Bubblegum +program `@metaplex-foundation/mpl-bubblegum` through the Umi library from +Metaplex. + +#### Prepare metadata + +Prior to starting, you'll prepare your NFT metadata similarly to how you would +if you were using a Candy Machine. 
At its core, an NFT is simply a token with +metadata that follows the NFT standard. In other words, it should be shaped +something like this: + +```json +{ + "name": "My Collection", + "symbol": "MC", + "description": "My Collection description", + "image": "https://lvvg33dqzykc2mbfa4ifua75t73tchjnfjbcspp3n3baabugh6qq.arweave.net/XWpt7HDOFC0wJQcQWgP9n_cxHS0qQik9-27CAAaGP6E", + "attributes": [ + { + "trait_type": "Background", + "value": "transparent" + }, + { + "trait_type": "Shape", + "value": "sphere" + }, + { + "trait_type": "Resolution", + "value": "1920x1920" + } + ] +} +``` + +Depending on your use case, you may be able to generate this dynamically, or you +might want to have a JSON file prepared for each cNFT beforehand. You’ll also +need any other assets referenced by the JSON, such as the `image` URL shown in +the example above. + +#### Create Collection NFT + +NFTs are intrinsically unique, compared to fungible tokens which have a supply. +However, it is important to bind NFTs produced by the same series together, +using a Collection. Collections allow people to discover other NFTs in the same +collection, and verify that individual NFTs are actually members of the +Collection (and not look-alikes produced by someone else). + +To have your cNFTs to be part of a collection, you’ll need to create a +Collection NFT **before** you start minting cNFTs. This is a traditional Token +Metadata Program NFT that acts as the reference binding your cNFTs together into +a single collection. The procedure to create this NFT is outlined in our +[NFTs with Metaplex lesson](/developers/courses/tokens-and-nfts/nfts-with-metaplex#add-the-nft-to-a-collection) + +```typescript +const collectionMint = generateSigner(umi); + +await createNft(umi, { + mint: collectionMint, + name: `My Collection`, + uri, + sellerFeeBasisPoints: percentAmount(0), + isCollection: true, // mint as collection NFT +}).sendAndConfirm(umi); +``` + +#### Create Merkle tree Account + +Now we start to deviate from the process you would use when creating traditional +NFTs. The onchain storage mechanism you use for state compression is an account +representing a concurrent Merkle tree. This Merkle tree account belongs to the +SPL State Compression program. Before you can do anything related to cNFTs, you +need to create an empty Merkle tree account with the appropriate size. + +The variables impacting the size of the account are: + +1. Max depth +2. Max buffer size +3. Canopy depth + +The first two variables must be chosen from an existing set of valid pairs. The +table below shows the valid pairs along with the number of cNFTs that can be +created with those values. 
| Max Depth | Max Buffer Size | Max Number of cNFTs |
| --------- | --------------- | ------------------- |
| 3         | 8               | 8                   |
| 5         | 8               | 32                  |
| 14        | 64              | 16,384              |
| 14        | 256             | 16,384              |
| 14        | 1,024           | 16,384              |
| 14        | 2,048           | 16,384              |
| 15        | 64              | 32,768              |
| 16        | 64              | 65,536              |
| 17        | 64              | 131,072             |
| 18        | 64              | 262,144             |
| 19        | 64              | 524,288             |
| 20        | 64              | 1,048,576           |
| 20        | 256             | 1,048,576           |
| 20        | 1,024           | 1,048,576           |
| 20        | 2,048           | 1,048,576           |
| 24        | 64              | 16,777,216          |
| 24        | 256             | 16,777,216          |
| 24        | 512             | 16,777,216          |
| 24        | 1,024           | 16,777,216          |
| 24        | 2,048           | 16,777,216          |
| 26        | 512             | 67,108,864          |
| 26        | 1,024           | 67,108,864          |
| 26        | 2,048           | 67,108,864          |
| 30        | 512             | 1,073,741,824       |
| 30        | 1,024           | 1,073,741,824       |
| 30        | 2,048           | 1,073,741,824       |

Note that the number of cNFTs that can be stored on the tree depends entirely on
the max depth, while the buffer size will determine the number of concurrent
changes (mints, transfers, etc.) within the same slot that can occur to the
tree. In other words, choose the max depth that corresponds to the number of
NFTs you need the tree to hold, then choose one of the options for max buffer
size based on the traffic you expect you'll need to support.

Next, choose the canopy depth. Increasing the canopy depth increases the
composability of your cNFTs. Any time your or another developer's code attempts
to verify a cNFT down the road, the code will have to pass in as many proof
nodes as there are "layers" in your tree. So for a max depth of 20, you'll need
to pass in 20 proof nodes. Not only is this tedious, but since each proof node
is 32 bytes it's possible to max out transaction sizes very quickly.

For example, if your tree has a very low canopy depth, an NFT marketplace may
only be able to support simple NFT transfers rather than support an onchain
bidding system for your cNFTs. The canopy effectively caches proof nodes
onchain, so you don't have to pass all of them into the transaction, allowing
for more complex transactions.

Increasing any of these three values increases the size of the account, thereby
increasing the cost associated with creating it. Weigh the benefits accordingly
when choosing the values.

Once you know these values, you can use the `createTree` method from the
`@metaplex-foundation/mpl-bubblegum` package to create your tree. This
instruction creates and initializes two accounts:

1. A `Merkle Tree` account - this holds the Merkle root hash and is used to
   verify the authenticity of the data stored.

2. A `Tree Config` account - this holds additional data specific to compressed
   NFTs such as the tree creator, whether the tree is public, and
   [other fields - see the Bubblegum program source](https://github.com/metaplex-foundation/mpl-bubblegum/blob/42ffed35da6b2a673efacd63030a360eac3ae64e/programs/bubblegum/program/src/state/mod.rs#L17).

#### Setting up Umi

The `mpl-bubblegum` package is a plugin and cannot be used without the Umi
library from Metaplex. Umi is a framework for making JS/TS clients for onchain
programs that was created by Metaplex.

Note that Umi implements many concepts differently from web3.js, including
Keypairs, PublicKeys, and Connections. However, it is easy to convert from the
web3.js versions of these items to the Umi equivalents.
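For example, here is a minimal sketch of converting between the two libraries'
types. It assumes the optional `@metaplex-foundation/umi-web3js-adapters`
helper package and its `fromWeb3JsKeypair`, `fromWeb3JsPublicKey`, and
`toWeb3JsPublicKey` functions, which are not used in this lesson, so
double-check the names against the Metaplex docs before relying on them:

```typescript
import { Keypair, PublicKey } from "@solana/web3.js";
import {
  fromWeb3JsKeypair,
  fromWeb3JsPublicKey,
  toWeb3JsPublicKey,
} from "@metaplex-foundation/umi-web3js-adapters";

// web3.js versions of a keypair and a public key
const web3Keypair = Keypair.generate();
const web3PublicKey = new PublicKey("11111111111111111111111111111111");

// converted to their Umi equivalents...
const umiKeypair = fromWeb3JsKeypair(web3Keypair);
const umiPublicKey = fromWeb3JsPublicKey(web3PublicKey);

// ...and back to web3.js when another API needs them
const backToWeb3PublicKey = toWeb3JsPublicKey(umiPublicKey);
```

You'll also see this lesson do the keypair conversion manually in the next
snippet using `umi.eddsa.createKeypairFromSecretKey`.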
To get started, we need to create an Umi instance:

```typescript
import { createUmi } from "@metaplex-foundation/umi-bundle-defaults";
import { clusterApiUrl } from "@solana/web3.js";

const umi = createUmi(clusterApiUrl("devnet"));
```

The above code initializes an empty Umi instance without any signer or plugin
attached to it. You can find the exhaustive list of the plugins available
[on this Metaplex docs page](https://developers.metaplex.com/umi/metaplex-umi-plugins).

The next step is to add in our imports and attach a signer to our Umi instance.

```typescript
import { dasApi } from "@metaplex-foundation/digital-asset-standard-api";
import { createTree, mplBubblegum } from "@metaplex-foundation/mpl-bubblegum";
import { keypairIdentity } from "@metaplex-foundation/umi";
import { createUmi } from "@metaplex-foundation/umi-bundle-defaults";
import { getKeypairFromFile } from "@solana-developers/helpers";
import { clusterApiUrl } from "@solana/web3.js";

const umi = createUmi(clusterApiUrl("devnet"));

// load keypair from local file system
// See https://github.com/solana-developers/helpers?tab=readme-ov-file#get-a-keypair-from-a-keypair-file
const localKeypair = await getKeypairFromFile();

// convert to Umi compatible keypair
const umiKeypair = umi.eddsa.createKeypairFromSecretKey(localKeypair.secretKey);

// load the MPL Bubblegum program, dasApi plugin and assign a signer to our umi instance
umi.use(keypairIdentity(umiKeypair)).use(mplBubblegum()).use(dasApi());

console.log("Loaded UMI with Bubblegum");
```

#### Use Bubblegum to Initialize Your Tree

With Umi instantiated, we are ready to call the `createTree` method to
instantiate the Merkle tree and tree config accounts.

```typescript
const merkleTree = generateSigner(umi);
const builder = await createTree(umi, {
  merkleTree,
  maxDepth: 14,
  maxBufferSize: 64,
});
await builder.sendAndConfirm(umi);
```

The three values supplied, i.e. the `merkleTree`, `maxDepth`, and
`maxBufferSize`, are required in order to create the tree, while the rest are
optional. For example, `treeCreator` defaults to the Umi instance identity,
while `public` defaults to false.

When set to true, `public` allows anyone to mint from the initialized tree; if
false, only the tree creator will be able to mint from the tree.

Feel free to look at the code for the
[create_tree instruction handler](https://github.com/metaplex-foundation/mpl-bubblegum/blob/42ffed35da6b2a673efacd63030a360eac3ae64e/programs/bubblegum/program/src/processor/create_tree.rs#L40)
and
[create_tree's expected accounts](https://github.com/metaplex-foundation/mpl-bubblegum/blob/42ffed35da6b2a673efacd63030a360eac3ae64e/programs/bubblegum/program/src/processor/create_tree.rs#L20).

#### Mint cNFTs

With the Merkle tree account and its corresponding Bubblegum tree config
account initialized, it's possible to mint cNFTs to the tree. The Bubblegum
library provides two instructions we can make use of, depending on whether the
minted asset will belong to a collection.

The two instructions are:

1. **mintV1**

```typescript
await mintV1(umi, {
  leafOwner,
  merkleTree,
  metadata: {
    name: "My Compressed NFT",
    uri: "https://example.com/my-cnft.json",
    sellerFeeBasisPoints: 0, // 0%
    collection: none(),
    creators: [
      { address: umi.identity.publicKey, verified: false, share: 100 },
    ],
  },
}).sendAndConfirm(umi);
```

2.
**mintToCollectionV1** + +```typescript +await mintToCollectionV1(umi, { + leafOwner, + merkleTree, + collectionMint, + metadata: { + name: "My Compressed NFT", + uri: "https://example.com/my-cnft.json", + sellerFeeBasisPoints: 0, // 0% + collection: { key: collectionMint, verified: false }, + creators: [ + { address: umi.identity.publicKey, verified: false, share: 100 }, + ], + }, +}).sendAndConfirm(umi); +``` + +Both functions will require you to pass in the NFT metadata and a list of +accounts required to mint the cNFT such as the `leafOwner`, `merkleTree` account +etc. + +### Interact with cNFTs + +It's important to note that cNFTs _are not_ SPL tokens. That means your code +needs to follow different conventions to handle cNFT functionality like +fetching, querying, transferring, etc. + +#### Fetch cNFT data + +The simplest way to fetch data from an existing cNFT is to use the +[Digital Asset Standard Read API](https://developers.metaplex.com/das-api) (Read +API). Note that this is separate from the standard JSON RPC. To use the Read +API, you’ll need to use a supporting RPC Provider. Metaplex maintains a (likely +non-exhaustive) +[list of RPC providers that support the DAS Read API](https://developers.metaplex.com/rpc-providers#rpcs-with-das-support). + +In this lesson we’ll be using +[Helius](https://docs.helius.dev/compression-and-das-api/digital-asset-standard-das-api) +as they have free support for Devnet. + +You might need to update your RPC connection endpoint in the Umi instantiation + +```typescript +const umi = createUmi( + "https://devnet.helius-rpc.com/?api-key=YOUR-HELIUS-API-KEY", +); +``` + +To use the Read API to fetch a specific cNFT, you need to have the cNFT’s asset +ID. However, after minting cNFTs, you’ll have at most two pieces of information: + +1. The transaction signature +2. The leaf index (possibly) + +The only real guarantee is that you'll have the transaction signature. It is +**possible** to locate the leaf index from there, but it involves some fairly +complex parsing. The short story is you must retrieve the relevant instruction +logs from the `Noop program` and parse them to find the leaf index. We’ll cover +this more in depth in a future lesson. For now, we’ll assume you know the leaf +index. + +This is a reasonable assumption for most mints given that the minting will be +controlled by your code and can be set up sequentially so that your code can +track which index is going to be used for each mint. I.e. the first mint will +use index 0, the second index 1, etc. + +Once you have the leaf index, you can derive the cNFT's corresponding asset ID. +When using Bubblegum, the asset ID is a PDA derived using the Bubblegum program +ID and the following seeds: + +1. The static string `asset` represented in utf8 encoding +2. The Merkle tree address +3. The leaf index + +The indexer essentially observes transaction logs from the `Noop program` as +they happen and stores the cNFT metadata that was hashed and stored in the +Merkle tree. This enables them to surface that data when requested. This asset +ID is what the indexer uses to identify the particular asset. + +For simplicity, you can just use the `findLeafAssetIdPda` helper function from +the Bubblegum library. + +```typescript +const [assetId, bump] = await findLeafAssetIdPda(umi, { + merkleTree, + leafIndex, +}); +``` + +With the asset ID, fetching the cNFT is fairly straightforward. 
Simply use the +`getAsset` method provided by the supporting RPC provider and the `dasApi` +library: + +```typescript +const [assetId, bump] = await findLeafAssetIdPda(umi, { + merkleTree, + leafIndex, +}); + +const rpcAsset = await umi.rpc.getAsset(assetId); +``` + +This will return a JSON object that is comprehensive of what a traditional NFT's +on- and offchain metadata would look like combined. For example, you can find +the cNFT attributes at `content.metadata.attributes` or the image at +`content.files.uri`. + +#### Query cNFTs + +The Read API also includes ways to get multiple assets, query by owner, creator, +and more. For example, Helius supports the following methods: + +- `getAsset` +- `getSignaturesForAsset` +- `searchAssets` +- `getAssetProof` +- `getAssetsByOwner` +- `getAssetsByAuthority` +- `getAssetsByCreator` +- `getAssetsByGroup` + +We won't go over most of these directly, but be sure to look through the +[Helius docs](https://docs.helius.dev/compression-and-das-api/digital-asset-standard-das-api) +to learn how to use them correctly. + +#### Transfer cNFTs + +Just as with a standard SPL token transfer, security is paramount. An SPL token +transfer, however, makes verifying transfer authority very easy. It's built into +the SPL Token program and standard signing. A compressed token's ownership is +more difficult to verify. The actual verification will happen program-side, but +your client-side code needs to provide additional information to make it +possible. + +While there is a Bubblegum `createTransferInstruction` helper function, there is +more assembly required than usual. Specifically, the Bubblegum program needs to +verify that the entirety of the cNFT's data is what the client asserts before a +transfer can occur. The entirety of the cNFT data has been hashed and stored as +a single leaf on the Merkle tree, and the Merkle tree is simply a hash of all +the tree's leafs and branches. Because of this, you can't simply tell the +program what account to look at and have it compare that account's `authority` +or `owner` field to the transaction signer. + +Instead, you need to provide the entirety of the cNFT data and any of the Merkle +tree's proof information that isn't stored in the canopy. That way, the program +can independently prove that the provided cNFT data, and therefore the cNFT +owner, is accurate. Only then can the program safely determine if the +transaction signer should, in fact, be allowed to transfer the cNFT. + +In broad terms, this involves a five step process: + +1. Fetch the cNFT's asset data from the indexer +2. Fetch the cNFT's proof from the indexer +3. Fetch the Merkle tree account from the Solana blockchain +4. Prepare the asset proof as a list of `AccountMeta` objects +5. Build and send the Bubblegum transfer instruction + +Fortunately, we can make use of the `transfer` method which takes care of all +these steps. + +```typescript +const assetWithProof = await getAssetWithProof(umi, assetId); + +await transfer(umi, { + ...assetWithProof, + leafOwner: currentLeafOwner, + newLeafOwner: newLeafOwner.publicKey, +}).sendAndConfirm(umi); +``` + +### Conclusion + +We've covered the primary skills needed to interact with cNFTs, but haven't been +fully comprehensive. You can also use Bubblegum to do things like burn, verify, +delegate, and more. We won't go through these, but these instructions are +similar to the mint and transfer process. 
If you need this additional functionality, take a look at the
[Bubblegum docs](https://developers.metaplex.com/bubblegum) on how to leverage
the helper functions it provides.

## Lab

Let's jump in and practice creating and working with cNFTs. Together, we'll
build as simple a script as possible that will let us mint a cNFT collection
from a Merkle tree.

#### 1. Create a new project

To begin, create and initialize an empty NPM project and change directory into
it.

```bash
mkdir cnft-demo
cd cnft-demo
npm init -y
```

Install all the required dependencies:

```bash
npm i @solana/web3.js@1 @solana-developers/helpers@2.5.2 @metaplex-foundation/mpl-token-metadata @metaplex-foundation/mpl-bubblegum @metaplex-foundation/digital-asset-standard-api @metaplex-foundation/umi-bundle-defaults

npm i --save-dev esrun
```

In this first script, we will learn about creating a tree, so let's create the
file `create-tree.ts`:

```bash
mkdir src && touch src/create-tree.ts
```

This Umi instantiation code will be repeated in a lot of files, so feel free to
create a wrapper file to instantiate it:

```typescript title="create-tree.ts"
import { dasApi } from "@metaplex-foundation/digital-asset-standard-api";
import { createTree, mplBubblegum } from "@metaplex-foundation/mpl-bubblegum";
import { generateSigner, keypairIdentity } from "@metaplex-foundation/umi";
import { createUmi } from "@metaplex-foundation/umi-bundle-defaults";
import {
  getExplorerLink,
  getKeypairFromFile,
} from "@solana-developers/helpers";
import { clusterApiUrl } from "@solana/web3.js";

const umi = createUmi(clusterApiUrl("devnet"));

// load keypair from local file system
// See https://github.com/solana-developers/helpers?tab=readme-ov-file#get-a-keypair-from-a-keypair-file
const localKeypair = await getKeypairFromFile();

// convert to Umi compatible keypair
const umiKeypair = umi.eddsa.createKeypairFromSecretKey(localKeypair.secretKey);

// load the MPL Bubblegum program, dasApi plugin and assign a signer to our umi instance
umi.use(keypairIdentity(umiKeypair)).use(mplBubblegum()).use(dasApi());
```

In the code above, we load the user's keypair from the default wallet file
located at `~/.config/solana/id.json`, instantiate a new Umi instance, and
assign the keypair to it. We also attach the Bubblegum and dasApi plugins to
it.

#### 2. Create the Merkle tree account

We'll start by creating the Merkle tree account. To do this we will use the
`createTree` method from the Metaplex Bubblegum program.

This function takes three required values:

- `merkleTree` - The Merkle tree account address
- `maxDepth` - Determines the max number of leaves the tree will hold and
  therefore the max number of cNFTs that the tree can contain.
- `maxBufferSize` - Determines how many concurrent changes can occur in the
  tree in parallel.

You can also supply optional fields such as:

- `treeCreator` - The address of the tree authority, defaults to the current
  `umi.identity` instance.
- `public` - Determines whether anyone else apart from the tree creator will be
  able to mint cNFTs from the tree (a sketch of passing these explicitly
  follows below).
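If you want to set those optional fields explicitly instead of relying on the
defaults, the call might look roughly like the sketch below. This is only an
illustration reusing the `createTree` options described above; confirm the
exact field names against the Bubblegum docs before relying on them.

```typescript
const merkleTree = generateSigner(umi);

const builder = await createTree(umi, {
  merkleTree,
  maxDepth: 14,
  maxBufferSize: 64,
  // optional fields (values shown match the documented defaults)
  treeCreator: umi.identity, // the tree authority
  public: false, // only the tree creator may mint from this tree
});

await builder.sendAndConfirm(umi);
```

The lab itself sticks with the defaults, as you can see in the actual
`create-tree.ts` code below.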
+ +```typescript title="create-tree.ts" +const merkleTree = generateSigner(umi); +const builder = await createTree(umi, { + merkleTree, + maxDepth: 14, + maxBufferSize: 64, +}); +await builder.sendAndConfirm(umi); + +let explorerLink = getExplorerLink("address", merkleTree.publicKey, "devnet"); +console.log(`Explorer link: ${explorerLink}`); +console.log("Merkle tree address is :", merkleTree.publicKey); +console.log("✅ Finished successfully!"); +``` + +Run the `create-tree.ts` script using esrun + +```bash +npx esrun create-tree.ts +``` + +Make sure to remember the Merkle tree address as we will be using it in the next +step when minting compressed NFTs. + +Your output will be similar to this + +```bash +Explorer link: https://explorer.solana.com/address/ZwzNxXw83PUmWSypXmqRH669gD3hF9rEjHWPpVghr5h?cluster=devnet +Merkle tree address is : ZwzNxXw83PUmWSypXmqRH669gD3hF9rEjHWPpVghr5h +✅ Finished successfully! +``` + +Congratulations! You've created a Bubblegum tree. Follow the Explorer link to +make sure that the process finished successfully, + +![Solana Explorer with details about created Merkle tree](/assets/courses/unboxed/solana-explorer-create-tree.png) + +#### 3. Mint cNFTs to your tree + +Believe it or not, that's all you needed to do to set up your tree to compressed +NFTs! Now let's turn our attention to minting. + +First, let's create a new file called `mint-compressed-nft-to-collection.ts`, +add our imports and instantiate Umi + +```typescript title="mint-compressed-nft-to-collection.ts" +import { dasApi } from "@metaplex-foundation/digital-asset-standard-api"; +import { + findLeafAssetIdPda, + LeafSchema, + mintToCollectionV1, + mplBubblegum, + parseLeafFromMintToCollectionV1Transaction, +} from "@metaplex-foundation/mpl-bubblegum"; +import { + keypairIdentity, + publicKey as UMIPublicKey, +} from "@metaplex-foundation/umi"; +import { createUmi } from "@metaplex-foundation/umi-bundle-defaults"; +import { getKeypairFromFile } from "@solana-developers/helpers"; +import { clusterApiUrl } from "@solana/web3.js"; + +const umi = createUmi(clusterApiUrl("devnet")); + +// load keypair from local file system +// See https://github.com/solana-developers/helpers?tab=readme-ov-file#get-a-keypair-from-a-keypair-file +const localKeypair = await getKeypairFromFile(); + +// convert to Umi compatible keypair +const umiKeypair = umi.eddsa.createKeypairFromSecretKey(localKeypair.secretKey); + +// load the MPL Bubblegum program, dasApi plugin and assign a signer to our umi instance +umi.use(keypairIdentity(umiKeypair)).use(mplBubblegum()).use(dasApi()); +``` + +I am going to be +[recycling a Collection NFT](https://explorer.solana.com/address/D2zi1QQmtZR5fk7wpA1Fmf6hTY2xy8xVMyNgfq6LsKy1?cluster=devnet) +I already created in the NFTs with Metaplex lesson, but if you'd like to create +a new collection for this lesson, check out the code +[on this repo](https://github.com/solana-developers/professional-education/blob/main/labs/metaplex-umi/create-collection.ts) + + + +Find the code to create a Metaplex Collection NFT in our [NFTs with Metaplex lesson](/developers/courses/tokens-and-nfts/nfts-with-metaplex#add-the-nft-to-a-collection). + + + +To mint a compressed NFT to a collection we will need + +- `leafOwner` - The recipient of the compressed NFT + +- `merkleTree` - The Merkle tree address we created in the previous step + +- `collection` - The collection our cNFT will belong to. This is not required, + and you can leave it out if your cNFT doesn't belong to a collection. 
- `metadata` - Your offchain metadata. This lesson won't focus on how to
  prepare your metadata, but you can check out the
  [recommended structure from Metaplex](https://developers.metaplex.com/token-metadata/token-standard#the-non-fungible-standard).

Our cNFT will use this structure, which we already prepared earlier.

```json title="nft.json"
{
  "name": "My NFT",
  "symbol": "MN",
  "description": "My NFT Description",
  "image": "https://lycozm33rkk5ozjqldiuzc6drazmdp5d5g3g7foh3gz6rz5zp7va.arweave.net/XgTss3uKlddlMFjRTIvDiDLBv6Pptm-Vx9mz6Oe5f-o",
  "attributes": [
    {
      "trait_type": "Background",
      "value": "transparent"
    },
    {
      "trait_type": "Shape",
      "value": "sphere"
    }
  ]
}
```

Putting it all into code, we will have:

```typescript title="mint-compressed-nft-to-collection.ts"
// add this import to the top of the file alongside the others
import { base58 } from "@metaplex-foundation/umi/serializers";

const merkleTree = UMIPublicKey("ZwzNxXw83PUmWSypXmqRH669gD3hF9rEjHWPpVghr5h");

const collectionMint = UMIPublicKey(
  "D2zi1QQmtZR5fk7wpA1Fmf6hTY2xy8xVMyNgfq6LsKy1",
);

const uintSig = (
  await mintToCollectionV1(umi, {
    leafOwner: umi.identity.publicKey,
    merkleTree,
    collectionMint,
    metadata: {
      name: "My NFT",
      uri: "https://chocolate-wet-narwhal-846.mypinata.cloud/ipfs/QmeBRVEmASS3pyK9YZDkRUtAham74JBUZQE3WD4u4Hibv9",
      sellerFeeBasisPoints: 0, // 0%
      collection: { key: collectionMint, verified: false },
      creators: [
        {
          address: umi.identity.publicKey,
          verified: false,
          share: 100,
        },
      ],
    },
  }).sendAndConfirm(umi)
).signature;

const [txSignature] = base58.deserialize(uintSig);
console.log(txSignature);
```

The difference from the earlier mint examples is that here we capture the
returned transaction signature as a byte array. We need this signature in order
to get the leaf schema, and with that schema derive the asset ID.

```typescript title="mint-compressed-nft-to-collection.ts"
const leaf: LeafSchema = await parseLeafFromMintToCollectionV1Transaction(
  umi,
  uintSig,
);
const assetId = findLeafAssetIdPda(umi, {
  merkleTree,
  leafIndex: leaf.nonce,
})[0];

console.log("asset id:", assetId);
```

With everything in place, we can now run our script
`mint-compressed-nft-to-collection.ts`:

```bash
npx esrun mint-compressed-nft-to-collection.ts
```

Your output should resemble:

```bash
asset id: D4A8TYkKE5NzkqBQ4mPybgFbAUDN53fwJ64b8HwEEuUS
✅ Finished successfully!
```

We aren't printing an Explorer link because this address doesn't exist as an
account in Solana state; it is only indexed by RPCs that support the DAS API.

In the next step, we will query this address to fetch our cNFT's details.

#### 4. Read existing cNFT data

Now that we've written code to mint cNFTs, let's see if we can actually fetch
their data.

Create a new file `fetch-cnft-details.ts`:

```bash
touch fetch-cnft-details.ts
```

Import our packages and instantiate Umi. Here we will finally make use of the
`umi.use(dasApi())` we've been importing.

In the instantiation of Umi, we are going to make a change to our connection
endpoint and use an RPC that supports the DAS API.
+ +Be sure to update this with your Helius API keys which you can get from the +[developer dashboard page](https://dashboard.helius.dev/signup?redirectTo=onboarding) + +```typescript title="fetch-cnft-details.ts" +import { dasApi } from "@metaplex-foundation/digital-asset-standard-api"; +import { mplBubblegum } from "@metaplex-foundation/mpl-bubblegum"; +import { + keypairIdentity, + publicKey as UMIPublicKey, +} from "@metaplex-foundation/umi"; +import { createUmi } from "@metaplex-foundation/umi-bundle-defaults"; +import { getKeypairFromFile } from "@solana-developers/helpers"; + +const umi = createUmi( + "https://devnet.helius-rpc.com/?api-key=YOUR-HELIUS-API-KEY", +); + +// load keypair from local file system +// See https://github.com/solana-developers/helpers?tab=readme-ov-file#get-a-keypair-from-a-keypair-file +const localKeypair = await getKeypairFromFile(); + +// convert to Umi compatible keypair +const umiKeypair = umi.eddsa.createKeypairFromSecretKey(localKeypair.secretKey); + +// load the MPL Bubblegum program, dasApi plugin and assign a signer to our umi instance +umi.use(keypairIdentity(umiKeypair)).use(mplBubblegum()).use(dasApi()); +``` + +Fetching a compressed NFT details is as simple as calling the `getAsset` method +with the `assetId` from the previous step. + +```typescript title="fetch-cnft-details.ts" +const assetId = UMIPublicKey("D4A8TYkKE5NzkqBQ4mPybgFbAUDN53fwJ64b8HwEEuUS"); + +// @ts-ignore +const rpcAsset = await umi.rpc.getAsset(assetId); +console.log(rpcAsset); +``` + +Let’s start by declaring a function `logNftDetails` that takes as parameters +`treeAddress` and `nftsMinted`. + +The output of our console.log would output + +```json +{ + interface: 'V1_NFT', + id: 'D4A8TYkKE5NzkqBQ4mPybgFbAUDN53fwJ64b8HwEEuUS', + content: { + '$schema': 'https://schema.metaplex.com/nft1.0.json', + json_uri: 'https://chocolate-wet-narwhal-846.mypinata.cloud/ipfs/QmeBRVEmASS3pyK9YZDkRUtAham74JBUZQE3WD4u4Hibv9', + files: [ [Object] ], + metadata: { + attributes: [Array], + description: 'My NFT Description', + name: 'My NFT', + symbol: '', + token_standard: 'NonFungible' + }, + links: { + image: 'https://lycozm33rkk5ozjqldiuzc6drazmdp5d5g3g7foh3gz6rz5zp7va.arweave.net/XgTss3uKlddlMFjRTIvDiDLBv6Pptm-Vx9mz6Oe5f-o' + } + }, + authorities: [ + { + address: '4sk8Ds1T4bYnN4j23sMbVyHYABBXQ53NoyzVrXGd3ja4', + scopes: [Array] + } + ], + compression: { + eligible: false, + compressed: true, + data_hash: '2UgKwnTkguefRg3P5J33UPkNebunNMFLZTuqvnBErqhr', + creator_hash: '4zKvSQgcRhJFqjQTeCjxuGjWydmWTBVfCB5eK4YkRTfm', + asset_hash: '2DwKkMFYJHDSgTECiycuBApMt65f3N1ZwEbRugRZymwJ', + tree: 'ZwzNxXw83PUmWSypXmqRH669gD3hF9rEjHWPpVghr5h', + seq: 4, + leaf_id: 3 + }, + grouping: [ + { + group_key: 'collection', + group_value: 'D2zi1QQmtZR5fk7wpA1Fmf6hTY2xy8xVMyNgfq6LsKy1' + } + ], + royalty: { + royalty_model: 'creators', + target: null, + percent: 0, + basis_points: 0, + primary_sale_happened: false, + locked: false + }, + creators: [ + { + address: '4kg8oh3jdNtn7j2wcS7TrUua31AgbLzDVkBZgTAe44aF', + share: 100, + verified: false + } + ], + ownership: { + frozen: false, + delegated: false, + delegate: null, + ownership_model: 'single', + owner: '4kg8oh3jdNtn7j2wcS7TrUua31AgbLzDVkBZgTAe44aF' + }, + supply: { print_max_supply: 0, print_current_supply: 0, edition_nonce: null }, + mutable: true, + burnt: false +} +``` + +Remember, the Read API also includes ways to get multiple assets, query by +owner, creator, etc., and more. 
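For instance, listing every compressed NFT owned by a wallet is a single
`getAssetsByOwner` call. The sketch below reuses the `umi` instance with the
`dasApi()` plugin from above; the exact parameter and return shapes are
assumptions based on the Metaplex DAS API plugin and the Helius docs, so verify
them against your provider before using this in your own scripts.

```typescript
import { publicKey as UMIPublicKey } from "@metaplex-foundation/umi";

// assumes `umi` is the DAS-capable instance created in fetch-cnft-details.ts
const owner = UMIPublicKey("4kg8oh3jdNtn7j2wcS7TrUua31AgbLzDVkBZgTAe44aF");

// @ts-ignore - the dasApi plugin augments umi.rpc at runtime
const assetsByOwner = await umi.rpc.getAssetsByOwner({ owner });

for (const asset of assetsByOwner.items) {
  console.log(asset.id, asset.content.metadata.name);
}
```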
Be sure to look through the
[Helius docs](https://docs.helius.dev/compression-and-das-api/digital-asset-standard-das-api)
to see what's available.

#### 5. Transfer a cNFT

The last thing we're going to add to our script is a cNFT transfer. Just as
with a standard SPL token transfer, security is paramount. Unlike with a
standard SPL token transfer, however, to build a secure transfer with state
compression of any kind, the program performing the transfer needs the entire
asset data.

Fortunately for us, we can get the asset data with the `getAssetWithProof`
method.

Let's first create a new file `transfer-asset.ts`, and populate it with the
code for instantiating a new Umi client.

```typescript title="transfer-asset.ts"
import { dasApi } from "@metaplex-foundation/digital-asset-standard-api";
import {
  getAssetWithProof,
  mplBubblegum,
  transfer,
} from "@metaplex-foundation/mpl-bubblegum";
import {
  keypairIdentity,
  publicKey as UMIPublicKey,
} from "@metaplex-foundation/umi";
import { createUmi } from "@metaplex-foundation/umi-bundle-defaults";
import { base58 } from "@metaplex-foundation/umi/serializers";
import {
  getExplorerLink,
  getKeypairFromFile,
} from "@solana-developers/helpers";
import { clusterApiUrl } from "@solana/web3.js";

const umi = createUmi(clusterApiUrl("devnet"));

// load keypair from local file system
// See https://github.com/solana-developers/helpers?tab=readme-ov-file#get-a-keypair-from-a-keypair-file
const localKeypair = await getKeypairFromFile();

// convert to Umi compatible keypair
const umiKeypair = umi.eddsa.createKeypairFromSecretKey(localKeypair.secretKey);

// load the MPL Bubblegum program, dasApi plugin and assign a signer to our umi instance
umi.use(keypairIdentity(umiKeypair)).use(mplBubblegum()).use(dasApi());
```

We are now ready to transfer our asset. Using the `assetId` for our cNFT, we
can call the `transfer` method from the Bubblegum library.

```typescript title="transfer-asset.ts"
const assetId = UMIPublicKey("D4A8TYkKE5NzkqBQ4mPybgFbAUDN53fwJ64b8HwEEuUS");

//@ts-ignore
const assetWithProof = await getAssetWithProof(umi, assetId);

const uintSig = (
  await transfer(umi, {
    ...assetWithProof,
    leafOwner: umi.identity.publicKey,
    newLeafOwner: UMIPublicKey("J63YroB8AwjDVjKuxjcYFKypVM3aBeQrfrVmNBxfmThB"),
  }).sendAndConfirm(umi)
).signature;

const [txSignature] = base58.deserialize(uintSig);

const explorerLink = getExplorerLink("transaction", txSignature, "devnet");
console.log(`Explorer link: ${explorerLink}`);
console.log("✅ Finished successfully!");
```

Running our script with `npx esrun transfer-asset.ts` should output something
similar to this if successful:

```bash
Explorer link: https://explorer.solana.com/tx/3sNgN7Gnh5FqcJ7ZuUEXFDw5WeojpwkDjdfvTNWy68YCEJUF8frpnUJdHhHFXAtoopsytzkKewh39Rf7phFQ2hCF?cluster=devnet
✅ Finished successfully!
```

Open the Explorer link, and scroll to the bottom to observe your transaction
logs:

![Solana Explorer showing logs of the transfer cnft instruction](/assets/courses/unboxed/solana-explorer-showing-cnft-transfer-logs.png)

Congratulations! Now you know how to mint, read, and transfer cNFTs. If you
wanted, you could update the max depth, max buffer size, and canopy depth to
larger values, and as long as you have enough Devnet SOL, this script will let
you mint up to 10k cNFTs for a small fraction of what it would cost to mint 10k
traditional NFTs.

Inspect the cNFT on Solana Explorer!
Just like previously, if you have any issues, you should try to fix them
yourself, but if needed the
[solution code](https://github.com/solana-foundation/compressed-nfts) is
available.

### Challenge

It's your turn to take these concepts for a spin on your own! We're not going
to be overly prescriptive at this point, but here are some ideas:

1. Create your own production cNFT collection
2. Build a UI for this lesson's lab that will let you mint a cNFT and display
   it
3. See if you can replicate some of the lab script's functionality in an
   onchain program, i.e. write a program that can mint cNFTs

Push your code to GitHub and
[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=db156789-2400-4972-904f-40375582384a)!

diff --git a/content/courses/state-compression/generalized-state-compression.mdx b/content/courses/state-compression/generalized-state-compression.mdx
new file mode 100644
index 000000000..00370782b
--- /dev/null
+++ b/content/courses/state-compression/generalized-state-compression.mdx
@@ -0,0 +1,1653 @@
---
title: Generalized State Compression
objectives:
  - Explain the flow of Solana's state compression logic
  - Explain the difference between a Merkle tree and a concurrent Merkle tree
  - Implement generic state compression in a basic Solana program
description:
  Understand how state compression - the technology behind compressed NFTs -
  works, and learn how to apply it in your Solana programs.
---

## Summary

- State compression on Solana is primarily used for compressed NFTs (cNFTs),
  but it can be applied to any data type.
- State compression lowers the amount of data you have to store onchain using
  Merkle trees.
- A Merkle tree compresses data by hashing pairs of data repeatedly until a
  single root hash is produced. This root hash is then stored onchain.
- Each leaf on a Merkle tree is a hash of that leaf's data.
- A concurrent Merkle tree is a specialized version of a Merkle tree. Unlike a
  standard Merkle tree, it allows multiple updates simultaneously without
  affecting transaction validity.
- Data in a state-compressed program is not stored onchain, so you have to use
  indexers to keep an offchain cache of the data. It's this offchain cache that
  is then verified against the onchain Merkle tree.

## Lesson

Previously, we talked about state compression in the context of compressed NFTs.

While compressed NFTs are the main use case for state compression, you can
apply state compression to any Solana program. In this lesson, we'll discuss
state compression in general terms so you can use it across your Solana
projects.

### A theoretical overview of state compression

Normally, data in Solana programs is serialized (usually with borsh) and stored
directly in an account. This makes it easy to read and write the data through
the program. The account data is trustworthy because only the program can
modify it.

However, if we only need to verify the integrity of the data, then we don't
need to store the actual data onchain. Instead, we can store hashes of the
data, which can be used to prove or verify its accuracy. This is called _state
compression_.

These hashes take up far less storage space than the original data. The full
data can be stored in a cheaper, offchain location, and only needs to be
verified against the onchain hash when accessed.

The Solana State Compression Program uses a structure known as a **concurrent
Merkle tree**.
A concurrent Merkle tree is a special kind of binary tree that
deterministically hashes data, i.e. the same inputs will always produce the
same Merkle root.

The final hash, called a _Merkle root_, is significantly smaller in size than
all the original full data sets combined. This is why it's called
"compression". And it's this hash that's stored onchain.

**Outlined below are the steps to this process, in order:**

1. Take a piece of data.
2. Create a hash of that data.
3. Store the hash as a "leaf" at the bottom of the tree.
4. Hash pairs of leaves together to create branches.
5. Hash pairs of branches together.
6. Repeat this process until you reach the top of the tree.
7. The top of the tree contains a final "root hash."
8. Store this root hash onchain as proof of the data.
9. To verify the data, recompute the hashes and compare the final hash to the
   onchain root hash.

This method comes with some trade-offs:

1. The data isn't stored onchain, so it's harder to access.
2. Developers must decide how often to verify the data against the onchain
   hash.
3. If the data changes, the entire data set must be sent to the program, along
   with the new data. You'll also need proof that the data matches the hash.

These considerations will guide you when deciding whether, when, and how to
implement state compression in your programs. With that quick overview, let's
go into more technical detail.

#### Concurrent Merkle trees

Since a Merkle tree is represented as a single hash, any change to a leaf node
alters the root hash. This becomes problematic when multiple transactions in
the same slot try to update leaf data. Since these transactions are executed
serially, i.e. one after the other, all but the first will fail, because the
root hash and proof passed in will have been invalidated by the first
transaction executed.

In short, a standard Merkle tree can only handle one leaf update per
[slot](/docs/terminology#slot). This significantly limits the throughput in a
state-compressed program that depends on a single Merkle tree for its state.

Thankfully, this issue can be addressed using a _concurrent_ Merkle tree.
Unlike a regular Merkle tree, a concurrent Merkle tree keeps a secure changelog
of recent updates, along with their root hash and the proof needed to derive
it. When multiple transactions in the same slot attempt to modify leaf data,
the changelog serves as a reference, enabling concurrent updates to the tree.

How does the concurrent Merkle tree achieve this? In a standard Merkle tree,
only the root hash is stored. However, a concurrent Merkle tree includes extra
data that ensures subsequent writes can succeed.

This includes:

1. The root hash - The same root hash found in a regular Merkle tree.
2. A changelog buffer - A buffer containing proof data for recent root hash
   changes, allowing further writes in the same slot to succeed.
3. A canopy - To update a specific leaf, you need the entire proof path from
   the leaf to the root hash. The canopy stores intermediate proof nodes along
   this path so that not all of them need to be sent from the client to the
   program.

### Key Parameters for Configuring a Concurrent Merkle Tree

As a developer, you are responsible for controlling three key parameters that
directly affect the tree's size, cost, and the number of concurrent changes it
can handle:

1. **Max Depth**
2. **Max Buffer Size**
3. **Canopy Depth**

Let's take a brief look at each parameter.
+ +#### Max Depth + +The **max depth** determines how many levels or "hops" are required to reach the +root of the tree from any leaf. Since Merkle trees are structured as binary +trees, where each leaf is paired with only one other leaf, the max depth can be +used to calculate the total number of nodes in the tree with the formula: +`2^maxDepth`. + +Here’s a quick TypeScript function for illustration: + +```typescript +const getMaxDepth = (itemCount: number) => { + if (itemCount === 0) { + return 0; + } + return Math.ceil(Math.log2(itemCount)); +}; +``` + +A max depth of 20 would allow for over one million leaves, making it suitable +for storing large datasets like NFTs. + +#### Max Buffer Size + +The **max buffer size** controls how many concurrent updates can be made to the +tree within a single slot while keeping the root hash valid. In a standard +Merkle tree, only the first transaction in a slot would be successful since it +updates the root hash, causing all subsequent transactions to fail due to hash +mismatches. However, in a concurrent Merkle tree, the buffer maintains a log of +changes, allowing multiple transactions to update the tree simultaneously by +checking the appropriate root hash from the buffer. A larger buffer size +increases throughput by enabling more concurrent changes. + +#### Canopy Depth + +The **canopy depth** specifies how many proof nodes are stored onchain for any +given proof path. To verify any leaf in the tree, you need a complete proof +path, which includes one proof node for every layer of the tree. For a tree with +a max depth of 14, there will be 14 proof nodes in total. Each proof node adds +32 bytes to the transaction, and without careful management, large trees could +exceed the transaction size limit. + +Storing more proof nodes onchain (i.e., having a deeper canopy) allows other +programs to interact with your tree without exceeding transaction limits, but it +also uses more onchain storage. Consider the complexity of interactions with +your tree when deciding on an appropriate canopy depth. + +### Balancing Trade-offs + +These three values—max depth, max buffer size, and canopy depth—all come with +trade-offs. Increasing any of them will enlarge the account used to store the +tree, raising the cost of creating the tree. + +- **Max Depth:** This is straightforward to determine based on how much data + needs to be stored. For example, if you need to store 1 million compressed + NFTs (cNFTs), where each cNFT is a leaf, you would need a max depth of 20 + (`2^maxDepth > 1 million`). +- **Max Buffer Size:** The choice of buffer size is mainly a question of + throughput—how many concurrent updates are required? A larger buffer allows + for more updates in the same slot. +- **Canopy Depth:** A deeper canopy improves composability, enabling other + programs to interact with your state-compressed program without exceeding + transaction size limits. Omitting the canopy is discouraged, as it could cause + issues with transaction size, especially when other programs are involved. + +### Data Access in a State-Compressed Program + +In a state-compressed program, the actual data isn’t stored directly onchain. +Instead, the concurrent Merkle tree structure is stored, while the raw data +resides in the blockchain’s more affordable ledger state. This makes accessing +the data more challenging, but not impossible. + +The Solana ledger is essentially a list of entries containing signed +transactions, which can be traced back to the Genesis block theoretically. 
+This means any data that has ever been included in a transaction is stored in
+the ledger.
+
+Since the state compression process happens onchain, all the data is still in
+the ledger state. In theory, you could retrieve the original data by replaying
+the entire chain state from the start. However, it’s far more practical (though
+still somewhat complex) to use an indexer to track and index the data as the
+transactions happen. This creates an offchain "cache" of the data that can be
+easily accessed and verified against the onchain root hash.
+
+While this process may seem complex at first, it becomes clearer with practice.
+
+### State Compression Tooling
+
+While understanding the theory behind state compression is crucial, you don’t
+have to build it all from scratch. Talented engineers have already developed
+essential tools like the SPL State Compression Program and the Noop Program to
+simplify the process.
+
+#### SPL State Compression and Noop Programs
+
+The SPL State Compression Program is designed to streamline and standardize the
+creation and management of concurrent Merkle trees across the Solana ecosystem.
+It provides instruction handlers for initializing Merkle trees, handling tree
+leaves (such as adding, updating, or removing data), and verifying the integrity
+of leaf data.
+
+Additionally, the State Compression Program works in conjunction with a separate
+"Noop" program. A no-op program does nothing - literally 'no operation.' The
+Solana Noop Program only logs data to the ledger state; however, that logging is
+essential to state compression:
+
+When you store compressed data, it’s passed to the State Compression Program,
+which hashes the data and emits it as an "event" to the Noop Program. While the
+hash is stored in the concurrent Merkle tree, the raw data can still be accessed
+via the Noop Program’s transaction logs.
+
+### Indexing Data for Easy Lookup
+
+Typically, accessing onchain data is as simple as fetching the relevant account.
+However, with state compression, it’s not that straightforward.
+
+As mentioned earlier, the data now resides in the ledger state rather than in an
+account. The most accessible place to find the complete data is in the logs of
+the Noop instruction. While this data remains in the ledger state indefinitely,
+it may become inaccessible through validators after a certain period.
+
+To save space and improve performance, validators don’t store all transactions
+back to the Genesis block. The length of time you can access Noop instruction
+logs varies depending on the validator, so eventually the logs will become
+unavailable if you’re relying on direct access to them.
+
+In theory, it’s possible to replay transaction states back to the Genesis block,
+but this approach is impractical for most teams and isn’t efficient. Some RPC
+providers have adopted the
+[Digital Asset Standard (DAS)](https://docs.helius.dev/compression-and-das-api/digital-asset-standard-das-api)
+to enable efficient querying of compressed NFTs and other assets. However, as of
+now, DAS does not support arbitrary state compression.
+
+You essentially have two main options:
+
+1. Use an indexing provider to create a custom indexing solution for your
+   program, which will monitor the events sent to the Noop program and store the
+   relevant data offchain.
+2. Build your own indexing solution that stores transaction data offchain.
+
+For many dApps, option 2 can be a practical choice.
+Larger-scale applications, however, may need to rely on infrastructure
+providers to manage their indexing needs.
+
+### State Compression Development Process
+
+#### Create Rust Types
+
+In a typical Anchor program, developers often start by defining the Rust types
+that represent accounts. For a state-compressed program, however, the focus
+shifts to defining types that align with the Merkle tree structure.
+
+In state compression, your onchain account will primarily store the Merkle tree.
+The more practical data will be serialized and logged to the Noop program for
+easier access and management. Your Rust types should encompass all data stored
+in the leaf nodes and any contextual information necessary for interpreting that
+data. For instance, if you’re developing a simple messaging program, your
+`MessageLog` struct might look something like this:
+
+```rust
+/// A log entry for messages sent between two public keys.
+#[derive(AnchorSerialize, AnchorDeserialize)]
+pub struct MessageLog {
+    /// The 32-byte leaf node hash for message logging.
+    pub leaf_node: [u8; 32],
+    /// The public key of the message sender.
+    pub from: Pubkey,
+    /// The public key of the message recipient.
+    pub to: Pubkey,
+    /// The actual message content.
+    pub message: String,
+}
+
+/// Constructs a new `MessageLog`.
+///
+/// # Arguments
+///
+/// * `leaf_node` - A 32-byte array representing the leaf node hash.
+/// * `from` - The public key of the message sender.
+/// * `to` - The public key of the message recipient.
+/// * `message` - The message to be sent.
+///
+/// # Returns
+///
+/// Returns a new `MessageLog` instance.
+pub fn new_message_log(leaf_node: [u8; 32], from: Pubkey, to: Pubkey, message: String) -> MessageLog {
+    MessageLog { leaf_node, from, to, message }
+}
+```
+
+To be absolutely clear, the **`MessageLog` is not an account you will read
+from**. Instead, your program will create an instance of `MessageLog` from the
+inputs passed to an instruction handler, not from data read out of an account.
+We will cover how to read data from compressed accounts later.
+
+#### Initialize a New Tree
+
+To set up a new Merkle tree, clients need to perform two distinct steps.
+
+1. First, they allocate the account by calling the System Program.
+2. Next, they use a custom program to initialize the new account. This
+   initialization involves setting the maximum depth and buffer size for the
+   Merkle tree.
+
+The initialization instruction handler must create a CPI (Cross-Program
+Invocation) to call the `init_empty_merkle_tree` instruction from the State
+Compression Program. You’ll need to provide the maximum depth and buffer size as
+arguments to this instruction.
+
+- **Max depth**: Defines the maximum number of hops needed to travel from any
+  leaf to the root of the tree.
+- **Max buffer size**: Specifies the space allocated for storing a changelog of
+  tree updates. This changelog is essential for supporting concurrent updates
+  within the same block.
+
+For instance, if you are initializing a tree to store messages between users,
+your instruction handler might look like this:
+
+```rust
+/// Initializes an empty Merkle tree for storing messages with a specified depth and buffer size.
+///
+/// This function creates a CPI (Cross-Program Invocation) call to initialize the Merkle tree account
+/// using the provided authority and compression program. 
The PDA (Program Derived Address) seeds are used for +/// signing the transaction. +/// +/// # Arguments +/// +/// * `ctx` - The context containing the accounts required for Merkle tree initialization. +/// * `max_depth` - The maximum depth of the Merkle tree. +/// * `max_buffer_size` - The maximum buffer size of the Merkle tree. +/// +/// # Returns +/// +/// This function returns a `Result<()>`, indicating success or failure. +/// +/// # Errors +/// +/// This function will return an error if the CPI call to `init_empty_merkle_tree` fails. +pub fn create_messages_tree( + ctx: Context, + max_depth: u32, // Max depth of the Merkle tree + max_buffer_size: u32 // Max buffer size of the Merkle tree +) -> Result<()> { + // Get the address for the Merkle tree account + let merkle_tree = ctx.accounts.merkle_tree.key(); + + // The seeds for PDAs signing + let signers_seeds: &[&[&[u8]]] = &[ + &[ + merkle_tree.as_ref(), // The address of the Merkle tree account + &[*ctx.bumps.get("tree_authority").unwrap()], // The bump seed for the PDA + ], + ]; + + // Create CPI context for `init_empty_merkle_tree` instruction handler + let cpi_ctx = CpiContext::new_with_signer( + ctx.accounts.compression_program.to_account_info(), // The SPL account compression program + Initialize { + authority: ctx.accounts.tree_authority.to_account_info(), // The authority for the Merkle tree, using a PDA + merkle_tree: ctx.accounts.merkle_tree.to_account_info(), // The Merkle tree account to be initialized + noop: ctx.accounts.log_wrapper.to_account_info(), // The noop program to log data + }, + signers_seeds // The seeds for PDAs signing + ); + + // CPI to initialize an empty Merkle tree with the given max depth and buffer size + init_empty_merkle_tree(cpi_ctx, max_depth, max_buffer_size)?; + + Ok(()) +} +``` + +#### Adding Hashes to the Tree + +Once the Merkle tree is initialized, you can begin adding data hashes to it. +This process involves passing the uncompressed data to an Instruction handler +within your program, which will hash the data, log it to the Noop Program, and +then use the State Compression Program’s `append` instruction to add the hash to +the tree. Here’s how the Instruction Handler operates in detail: + +1. **Hash the Data**: Use the `hashv` function from the `keccak` crate to hash + the data. It’s recommended to include the data owner or authority in the hash + to ensure that only the proper authority can modify it. +2. **Log the Data**: Create a log object representing the data you want to log + to the Noop Program. Then, call `wrap_application_data_v1` to issue a CPI + (Cross-Program Invocation) to the Noop Program with this object. This makes + the uncompressed data easily accessible to any client, such as indexers, that + may need it. You could also develop a custom client to observe and index data + for your application specifically. + +3. **Append the Hash**: Construct and issue a CPI to the State Compression + Program’s `append` Instruction. This will take the hash generated in step 1 + and append it to the next available leaf on the Merkle tree. As with previous + steps, this requires the Merkle tree address and tree authority bump as + signature seeds. + +When applied to a messaging system, the resulting implementation might look like +this: + +```rust +/// Appends a message to the Merkle tree. +/// +/// This function hashes the message and the sender’s public key to create a leaf node, +/// logs the message using the noop program, and appends the leaf node to the Merkle tree. 
+/// +/// # Arguments +/// +/// * `ctx` - The context containing the accounts required for appending the message. +/// * `message` - The message to append to the Merkle tree. +/// +/// # Returns +/// +/// This function returns a `Result<()>`, indicating success or failure. +/// +/// # Errors +/// +/// This function will return an error if any of the CPI calls (logging or appending) fail. +pub fn append_message(ctx: Context, message: String) -> Result<()> { + // Hash the message + sender’s public key to create a leaf node + let leaf_node = keccak::hashv(&[message.as_bytes(), ctx.accounts.sender.key().as_ref()]).to_bytes(); + + // Create a new "MessageLog" using the leaf node hash, sender, recipient, and message + let message_log = new_message_log( + leaf_node.clone(), + ctx.accounts.sender.key().clone(), + ctx.accounts.recipient.key().clone(), + message, + ); + + // Log the "MessageLog" data using the noop program + wrap_application_data_v1(message_log.try_to_vec()?, &ctx.accounts.log_wrapper)?; + + // Get the Merkle tree account address + let merkle_tree = ctx.accounts.merkle_tree.key(); + + // The seeds for PDAs signing + let signers_seeds: &[&[&[u8]]] = &[ + &[ + merkle_tree.as_ref(), // The address of the Merkle tree account as a seed + &[*ctx.bumps.get("tree_authority").unwrap()], // The bump seed for the PDA + ], + ]; + + // Create a CPI context and append the leaf node to the Merkle tree + let cpi_ctx = CpiContext::new_with_signer( + ctx.accounts.compression_program.to_account_info(), // The SPL account compression program + Modify { + authority: ctx.accounts.tree_authority.to_account_info(), // Authority for the Merkle tree, using a PDA + merkle_tree: ctx.accounts.merkle_tree.to_account_info(), // The Merkle tree account to be modified + noop: ctx.accounts.log_wrapper.to_account_info(), // The noop program to log data + }, + signers_seeds, // The seeds for PDAs signing + ); + + // CPI call to append the leaf node to the Merkle tree + append(cpi_ctx, leaf_node)?; + + Ok(()) +} +``` + +#### Updating Hashes + +To update a leaf in a Merkle tree, you’ll need to generate a new hash to replace +the existing one. This process requires four key inputs: + +1. The index of the leaf you wish to update +2. The root hash of the Merkle tree +3. The original data you want to modify +4. The updated data + +Using these inputs, you can follow a series of steps similar to those used when +initially appending data to the tree: + +1. **Verify Update Authority**: The first step, unique to updates, is to verify + the authority of the entity making the update. This generally involves + checking that the signer of the `update` transaction is indeed the owner or + authority of the leaf at the specified index. Since the data in the leaf is + hashed, you can’t directly compare the authority’s public key to a stored + value. Instead, compute the previous hash using the old data and the + `authority` listed in the account validation struct. Then, invoke a CPI to + the State Compression Program’s `verify_leaf` instruction to confirm the hash + matches. + +2. **Hash the New Data**: This step mirrors the hashing process for appending + data. Use the `hashv` function from the `keccak` crate to hash the new data + and the update authority, converting each to its corresponding byte + representation. + +3. **Log the New Data**: As with the initial append operation, create a log + object to represent the new data, and use `wrap_application_data_v1` to + invoke the Noop Program via CPI. 
This ensures that the new uncompressed data + is logged and accessible offchain. + +4. **Replace the Existing Leaf Hash**: This step is slightly different from + appending new data. Here, you’ll need to invoke a CPI to the State + Compression Program’s `replace_leaf` instruction. This operation will replace + the existing hash at the specified leaf index with the new hash. You’ll need + to provide the old hash, the new hash, and the leaf index. As usual, the + Merkle tree address and tree authority bump are required as signature seeds. + +When combined, the instructions for updating a hash might look like this: + +```rust +/// Updates a message in the Merkle tree. +/// +/// This function verifies the old message in the Merkle tree by checking its leaf node, +/// and then replaces it with a new message by modifying the Merkle tree’s leaf node. +/// +/// # Arguments +/// +/// * `ctx` - The context containing the accounts required for updating the message. +/// * `index` - The index of the leaf node to update. +/// * `root` - The root hash of the Merkle tree. +/// * `old_message` - The old message that is currently in the Merkle tree. +/// * `new_message` - The new message to replace the old message. +/// +/// # Returns +/// +/// This function returns a `Result<()>`, indicating success or failure. +/// +/// # Errors +/// +/// This function will return an error if verification or replacement of the Merkle tree leaf fails. +pub fn update_message( + ctx: Context, + index: u32, + root: [u8; 32], + old_message: String, + new_message: String +) -> Result<()> { + // Hash the old message + sender’s public key to create the old leaf node + let old_leaf = keccak::hashv(&[old_message.as_bytes(), ctx.accounts.sender.key().as_ref()]).to_bytes(); + + // Get the Merkle tree account address + let merkle_tree = ctx.accounts.merkle_tree.key(); + + // The seeds for PDAs signing + let signers_seeds: &[&[&[u8]]] = &[ + &[ + merkle_tree.as_ref(), // The address of the Merkle tree account as a seed + &[*ctx.bumps.get("tree_authority").unwrap()], // The bump seed for the PDA + ], + ]; + + // Verify the old leaf node in the Merkle tree + { + // If the old and new messages are the same, no update is needed + if old_message == new_message { + msg!("Messages are the same!"); + return Ok(()); + } + + // Create CPI context for verifying the leaf node + let cpi_ctx = CpiContext::new_with_signer( + ctx.accounts.compression_program.to_account_info(), // The SPL account compression program + VerifyLeaf { + merkle_tree: ctx.accounts.merkle_tree.to_account_info(), // The Merkle tree account to be verified + }, + signers_seeds, // The seeds for PDAs signing + ); + + // Verify the old leaf node in the Merkle tree + verify_leaf(cpi_ctx, root, old_leaf, index)?; + } + + // Hash the new message + sender’s public key to create the new leaf node + let new_leaf = keccak::hashv(&[new_message.as_bytes(), ctx.accounts.sender.key().as_ref()]).to_bytes(); + + // Log the new message for indexers using the noop program + let message_log = new_message_log( + new_leaf.clone(), + ctx.accounts.sender.key().clone(), + ctx.accounts.recipient.key().clone(), + new_message, + ); + wrap_application_data_v1(message_log.try_to_vec()?, &ctx.accounts.log_wrapper)?; + + // Replace the old leaf with the new leaf in the Merkle tree + { + // Create CPI context for replacing the leaf node + let cpi_ctx = CpiContext::new_with_signer( + ctx.accounts.compression_program.to_account_info(), // The SPL account compression program + Modify { + authority: 
ctx.accounts.tree_authority.to_account_info(), // The authority for the Merkle tree, using a PDA + merkle_tree: ctx.accounts.merkle_tree.to_account_info(), // The Merkle tree account to be modified + noop: ctx.accounts.log_wrapper.to_account_info(), // The noop program to log data + }, + signers_seeds, // The seeds for PDAs signing + ); + + // Replace the old leaf node with the new one in the Merkle tree + replace_leaf(cpi_ctx, root, old_leaf, new_leaf, index)?; + } + + Ok(()) +} +``` + +#### Deleting Hashes + +As of now, the State Compression Program does not have a dedicated `delete` +instruction. + +Instead, you can simulate deletion by updating the leaf data with a value that +signals it has been "deleted." + +The exact value you choose will depend on your specific use case and security +requirements. For some, this may involve setting all data fields to zero, while +others might prefer storing a predefined static string that marks the leaf as +deleted. This approach allows you to handle deletions in a way that suits your +application’s needs without compromising data integrity. + +#### Accessing Data from a Client + +We’ve covered creating, updating, and deleting data in state compression, but +reading data presents its unique challenges. + +Accessing compressed data from a client can be tricky because the Merkle tree +stores only data hashes, which cannot be used to recover the original data. +Additionally, the uncompressed data logged to the Noop program is not retained +indefinitely. + +To access this data, you generally have two options: + +1. **Work with an indexing provider** to develop a custom solution tailored to + your program. This allows you to write client-side code to retrieve and + access the data based on how the indexer provides it. +2. **Create your own pseudo-indexer** to store and retrieve the data, offering a + lighter-weight solution. + +If your project is decentralized and expects widespread interaction beyond your +frontend, option 2 might not be sufficient. However, if you have control over +most program interactions, this approach can work. + +There’s no one-size-fits-all solution here. Two potential strategies include: + +1. **Store raw data**: One approach is to store the raw data in a database + simultaneously by sending it to the program. This allows you to keep a record + of the data, along with the Merkle tree leaf where the data was hashed and + stored. + +2. **Create a transaction observer**: Another approach is to create a server + that observes the transactions your program executes. This server would fetch + transactions, look up the related Noop logs, decode them, and store the data. + +When writing tests in the lab, we’ll simulate both of these approaches, although +instead of using a database, the data will be stored in memory for the test’s +duration. + +The process of setting this up can be a bit complex. For a given transaction, +you’ll retrieve it from the RPC provider, extract the inner instructions related +to the Noop program, and use the `deserializeApplicationDataEvent` function from +the `@solana/spl-account-compression` JS package to decode the logs. Then, +you’ll use Borsh to deserialize the data. 
Here’s an example from the messaging +program to illustrate the process: + +```typescript +export async function getMessageLog( + connection: Connection, + txSignature: string, +) { + // Confirm the transaction, otherwise the getTransaction sometimes returns null + const latestBlockHash = await connection.getLatestBlockhash(); + await connection.confirmTransaction({ + blockhash: latestBlockHash.blockhash, + lastValidBlockHeight: latestBlockHash.lastValidBlockHeight, + signature: txSignature, + }); + + // Get the transaction info using the tx signature + const txInfo = await connection.getTransaction(txSignature, { + maxSupportedTransactionVersion: 0, + }); + + // Get the inner instructions related to the program instruction at index 0 + // We only send one instruction in test transaction, so we can assume the first + const innerIx = txInfo!.meta?.innerInstructions?.[0]?.instructions; + + // Get the inner instructions that match the SPL_NOOP_PROGRAM_ID + const noopInnerIx = innerIx.filter( + instruction => + txInfo?.transaction.message.staticAccountKeys[ + instruction.programIdIndex + ].toBase58() === SPL_NOOP_PROGRAM_ID.toBase58(), + ); + + let messageLog: MessageLog; + for (let i = noopInnerIx.length - 1; i >= 0; i--) { + try { + // Try to decode and deserialize the instruction data + const applicationDataEvent = deserializeApplicationDataEvent( + Buffer.from(bs58.decode(noopInnerIx[i]?.data!)), + ); + + // Get the application data + const applicationData = applicationDataEvent.fields[0].applicationData; + + // Deserialize the application data into MessageLog instance + messageLog = deserialize( + MessageLogBorshSchema, + MessageLog, + Buffer.from(applicationData), + ); + + if (messageLog !== undefined) { + break; + } + } catch (__) {} + } + + return messageLog; +} +``` + +### Conclusion + +Implementing generalized state compression may be challenging, but it is +entirely achievable using the available tools. As the ecosystem evolves, these +tools and programs will continue to improve, making the process more +streamlined. If you discover solutions that enhance your development experience, +please don’t hesitate to share them with the community! + + + +Remember to write comprehensive tests for your state compression implementation. This ensures your program behaves correctly and helps catch potential issues early in the development process. + + + +## Lab: Building a Note-Taking App with Generalized State Compression + +In this lab, we’ll walk through the process of developing an Anchor program that +uses custom state compression to power a basic note-taking app. This will give +you hands-on experience in working with compressed data and help reinforce key +concepts around state compression on Solana. + +#### 1. Set up the Project + +Start by initializing an Anchor program: + +```bash +anchor init compressed-notes +``` + +Next, we’ll add the `spl-account-compression` crate with the `cpi` feature +enabled. To do this, update the `Cargo.toml` file located at +`programs/compressed-notes` by adding the following dependency: + +```toml +[dependencies] +anchor-lang = "0.28.0" +spl-account-compression = { version="0.2.0", features = ["cpi"] } +solana-program = "1.16.0" +``` + +We’ll be running tests locally, but we’ll need both the State Compression +Program and the Noop Program from the Mainnet to do so. To make sure these +programs are available on our local cluster, we need to include them in the +`Anchor.toml` file located in the root directory. 
Here’s how you can add them: + +In `Anchor.toml`, update the programs section with the following entries: + +```toml +[test.validator] +url = "https://api.mainnet-beta.solana.com" + +[[test.validator.clone]] +address = "noopb9bkMVfRPU8AsbpTUg8AQkHtKwMYZiFUjNRtMmV" + +[[test.validator.clone]] +address = "cmtDvXumGCrqC1Age74AVPhSRVXJMd8PJS91L8KbNCK" +``` + +Finally, let’s set up the `lib.rs` file for the remainder of the demo. Start by +removing the `initialize` instruction and the `Initialize` accounts struct. +Next, add the necessary imports as indicated in the code snippet, making sure to +include **_your_** program ID. + +```rust +use anchor_lang::{ + prelude::*, + solana_program::keccak, +}; +use spl_account_compression::{ + Noop, + program::SplAccountCompression, + cpi::{ + accounts::{Initialize, Modify, VerifyLeaf}, + init_empty_merkle_tree, verify_leaf, replace_leaf, append, + }, + wrap_application_data_v1, +}; + +// Replace with your program ID +declare_id!("PROGRAM_PUBLIC_KEY_GOES_HERE"); + +/// A program that manages compressed notes using a Merkle tree for efficient storage and verification. +#[program] +pub mod compressed_notes { + use super::*; + + // Define your program instructions here. + + /// Initializes a new Merkle tree for storing messages. + /// + /// This function creates a Merkle tree with the specified maximum depth and buffer size. + /// + /// # Arguments + /// + /// * `ctx` - The context containing the accounts required for initializing the tree. + /// * `max_depth` - The maximum depth of the Merkle tree. + /// * `max_buffer_size` - The maximum buffer size of the Merkle tree. + pub fn create_messages_tree( + ctx: Context, + max_depth: u32, + max_buffer_size: u32, + ) -> Result<()> { + // Tree creation logic here + Ok(()) + } + + /// Appends a new message to the Merkle tree. + /// + /// This function hashes the message and adds it as a leaf node to the tree. + /// + /// # Arguments + /// + /// * `ctx` - The context containing the accounts required for appending the message. + /// * `message` - The message to append to the Merkle tree. + pub fn append_message(ctx: Context, message: String) -> Result<()> { + // Message appending logic here + Ok(()) + } + + /// Updates an existing message in the Merkle tree. + /// + /// This function verifies the old message and replaces it with the new message in the tree. + /// + /// # Arguments + /// + /// * `ctx` - The context containing the accounts required for updating the message. + /// * `index` - The index of the message in the tree. + /// * `root` - The root of the Merkle tree. + /// * `old_message` - The old message to be replaced. + /// * `new_message` - The new message to replace the old message. + pub fn update_message( + ctx: Context, + index: u32, + root: [u8; 32], + old_message: String, + new_message: String, + ) -> Result<()> { + // Message updating logic here + Ok(()) + } + + // Add more functions as needed +} + +// Add structs for accounts, state, etc., here + +/// Struct for holding the account information required for message operations. +#[derive(Accounts)] +pub struct MessageAccounts<'info> { + /// The Merkle tree account. + #[account(mut)] + pub merkle_tree: AccountInfo<'info>, + /// The authority for the Merkle tree. + pub tree_authority: AccountInfo<'info>, + /// The sender’s account. + pub sender: Signer<'info>, + /// The recipient’s account. + pub recipient: AccountInfo<'info>, + /// The compression program (Noop program). 
+    pub compression_program: Program<'info, SplAccountCompression>,
+    /// The log wrapper account for logging data.
+    pub log_wrapper: AccountInfo<'info>,
+}
+```
+
+For the remainder of this demo, we’ll be making updates directly in the `lib.rs`
+file. This approach simplifies the explanations. You can modify the structure as
+needed.
+
+It’s a good idea to build your project now to confirm that your environment is
+set up correctly and to reduce build times in the future.
+
+#### 2. Define `Note` schema
+
+Next, we’ll define the structure of a note within our program. Each note should
+have the following attributes:
+
+- `leaf_node` - a 32-byte array representing the hash stored on the leaf node.
+- `owner` - the public key of the note’s owner.
+- `note` - a string containing the text of the note.
+
+```rust
+#[derive(AnchorSerialize, AnchorDeserialize, Clone)]
+/// A struct representing a log entry in the Merkle tree for a note.
+pub struct NoteLog {
+    /// The leaf node hash generated from the note data.
+    pub leaf_node: [u8; 32],
+    /// The public key of the note’s owner.
+    pub owner: Pubkey,
+    /// The content of the note.
+    pub note: String,
+}
+
+impl NoteLog {
+    /// Constructs a new note log from a given leaf node, owner, and note message.
+    ///
+    /// # Arguments
+    ///
+    /// * `leaf_node` - A 32-byte array representing the hash of the note.
+    /// * `owner` - The public key of the note’s owner.
+    /// * `note` - The note message content.
+    ///
+    /// # Returns
+    ///
+    /// A new `NoteLog` struct containing the provided data.
+    pub fn new(leaf_node: [u8; 32], owner: Pubkey, note: String) -> Self {
+        Self { leaf_node, owner, note }
+    }
+}
+```
+
+In a traditional Anchor program, a note would typically be represented by a
+`Note` struct using the `account` macro. However, because we’re using state
+compression, we use `NoteLog`, a struct with the `AnchorSerialize` macro
+applied. The constructor is exposed as `NoteLog::new` so the instruction
+handlers below can build log entries directly.
+
+#### 3. Define Account Constraints
+
+All our instruction handlers will use the same
+[account constraints](https://www.anchor-lang.com/docs/account-constraints):
+
+- `owner` - The creator and owner of the note, who must sign the transaction.
+- `tree_authority` - The authority for the Merkle tree, used for signing
+  compression-related CPIs.
+- `merkle_tree` - The address of the Merkle tree where note hashes are stored;
+  this will be unchecked as it’s validated by the State Compression Program.
+- `log_wrapper` - The address of the Noop Program.
+- `compression_program` - The address of the State Compression Program.
+
+```rust
+#[derive(Accounts)]
+/// Accounts required for interacting with the Merkle tree for note management.
+pub struct NoteAccounts<'info> {
+    /// The payer for the transaction, who also owns the note.
+    #[account(mut)]
+    pub owner: Signer<'info>,
+
+    /// The PDA (Program Derived Address) authority for the Merkle tree.
+    /// This account is only used for signing and is derived from the Merkle tree address.
+    #[account(
+        seeds = [merkle_tree.key().as_ref()],
+        bump,
+    )]
+    pub tree_authority: SystemAccount<'info>,
+
+    /// The Merkle tree account, where the notes are stored.
+    /// This account is validated by the SPL Account Compression program.
+    ///
+    /// The `UncheckedAccount` type is used since the account’s validation is deferred to the CPI.
+    #[account(mut)]
+    pub merkle_tree: UncheckedAccount<'info>,
+
+    /// The Noop program used for logging data.
+    /// This is part of the SPL Account Compression stack and logs the note operations. 
+ pub log_wrapper: Program<'info, Noop>, + + /// The SPL Account Compression program used for Merkle tree operations. + pub compression_program: Program<'info, SplAccountCompression>, +} +``` + +#### 4. Create `create_note_tree` Instruction handler + +Next, we’ll make the `create_note_tree` instruction handler, to initialize the +already allocated Merkle tree account. + +To implement this, you’ll need to build a CPI to invoke the +`init_empty_merkle_tree` instruction from the State Compression Program. The +`NoteAccounts` struct will provide the necessary accounts, but you’ll also need +to include two additional arguments: + +1. **`max_depth`** - Specifies the maximum depth of the Merkle tree, indicating + the longest path from any leaf to the root. +2. **`max_buffer_size`** - Defines the maximum buffer size for the Merkle tree, + which determines the space allocated for recording tree updates. This buffer + is crucial for supporting concurrent updates within the same block. + +```rust +#[program] +pub mod compressed_notes { + use super::*; + + /// Instruction to create a new note tree (Merkle tree) for storing compressed notes. + /// + /// # Arguments + /// * `ctx` - The context that includes the accounts required for this transaction. + /// * `max_depth` - The maximum depth of the Merkle tree. + /// * `max_buffer_size` - The maximum buffer size of the Merkle tree. + /// + /// # Returns + /// * `Result<()>` - Returns a success or error result. + pub fn create_note_tree( + ctx: Context, + max_depth: u32, // Max depth of the Merkle tree + max_buffer_size: u32, // Max buffer size of the Merkle tree + ) -> Result<()> { + // Get the address for the Merkle tree account + let merkle_tree = ctx.accounts.merkle_tree.key(); + + // The seeds for PDAs signing + let signers_seeds: &[&[&[u8]]] = &[&[ + merkle_tree.as_ref(), // The Merkle tree account address as the seed + &[*ctx.bumps.get("tree_authority").unwrap()], // The bump seed for the tree authority PDA + ]]; + + // Create a CPI (Cross-Program Invocation) context for initializing the empty Merkle tree. + let cpi_ctx = CpiContext::new_with_signer( + ctx.accounts.compression_program.to_account_info(), // The SPL Account Compression program + Initialize { + authority: ctx.accounts.tree_authority.to_account_info(), // PDA authority for the Merkle tree + merkle_tree: ctx.accounts.merkle_tree.to_account_info(), // The Merkle tree account + noop: ctx.accounts.log_wrapper.to_account_info(), // The Noop program for logging data + }, + signers_seeds, // The seeds for PDAs signing + ); + + // CPI call to initialize an empty Merkle tree with the specified depth and buffer size. + init_empty_merkle_tree(cpi_ctx, max_depth, max_buffer_size)?; + + Ok(()) + } + + // Additional functions for the program can go here... +} +``` + +Make sure that when setting up your CPI, you include both the Merkle tree +address and the tree authority bump in the signer seeds. + +#### 5. Create `append_note` Instruction handler + +Let’s create the `append_note` instruction handler. This will compress a raw +note into a hash and store it on the Merkle tree, while also logging the note to +the Noop program to ensure all data remains available onchain. + +Here’s how to accomplish this: + +1. **Hash the Data**: Utilize the `hashv` function from the `keccak` crate to + compute a hash of the note and the owner’s public key. Both should be + converted to their byte representations. It’s essential to hash the owner + along with the note to facilitate ownership verification during updates. 
+ +2. **Log the Data**: Create a `NoteLog` instance with the hash from step 1, the + owner’s public key, and the note as a `String`. Then, use + `wrap_application_data_v1` to issue a CPI to the Noop program with this + `NoteLog` instance. This ensures the complete note (not just the hash) is + available to clients, similar to how indexers manage cNFTs. You might also + develop an observing client to simulate indexer functionality specific to + your application. + +3. **Append to the Merkle Tree**: Build and issue a CPI to the State Compression + Program’s `append` instruction. This will add the hash from step 1 to the + next available leaf on your Merkle tree. Ensure that the Merkle tree address + and the tree authority bump are included as signature seeds. + +```rust +#[program] +pub mod compressed_notes { + use super::*; + + //... + + /// Instruction to append a note to the Merkle tree. + /// + /// # Arguments + /// * `ctx` - The context containing accounts needed for this transaction. + /// * `note` - The note message to append as a leaf node in the Merkle tree. + /// + /// # Returns + /// * `Result<()>` - Returns a success or error result. + pub fn append_note(ctx: Context, note: String) -> Result<()> { + // Step 1: Hash the note message to create a leaf node for the Merkle tree + let leaf_node = keccak::hashv(&[note.as_bytes(), ctx.accounts.owner.key().as_ref()]).to_bytes(); + + // Step 2: Create a new NoteLog instance containing the leaf node, owner, and note + let note_log = NoteLog::new(leaf_node.clone(), ctx.accounts.owner.key().clone(), note); + + // Step 3: Log the NoteLog data using the Noop program + wrap_application_data_v1(note_log.try_to_vec()?, &ctx.accounts.log_wrapper)?; + + // Step 4: Get the Merkle tree account key (address) + let merkle_tree = ctx.accounts.merkle_tree.key(); + + // Step 5: The seeds for PDAs signing + let signers_seeds: &[&[&[u8]]] = &[&[ + merkle_tree.as_ref(), // The address of the Merkle tree account as a seed + &[*ctx.bumps.get("tree_authority").unwrap()], // The bump seed for the PDA + ]]; + + // Step 6: Create a CPI (Cross-Program Invocation) context to modify the Merkle tree + let cpi_ctx = CpiContext::new_with_signer( + ctx.accounts.compression_program.to_account_info(), // SPL Account Compression program + Modify { + authority: ctx.accounts.tree_authority.to_account_info(), // The PDA authority for the + merkle_tree: ctx.accounts.merkle_tree.to_account_info(), // The Merkle tree account to modify + noop: ctx.accounts.log_wrapper.to_account_info(), // The Noop program for logging data + }, + signers_seeds, // Seeds for PDAs with that will sign the transaction + ); + + // Step 7: Append the leaf node to the Merkle tree using CPI + append(cpi_ctx, leaf_node)?; + + Ok(()) + } + + //... +} +``` + +#### 6. Create `update_note` Instruction Handler + +The final instruction we’ll implement is `update_note`, which will replace an +existing leaf with a new hash that represents the updated note data. + +To perform this update, you’ll need the following parameters: + +1. **Index**: The index of the leaf to be updated. +2. **Root**: The root hash of the Merkle tree. +3. **Old Note**: The string representation of the note that is being updated. +4. **New Note**: The string representation of the updated note. + +The process for this instruction is similar to `append_note`, with some +additional steps: + +1. **Verify Ownership**: Before updating, prove that the `owner` executing this + instruction is the rightful owner of the leaf at the specified index. 
Since + the leaf data is compressed as a hash, you can’t directly compare the + `owner`'s public key. Instead, compute the previous hash using the old note + data and the `owner` from the account validation struct. Then, use this + computed hash to build and issue a CPI to the State Compression Program’s + `verify_leaf` instruction. + +2. **Hash the New Data**: Hash the new note and the owner’s public key using the + `hashv` function from the `keccak` crate, converting each to its byte + representation. + +3. **Log the New Data**: Create a `NoteLog` instance with the new hash from step + 2, the owner’s public key, and the new note. Call `wrap_application_data_v1` + to issue a CPI to the Noop program with this `NoteLog` instance, ensuring the + updated note data is available to clients. + +4. **Replace the Leaf**: Build and issue a CPI to the State Compression + Program’s `replace_leaf` instruction. This will replace the old hash with the + new hash at the specified leaf index. Ensure the Merkle tree address and the + tree authority bump are included as signature seeds. + +```rust +#[program] +pub mod compressed_notes { + use super::*; + + //... + + /// Instruction to update a note in the Merkle tree. + /// + /// # Arguments + /// * `ctx` - The context containing accounts needed for this transaction. + /// * `index` - The index of the note to update in the Merkle tree. + /// * `root` - The root hash of the Merkle tree for verification. + /// * `old_note` - The current note to be updated. + /// * `new_note` - The new note that will replace the old one. + /// + /// # Returns + /// * `Result<()>` - Returns a success or error result. + pub fn update_note( + ctx: Context, + index: u32, + root: [u8; 32], + old_note: String, + new_note: String, + ) -> Result<()> { + // Step 1: Hash the old note to generate the corresponding leaf node + let old_leaf = keccak::hashv(&[old_note.as_bytes(), ctx.accounts.owner.key().as_ref()]).to_bytes(); + + // Step 2: Get the address of the Merkle tree account + let merkle_tree = ctx.accounts.merkle_tree.key(); + + // Step 3: The seeds for PDAs signing + let signers_seeds: &[&[&[u8]]] = &[&[ + merkle_tree.as_ref(), // The address of the Merkle tree account as a seed + &[*ctx.bumps.get("tree_authority").unwrap()], // The bump seed for the PDA + ]]; + + // Step 4: Check if the old note and new note are the same + if old_note == new_note { + msg!("Notes are the same!"); + return Ok(()); + } + + // Step 5: Verify the leaf node in the Merkle tree + let verify_cpi_ctx = CpiContext::new_with_signer( + ctx.accounts.compression_program.to_account_info(), // The SPL account compression program + VerifyLeaf { + merkle_tree: ctx.accounts.merkle_tree.to_account_info(), // The Merkle tree account to be modified + }, + signers_seeds, // The seeds for PDAs signing + ); + // Verify or fail + verify_leaf(verify_cpi_ctx, root, old_leaf, index)?; + + // Step 6: Hash the new note to create the new leaf node + let new_leaf = keccak::hashv(&[new_note.as_bytes(), ctx.accounts.owner.key().as_ref()]).to_bytes(); + + // Step 7: Create a NoteLog entry for the new note + let note_log = NoteLog::new(new_leaf.clone(), ctx.accounts.owner.key().clone(), new_note); + + // Step 8: Log the NoteLog data using the Noop program + wrap_application_data_v1(note_log.try_to_vec()?, &ctx.accounts.log_wrapper)?; + + // Step 9: Prepare to replace the old leaf node with the new one in the Merkle tree + let modify_cpi_ctx = CpiContext::new_with_signer( + ctx.accounts.compression_program.to_account_info(), // The 
SPL account compression program + Modify { + authority: ctx.accounts.tree_authority.to_account_info(), // The authority for the Merkle tree, using a PDA + merkle_tree: ctx.accounts.merkle_tree.to_account_info(), // The Merkle tree account to be modified + noop: ctx.accounts.log_wrapper.to_account_info(), // The Noop program to log data + }, + signers_seeds, // The seeds for PDAs signing + ); + + // Step 10: Replace the old leaf node with the new leaf node in the Merkle tree + replace_leaf(modify_cpi_ctx, root, old_leaf, new_leaf, index)?; + + Ok(()) + } +} +``` + +#### 7. Client Test Setup + +To ensure our program functions correctly, we’ll set up and write some tests. +Here’s what you need to do for the setup: + +1. **Install Dependencies**: We’ll be using the + `@solana/spl-account-compression` package for our tests. Install it using the + following command: + +```bash +yarn add @solana/spl-account-compression +``` + +2. **Create Utility File**: To simplify testing, we’ve provided a utility file. + Create a `utils.ts` file in the `tests` directory and add the provided + contents. We’ll go over the details of this file shortly. + +```typescript +import { + SPL_NOOP_PROGRAM_ID, + deserializeApplicationDataEvent, +} from "@solana/spl-account-compression"; +import { Connection, PublicKey } from "@solana/web3.js"; +import { bs58 } from "@coral-xyz/anchor/dist/cjs/utils/bytes"; +import { deserialize } from "borsh"; +import { keccak256 } from "js-sha3"; + +class NoteLog { + leafNode: Uint8Array; + owner: PublicKey; + note: string; + + constructor(properties: { + leafNode: Uint8Array; + owner: Uint8Array; + note: string; + }) { + this.leafNode = properties.leafNode; + this.owner = new PublicKey(properties.owner); + this.note = properties.note; + } +} + +// A map that describes the Note structure for Borsh deserialization +const NoteLogBorshSchema = new Map([ + [ + NoteLog, + { + kind: "struct", + fields: [ + ["leafNode", [32]], // Array of 32 `u8` + ["owner", [32]], // Pubkey + ["note", "string"], + ], + }, + ], +]); + +export function getHash(note: string, owner: PublicKey) { + const noteBuffer = Buffer.from(note); + const publicKeyBuffer = Buffer.from(owner.toBytes()); + const concatenatedBuffer = Buffer.concat([noteBuffer, publicKeyBuffer]); + const concatenatedUint8Array = new Uint8Array( + concatenatedBuffer.buffer, + concatenatedBuffer.byteOffset, + concatenatedBuffer.byteLength, + ); + return keccak256(concatenatedUint8Array); +} + +export async function getNoteLog(connection: Connection, txSignature: string) { + // Confirm the transaction, otherwise the getTransaction sometimes returns null + const latestBlockHash = await connection.getLatestBlockhash(); + await connection.confirmTransaction({ + blockhash: latestBlockHash.blockhash, + lastValidBlockHeight: latestBlockHash.lastValidBlockHeight, + signature: txSignature, + }); + + // Get the transaction info using the tx signature + const txInfo = await connection.getTransaction(txSignature, { + maxSupportedTransactionVersion: 0, + }); + + // Get the inner instructions related to the program instruction at index 0 + // We only send one instruction in test transaction, so we can assume the first + const innerIx = txInfo!.meta?.innerInstructions?.[0]?.instructions; + + // Get the inner instructions that match the SPL_NOOP_PROGRAM_ID + const noopInnerIx = innerIx.filter( + instruction => + txInfo?.transaction.message.staticAccountKeys[ + instruction.programIdIndex + ].toBase58() === SPL_NOOP_PROGRAM_ID.toBase58(), + ); + + let noteLog: 
NoteLog; + for (let i = noopInnerIx.length - 1; i >= 0; i--) { + try { + // Try to decode and deserialize the instruction data + const applicationDataEvent = deserializeApplicationDataEvent( + Buffer.from(bs58.decode(noopInnerIx[i]?.data!)), + ); + + // Get the application data + const applicationData = applicationDataEvent.fields[0].applicationData; + + // Deserialize the application data into NoteLog instance + noteLog = deserialize( + NoteLogBorshSchema, + NoteLog, + Buffer.from(applicationData), + ); + + if (noteLog !== undefined) { + break; + } + } catch (__) {} + } + + return noteLog; +} +``` + +The `utils.ts` file contains three key components: + +1. **`NoteLog` Class**: This class represents the note log that we’ll extract + from the Noop program logs. It also includes the Borsh schema, named + `NoteLogBorshSchema`, which is used for deserialization. + +2. **`getHash` Function**: This function generates a hash from the note and its + owner, allowing us to compare it against the data in the Merkle tree. + +3. **`getNoteLog` Function**: This function searches through the transaction + logs to locate the Noop program logs then deserializes and retrieves the + corresponding `NoteLog`. + +#### 8. Write Client Tests + +With our packages and utility file set up, we’re ready to dive into writing the +tests. We will create four tests for our program: + +1. **Create Note Tree**: This test will initialize the Merkle tree for storing + note hashes. +2. **Add Note**: This test will invoke the `append_note` instruction to add a + note to the tree. +3. **adds max size note to the Merkle tree**: This test will also use the + `append_note` instruction, but with a note that reaches the maximum allowable + size of 1232 bytes in a single transaction. +4. **Updates the first note in the Merkle tree**: This test will use the + `update_note` instruction to modify the first note that was added. + +The first test is mainly for setup purposes. For the remaining three tests, we +will check that the note hash in the Merkle tree matches the expected value +based on the note content and the signer. + +We’ll start by setting up our imports. This includes a variety of components +from Anchor, `@solana/web3.js`, `@solana/spl-account-compression`, and our own +utility functions. + +```typescript +import * as anchor from "@coral-xyz/anchor"; +import { Program } from "@coral-xyz/anchor"; +import { CompressedNotes } from "../target/types/compressed_notes"; +import { + Keypair, + Transaction, + PublicKey, + sendAndConfirmTransaction, + Connection, +} from "@solana/web3.js"; +import { + ValidDepthSizePair, + createAllocTreeIx, + SPL_ACCOUNT_COMPRESSION_PROGRAM_ID, + SPL_NOOP_PROGRAM_ID, + ConcurrentMerkleTreeAccount, +} from "@solana/spl-account-compression"; +import { getHash, getNoteLog } from "./utils"; +import { assert } from "chai"; +``` + +Next, we’ll set up the state variables needed for our tests. This setup will +include: + +1. **Default Anchor Setup**: Configure the basic environment for Anchor testing. +2. **Merkle Tree Keypair**: Generate a keypair for the Merkle tree. +3. **Tree Authority**: Create a keypair for the authority of the Merkle tree. +4. **Notes**: Define some sample notes to use in the tests. 
+ +```typescript +describe("compressed-notes", () => { + const provider = anchor.AnchorProvider.env(); + anchor.setProvider(provider); + const connection = new Connection( + provider.connection.rpcEndpoint, + "confirmed", + ); + + const wallet = provider.wallet as anchor.Wallet; + const program = anchor.workspace.CompressedNotes as Program; + + // Generate a new keypair for the Merkle tree account + const merkleTree = Keypair.generate(); + + // Derive the PDA to use as the tree authority for the Merkle tree account + const [treeAuthority] = PublicKey.findProgramAddressSync( + [merkleTree.publicKey.toBuffer()], + program.programId, + ); + + const firstNote = "hello world"; + const secondNote = "0".repeat(917); + const updatedNote = "updated note"; + + describe("Merkle Tree Operations", () => { + // Tests will go here + }); +}); +``` + +Now, let’s dive into the `Create Note Tree` test. This test will accomplish two +key tasks: + +1. **Allocate a New Merkle Tree Account**: Create a new account for the Merkle + tree, specifying a max depth of 3, a max buffer size of 8, and a canopy depth + of 0. +2. **Initialize the Account**: Use our program’s `createNoteTree` instruction to + set up the newly allocated Merkle tree account. + +```typescript +it("creates a new note tree", async () => { + const maxDepthSizePair: ValidDepthSizePair = { + maxDepth: 3, + maxBufferSize: 8, + }; + + const canopyDepth = 0; + + // Instruction to create a new account with the required space for the tree + const allocTreeIx = await createAllocTreeIx( + connection, + merkleTree.publicKey, + wallet.publicKey, + maxDepthSizePair, + canopyDepth, + ); + + // Instruction to initialize the tree through the Note program + const ix = await program.methods + .createNoteTree(maxDepthSizePair.maxDepth, maxDepthSizePair.maxBufferSize) + .accounts({ + owner: wallet.publicKey, + merkleTree: merkleTree.publicKey, + treeAuthority, + logWrapper: SPL_NOOP_PROGRAM_ID, + compressionProgram: SPL_ACCOUNT_COMPRESSION_PROGRAM_ID, + }) + .instruction(); + + const tx = new Transaction().add(allocTreeIx, ix); + await sendAndConfirmTransaction(connection, tx, [wallet.payer, merkleTree]); + + // Fetch the Merkle tree account to confirm it’s initialized + const merkleTreeAccount = + await ConcurrentMerkleTreeAccount.fromAccountAddress( + connection, + merkleTree.publicKey, + ); + assert(merkleTreeAccount, "Merkle tree should be initialized"); +}); + +it("adds a note to the Merkle tree", async () => { + const txSignature = await program.methods + .appendNote(firstNote) + .accounts({ + owner: wallet.publicKey, + merkleTree: merkleTree.publicKey, + treeAuthority, + logWrapper: SPL_NOOP_PROGRAM_ID, + compressionProgram: SPL_ACCOUNT_COMPRESSION_PROGRAM_ID, + }) + .rpc(); + + const noteLog = await getNoteLog(connection, txSignature); + const hash = getHash(firstNote, wallet.publicKey); + + assert( + hash === Buffer.from(noteLog.leafNode).toString("hex"), + "Leaf node hash should match", + ); + assert(firstNote === noteLog.note, "Note should match the appended note"); +}); + +it("adds max size note to the Merkle tree", async () => { + const txSignature = await program.methods + .appendNote(secondNote) + .accounts({ + owner: wallet.publicKey, + merkleTree: merkleTree.publicKey, + treeAuthority, + logWrapper: SPL_NOOP_PROGRAM_ID, + compressionProgram: SPL_ACCOUNT_COMPRESSION_PROGRAM_ID, + }) + .rpc(); + + const noteLog = await getNoteLog(connection, txSignature); + const hash = getHash(secondNote, wallet.publicKey); + + assert( + hash === 
Buffer.from(noteLog.leafNode).toString("hex"), + "Leaf node hash should match", + ); + assert( + secondNote === noteLog.note, + "Note should match the appended max size note", + ); +}); + +it("updates the first note in the Merkle tree", async () => { + const merkleTreeAccount = + await ConcurrentMerkleTreeAccount.fromAccountAddress( + connection, + merkleTree.publicKey, + ); + const root = merkleTreeAccount.getCurrentRoot(); + + const txSignature = await program.methods + .updateNote(0, root, firstNote, updatedNote) + .accounts({ + owner: wallet.publicKey, + merkleTree: merkleTree.publicKey, + treeAuthority, + logWrapper: SPL_NOOP_PROGRAM_ID, + compressionProgram: SPL_ACCOUNT_COMPRESSION_PROGRAM_ID, + }) + .rpc(); + + const noteLog = await getNoteLog(connection, txSignature); + const hash = getHash(updatedNote, wallet.publicKey); + + assert( + hash === Buffer.from(noteLog.leafNode).toString("hex"), + "Leaf node hash should match after update", + ); + assert( + updatedNote === noteLog.note, + "Updated note should match the logged note", + ); +}); +``` + +That’s a wrap—congratulations! Run `anchor test`, and you should see all four +tests passing. + +If you encounter any issues, don’t hesitate to revisit the demo or check out the +complete solution code in the +[Compressed Notes repository](https://github.com/unboxed-software/anchor-compressed-notes). + +### Challenge + +Now that you’ve got the hang of state compression, it’s time to add a new +feature to the Compressed Notes program. Your task is to implement an +instruction that allows users to delete an existing note. Keep in mind that you +can’t physically remove a leaf from the Merkle tree, so you’ll need to come up +with a method to signify that a note has been deleted. + +Good luck, and happy coding! + +For a straightforward example of how to implement a delete function, check out +the +[`main` branch on GitHub](https://github.com/Unboxed-Software/anchor-compressed-notes/tree/main). + + + +Push your code to GitHub and [let us know what you think of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=60f6b072-eaeb-469c-b32e-5fea4b72d1d1)! + + +``` diff --git a/content/courses/state-compression/index.mdx b/content/courses/state-compression/index.mdx new file mode 100644 index 000000000..343d86594 --- /dev/null +++ b/content/courses/state-compression/index.mdx @@ -0,0 +1,5 @@ +--- +author: unboxed +title: State Compression +description: Use state compression for large NFT collections and other large-data cases. +--- diff --git a/content/courses/state-compression/meta.json b/content/courses/state-compression/meta.json new file mode 100644 index 000000000..08d4c38b0 --- /dev/null +++ b/content/courses/state-compression/meta.json @@ -0,0 +1,3 @@ +{ + "pages": ["compressed-nfts", "generalized-state-compression"] +} diff --git a/content/courses/token-extensions/close-mint.mdx b/content/courses/token-extensions/close-mint.mdx new file mode 100644 index 000000000..60d54eaa2 --- /dev/null +++ b/content/courses/token-extensions/close-mint.mdx @@ -0,0 +1,665 @@ +--- +title: Close Mint Extension +objectives: + - Create a mint that is closable + - Describe all of the prerequisites needed to close a mint +description: "Create a mint that can be closed once the tokens are burnt." +--- + +## Summary + +- The original Token Program only allowed closing token accounts, but not mint + accounts. +- Token Extensions Program includes the `close mint` extension which allows mint + accounts to be closed. 
+- To close a mint with the `close mint` extension, the supply of said mint
+  needs to be 0.
+- The `mintCloseAuthority` can be updated by calling `setAuthority`.
+
+## Overview
+
+The original Token Program only allows owners to close token accounts, not mint
+accounts. So if you create a mint, you'll never be able to close the account.
+This has resulted in a lot of wasted space on the blockchain. To remedy this,
+the Token Extensions Program introduced the `close mint` extension. This simply
+allows a mint account to be closed and the lamports refunded. The only caveat is
+that the supply of said mint needs to be 0.
+
+This extension is a nice improvement for developers, who may have thousands of
+mint accounts that could be cleaned up and refunded. Additionally, it's great
+for NFT holders who wish to burn their NFT. They will now be able to recover all
+of the costs, i.e., the rent from closing the mint, metadata, and token
+accounts. Whereas before, someone who burned an NFT would only recover the
+metadata and token account's rent. Note that the burner would also have to be
+the `mintCloseAuthority`.
+
+The `close mint` extension adds an additional field, `mintCloseAuthority`, to
+the mint account. This is the address of the authority allowed to close the
+account.
+
+Again, for a mint to be closed with this extension, the supply has to be 0. So
+if any of this token is minted, it will have to be burned first.
+
+### Create Mint with Close Authority
+
+Initializing the mint with the close authority extension involves three
+instructions:
+
+- `SystemProgram.createAccount`
+- `createInitializeMintCloseAuthorityInstruction`
+- `createInitializeMintInstruction`
+
+The first instruction, `SystemProgram.createAccount`, allocates space on the
+blockchain for the mint account. However, like all Token Extensions Program
+mints, we need to calculate the size and cost of the mint. This can be
+accomplished by using `getMintLen` and `getMinimumBalanceForRentExemption`. In
+this case, we'll call `getMintLen` with only the
+`ExtensionType.MintCloseAuthority` extension.
+
+To get the mint length and create account instruction, do the following:
+
+```ts
+const extensions = [ExtensionType.MintCloseAuthority];
+const mintLength = getMintLen(extensions);
+
+const mintLamports =
+  await connection.getMinimumBalanceForRentExemption(mintLength);
+
+const createAccountInstruction = SystemProgram.createAccount({
+  fromPubkey: payer,
+  newAccountPubkey: mint,
+  space: mintLength,
+  lamports: mintLamports,
+  programId: TOKEN_2022_PROGRAM_ID,
+});
+```
+
+The second instruction, `createInitializeMintCloseAuthorityInstruction`,
+initializes the close authority extension. The only notable parameter is the
+`mintCloseAuthority` in the second position. This is the address that can close
+the mint.
+
+```ts
+const initializeMintCloseAuthorityInstruction =
+  createInitializeMintCloseAuthorityInstruction(
+    mint,
+    authority,
+    TOKEN_2022_PROGRAM_ID,
+  );
+```
+
+The last instruction, `createInitializeMintInstruction`, initializes the mint.
+
+```ts
+const initializeMintInstruction = createInitializeMintInstruction(
+  mint,
+  decimals,
+  payer.publicKey,
+  null,
+  TOKEN_2022_PROGRAM_ID,
+);
+```
+
+Finally, we add the instructions to the transaction and submit it to the Solana
+network. 
+ +```typescript +const mintTransaction = new Transaction().add( + createAccountInstruction, + initializeMintCloseAuthorityInstruction, + initializeMintInstruction, +); + +const signature = await sendAndConfirmTransaction( + connection, + mintTransaction, + [payer, mintKeypair], + { commitment: "finalized" }, +); +``` + +When the transaction is sent, a new mint account is created with the specified +close authority. + +### Close Mint with Close Authority + +To close a mint with the `close mint` extension, all that is needed is to call +the `closeAccount` function. + +Remember, that to close the mint account, the total supply has to be 0. So if +any tokens exist, they have to be burned first. You can do this with the `burn` +function. + + + +The `closeAccount` function works for mints and token +accounts alike. + + + +```ts +// burn tokens to 0 +const burnSignature = await burn( + connection, // connection - Connection to use + payer, // payer -Payer of the transaction fees + sourceAccount, // account - Account to burn tokens from + mintKeypair.publicKey, // mint - Mint for the account + sourceKeypair, // owner - Account owner + sourceAccountInfo.amount, // amount - Amount to burn + [], // multiSigners - Signing accounts if `owner` is a multisig + { commitment: "finalized" }, // confirmOptions - Options for confirming the transaction + TOKEN_2022_PROGRAM_ID, // programId - SPL Token program account +); + +// account can be closed as total supply is now 0 +await closeAccount( + connection, // connection - Connection to use + payer, // payer - Payer of the transaction fees + mintKeypair.publicKey, // account - Account to close + payer.publicKey, // destination - Account to receive the remaining balance of the closed account + payer, // authority - Authority which is allowed to close the account + [], // multiSigners - Signing accounts if `authority` is a multisig + { commitment: "finalized" }, // confirmOptions - Options for confirming the transaction + TOKEN_2022_PROGRAM_ID, // programIdSPL Token program account +); +``` + +### Update Close Mint Authority + +To change the `closeMintAuthority` you can call the `setAuthority` function and +pass in the right accounts, as well as the `authorityType`, which in this case +is `AuthorityType.CloseMint` + +```ts +/** + * Assign a new authority to the account + * + * @param connection Connection to use + * @param payer Payer of the transaction fees + * @param account Address of the account + * @param currentAuthority Current authority of the specified type + * @param authorityType Type of authority to set + * @param newAuthority New authority of the account + * @param multiSigners Signing accounts if `currentAuthority` is a multisig + * @param confirmOptions Options for confirming the transaction + * @param programId SPL Token program account + * + * @return Signature of the confirmed transaction + */ + +await setAuthority( + connection, + payer, + mint, + currentAuthority, + AuthorityType.CloseMint, + newAuthority, + [], + undefined, + TOKEN_2022_PROGRAM_ID, +); +``` + +## Lab + +In this lab, we'll create a token mint account with the `close mint` extension. +We will then mint some of the tokens and see what happens when we try to close +it with a non-zero supply (hint, the close transaction will fail). Lastly, we +will burn the supply and close the account. + +### 1. Getting Started + +To get started, create an empty directory named `close-mint` and navigate to it. +We'll be initializing a brand new project. 
Run `npm init` and follow through the +prompts. + +Next, we'll need to add our dependencies. Run the following to install the +required packages: + +```bash +npm i @solana-developers/helpers@2 @solana/spl-token @solana/web3.js@1 esrun dotenv typescript +``` + +Create a directory named `src`. In this directory, create a file named +`index.ts`. This is where we will run checks against the rules of this +extension. Paste the following code in `index.ts`: + +```ts +import { Connection, Keypair, LAMPORTS_PER_SOL } from "@solana/web3.js"; +import { initializeKeypair } from "@solana-developers/helpers"; +// import { createClosableMint } from './create-mint' // - uncomment this in a later step +import { + TOKEN_2022_PROGRAM_ID, + burn, + closeAccount, + createAccount, + getAccount, + getMint, + mintTo, +} from "@solana/spl-token"; +import dotenv from "dotenv"; +dotenv.config(); + +/** + * Create a connection and initialize a keypair if one doesn't already exists. + * If a keypair exists, airdrop a SOL if needed. + */ +const connection = new Connection("http://127.0.0.1:8899"); +const payer = await initializeKeypair(connection); + +console.log(`public key: ${payer.publicKey.toBase58()}`); + +const mintKeypair = Keypair.generate(); +const mint = mintKeypair.publicKey; +console.log("\nmint public key: " + mintKeypair.publicKey.toBase58() + "\n\n"); + +// CREATE A MINT WITH CLOSE AUTHORITY + +// MINT TOKEN + +// VERIFY SUPPLY + +// TRY CLOSING WITH NON ZERO SUPPLY + +// BURN SUPPLY + +// CLOSE MINT +``` + +`index.ts` creates a connection to the specified validator node and calls +`initializeKeypair`. This is where we'll end up calling the rest of our script +once we've written it. + +Go ahead and run the script. You should see the `payer` and `mint` public key +logged to your terminal. + +```bash +esrun src/index.ts +``` + +If you run into an error in `initializeKeypair` with airdropping, follow the +next step. + +#### 2. Run validator node + +For the sake of this guide, we'll be running our own validator node. + +In a separate terminal, run the following command: `solana-test-validator`. This +will run the node and also log out some keys and values. The value we need to +retrieve and use in our connection is the JSON RPC URL, which in this case is +`http://127.0.0.1:8899`. We then use that in the connection to specify to use +the local RPC URL. + +```typescript +const connection = new Connection("http://127.0.0.1:8899", "confirmed"); +``` + +Alternatively, if you'd like to use testnet or devnet, import the +`clusterApiUrl` from `@solana/web3.js` and pass it to the connection as such: + +```typescript +const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); +``` + +If you decide to use devnet, and have issues with airdropping SOL, feel free to +add the `keypairPath` parameter to `initializeKeypair`. You can get this from +running `solana config get` in your terminal. And then go to +[faucet.solana.com](https://faucet.solana.com/) and airdrop some SOL to your +address. You can get your address from running `solana address` in your +terminal. + +For example, assuming `keypairPath` is `/home/.config/solana/id.json` + +```typescript +const payer = initializeKeypair(connection, { + keypairPath: "/home/.config/solana/id.json", +}); +``` + +### 3. Create a mint with close authority + +Let's create a closable mint by creating the function `createClosableMint` in a +new file `src/create-mint.ts`. 
+
+To create a closable mint, we need several functions:
+
+- `getMintLen`: Gets the space needed for the mint account
+- `Connection.getMinimumBalanceForRentExemption`: Tells us the cost of the rent
+  for the mint account
+- `SystemProgram.createAccount`: Creates the instruction to allocate space on
+  Solana for the mint account
+- `createInitializeMintCloseAuthorityInstruction`: Creates the instruction to
+  initialize the close mint extension - this takes the `closeMintAuthority` as a
+  parameter.
+- `createInitializeMintInstruction`: Creates the instruction to initialize the
+  mint
+- `sendAndConfirmTransaction`: Sends the transaction to the blockchain
+
+We'll call all of these functions in turn. But before that, let's define the
+inputs to our `createClosableMint` function:
+
+- `connection: Connection`: The connection object
+- `payer: Keypair`: Payer for the transaction
+- `mintKeypair: Keypair`: Keypair for the new mint
+- `decimals: number`: Mint decimals
+
+Putting it all together, we get:
+
+```ts
+import {
+  sendAndConfirmTransaction,
+  Connection,
+  Keypair,
+  SystemProgram,
+  Transaction,
+  TransactionSignature,
+} from "@solana/web3.js";
+
+import {
+  ExtensionType,
+  createInitializeMintInstruction,
+  getMintLen,
+  TOKEN_2022_PROGRAM_ID,
+  createInitializeMintCloseAuthorityInstruction,
+} from "@solana/spl-token";
+
+export async function createClosableMint(
+  connection: Connection,
+  payer: Keypair,
+  mintKeypair: Keypair,
+  decimals: number,
+): Promise<TransactionSignature> {
+  const extensions = [ExtensionType.MintCloseAuthority];
+  const mintLength = getMintLen(extensions);
+
+  const mintLamports =
+    await connection.getMinimumBalanceForRentExemption(mintLength);
+
+  console.log("Creating a transaction with close mint instruction...");
+  const mintTransaction = new Transaction().add(
+    SystemProgram.createAccount({
+      fromPubkey: payer.publicKey,
+      newAccountPubkey: mintKeypair.publicKey,
+      space: mintLength,
+      lamports: mintLamports,
+      programId: TOKEN_2022_PROGRAM_ID,
+    }),
+    createInitializeMintCloseAuthorityInstruction(
+      mintKeypair.publicKey,
+      payer.publicKey,
+      TOKEN_2022_PROGRAM_ID,
+    ),
+    createInitializeMintInstruction(
+      mintKeypair.publicKey,
+      decimals,
+      payer.publicKey,
+      null,
+      TOKEN_2022_PROGRAM_ID,
+    ),
+  );
+
+  console.log("Sending transaction...");
+  const signature = await sendAndConfirmTransaction(
+    connection,
+    mintTransaction,
+    [payer, mintKeypair],
+    { commitment: "finalized" },
+  );
+
+  return signature;
+}
+```
+
+Now let's call this function in `src/index.ts`. First you'll need to import our
+new function by uncommenting the 3rd line.
+
+```ts
+import { createClosableMint } from "./create-mint";
+```
+
+Then paste the following under the right comment section:
+
+```ts
+// CREATE A MINT WITH CLOSE AUTHORITY
+const decimals = 9;
+
+await createClosableMint(connection, payer, mintKeypair, decimals);
+```
+
+This will create a transaction with the close mint instruction.
+
+Feel free to run this and check that everything is working:
+
+```bash
+esrun src/index.ts
+```
+
+### 4. Closing the mint
+
+We're going to close the mint, but first, let's explore what happens when we try
+to close it while there is still a supply (hint: it'll fail).
+
+To do this, we are going to mint some tokens, try to close the mint, then burn
+the tokens, and then actually close it.
+
+#### 4.1 Mint a token
+
+In `src/index.ts`, create an account and mint 1 token to that account.
+
+We can accomplish this by calling two functions: `createAccount` and `mintTo`:
+
+```ts
+// MINT TOKEN
+/**
+ * Create an account and mint 1 token to that account
+ */
+console.log("Creating an account...");
+const sourceKeypair = Keypair.generate();
+const sourceAccount = await createAccount(
+  connection,
+  payer,
+  mint,
+  sourceKeypair.publicKey,
+  undefined,
+  { commitment: "finalized" },
+  TOKEN_2022_PROGRAM_ID,
+);
+
+console.log("Minting 1 token...\n\n");
+const amount = 1 * LAMPORTS_PER_SOL;
+await mintTo(
+  connection,
+  payer,
+  mint,
+  sourceAccount,
+  payer,
+  amount,
+  [payer],
+  { commitment: "finalized" },
+  TOKEN_2022_PROGRAM_ID,
+);
+```
+
+Now we can verify that the mint supply is non-zero by fetching the mint info.
+Underneath the minting functions, add the following code block:
+
+```ts
+// VERIFY SUPPLY
+/**
+ * Get mint information to verify supply
+ */
+let mintInfo = await getMint(
+  connection,
+  mintKeypair.publicKey,
+  "finalized",
+  TOKEN_2022_PROGRAM_ID,
+);
+console.log("Initial supply: ", mintInfo.supply);
+```
+
+Let's run the script and check the initial supply:
+
+```bash
+esrun src/index.ts
+```
+
+You should see the following in your terminal:
+
+```bash
+Initial supply: 1000000000n
+```
+
+#### 4.2 Closing the mint with non-zero supply
+
+Now we'll attempt to close the mint while the supply is non-zero. We know this
+is going to fail, since the `close mint` extension requires the supply to be
+zero. So, to see the resulting error message, we'll wrap the `closeAccount`
+function in a try/catch and log out the error:
+
+```ts
+// TRY CLOSING WITH NON ZERO SUPPLY
+/**
+ * Try closing the mint account when supply is not 0
+ *
+ * Should throw `SendTransactionError`
+ */
+try {
+  await closeAccount(
+    connection,
+    payer,
+    mintKeypair.publicKey,
+    payer.publicKey,
+    payer,
+    [],
+    { commitment: "finalized" },
+    TOKEN_2022_PROGRAM_ID,
+  );
+} catch (e) {
+  console.log(
+    "Close account fails here because the supply is not zero. Check the program logs:",
+    (e as any).logs,
+    "\n\n",
+  );
+}
+```
+
+Give this a run:
+
+```bash
+esrun src/index.ts
+```
+
+We'll see that the program throws an error along with the program logs. You
+should see the following:
+
+```bash
+Close account fails here because the supply is not zero.
+```
+
+#### 4.3 Burning the supply
+
+Let's burn the whole supply so we can actually close the mint. We do this by
+calling `burn`:
+
+```ts
+// BURN SUPPLY
+const sourceAccountInfo = await getAccount(
+  connection,
+  sourceAccount,
+  "finalized",
+  TOKEN_2022_PROGRAM_ID,
+);
+
+console.log("Burning the supply...");
+const burnSignature = await burn(
+  connection,
+  payer,
+  sourceAccount,
+  mintKeypair.publicKey,
+  sourceKeypair,
+  sourceAccountInfo.amount,
+  [],
+  { commitment: "finalized" },
+  TOKEN_2022_PROGRAM_ID,
+);
+```
+
+#### 4.4 Close the mint
+
+With no tokens in circulation, we can now close the mint. At this point, we
+could simply call `closeAccount`; however, for the sake of visualizing how this
+works, we'll do the following:
+
+  - Retrieve Mint Information: Initially, we fetch and inspect the mint's details, particularly focusing on the supply, which should be zero at this stage. This shows that the mint is eligible to be closed.
+
+  - Verify Account Status: Next, we confirm the status of the account to ensure that it is still open and active.
+
+  - Close the Account: Once we've verified the account's open status, we proceed to close the mint account.
+ + - Confirm Closure: Finally, after invoking the `closeAccount` function, we check the account status once more to confirm that it has indeed been closed successfully. + +We can accomplish all of this with the following functions: + +- `getMint`: Grabs the mint account and deserializes the information +- `getAccountInfo`: Grabs the mint account, so we can check it exists - we'll + call this before and after the close. +- `closeAccount`: Closes the mint + +Putting this all together we get: + +```ts +// CLOSE MINT +mintInfo = await getMint( + connection, + mintKeypair.publicKey, + "finalized", + TOKEN_2022_PROGRAM_ID, +); + +console.log("After burn supply: ", mintInfo.supply); + +const accountInfoBeforeClose = await connection.getAccountInfo( + mintKeypair.publicKey, + "finalized", +); + +console.log("Account closed? ", accountInfoBeforeClose === null); + +console.log("Closing account after burning the supply..."); +const closeSignature = await closeAccount( + connection, + payer, + mintKeypair.publicKey, + payer.publicKey, + payer, + [], + { commitment: "finalized" }, + TOKEN_2022_PROGRAM_ID, +); + +const accountInfoAfterClose = await connection.getAccountInfo( + mintKeypair.publicKey, + "finalized", +); + +console.log("Account closed? ", accountInfoAfterClose === null); +``` + +Run the script one last time. + +```bash +esrun src/index.ts +``` + +You should see the whole process of creating a closable mint, minting a token, +trying to close, burning the token, and finally closing the account. + +That's it! We have successfully created a mint with close authority. If you get +stuck at any point, you can find working code in the `solution` branch of +[this repository](https://github.com/Unboxed-Software/solana-lab-close-mint-account/tree/solution). + +## Challenge + +For the challenge, try and create your own mint and mint to several token +accounts, then create a script to burn all of those token accounts, then close +the mint. diff --git a/content/courses/token-extensions/cpi-guard.mdx b/content/courses/token-extensions/cpi-guard.mdx new file mode 100644 index 000000000..0d3d1fd45 --- /dev/null +++ b/content/courses/token-extensions/cpi-guard.mdx @@ -0,0 +1,1406 @@ +--- +title: CPI Guard +objectives: + - Explain what the CPI Guard protects against + - Write code to test the CPI Guard +description: + "Create tokens that don't allow certain accounts to be invoked from other + programs." +--- + +## Summary + +- `CPI Guard` is a token account extension from the Token Extensions Program +- The `CPI Guard` extension prohibits certain actions inside cross-program + invocations. When enabled, the guard provides protections against various + potentially malicious actions on a token account +- `CPI Guard` can be enabled or disabled at will +- These protections are enforced within the `Token Extensions Program` itself + +## Overview + +CPI Guard is an extension that prohibits certain actions inside cross-program +invocations, protecting users from implicitly signing for actions they can't +see, such as those hidden in programs that aren't the System or Token programs. + +A specific example of this is when the CPI guard is enabled, no CPI can approve +a delegate over a token account. This is handy, because if a malicious CPI calls +`set_delegate` no immediate balance change will be apparent, however the +attacker now has transfer and burn authority over the token account. CPI guard +makes this impossible. 
+
+Users may choose to enable or disable the CPI Guard extension on their token
+account at will. When enabled, it has the following effects during a CPI:
+
+- Transfer: the signing authority must be the owner or previously established
+  account delegate
+- Burn: the signing authority must be the owner or previously established
+  account delegate
+- Approve: prohibited - no delegates can be approved within the CPI
+- Close Account: the lamport destination must be the account owner
+- Set Close Authority: prohibited unless unsetting
+- Set Owner: always prohibited, including outside CPI
+
+The CPI Guard is a token account extension, meaning each individual Token
+Extensions Program token account has to enable it.
+
+### How the CPI Guard Works
+
+The CPI Guard can be enabled and disabled on a token account that was created
+with enough space for the extension. The `Token Extensions Program` runs a few
+checks in the logic for the actions listed above to determine whether a
+CPI-guarded instruction should be allowed to continue. Generally, it does the
+following:
+
+- Check if the account has the CPI Guard extension
+- Check if CPI Guard is enabled on the token account
+- Check if the function is being executed within a CPI
+
+A good way to think about the CPI Guard token extension is simply as a lock that
+is either enabled or disabled. The guard uses a
+[data struct called `CpiGuard`](https://github.com/solana-labs/solana-program-library/blob/ce8e4d565edcbd26e75d00d0e34e9d5f9786a646/token/program-2022/src/extension/cpi_guard/mod.rs#L24)
+that stores a boolean value. That value indicates whether the guard is enabled
+or disabled. The CPI Guard extension only has two instructions, `Enable` and
+`Disable`. They each toggle this boolean value.
+
+```rust
+pub struct CpiGuard {
+    /// Lock privileged token operations from happening via CPI
+    pub lock_cpi: PodBool,
+}
+```
+
+The CPI Guard has two additional helper functions that the
+`Token Extensions Program` is able to use to help determine when the CPI Guard
+is enabled and when the instruction is being executed as part of a CPI. The
+first, `cpi_guard_enabled()`, simply returns the current value of the
+`CpiGuard.lock_cpi` field if the extension exists on the account; otherwise, it
+returns false. The rest of the program can use this function to determine if the
+guard is enabled or not.
+
+```rust
+/// Determine if CPI Guard is enabled for this account
+pub fn cpi_guard_enabled(account_state: &StateWithExtensionsMut<Account>) -> bool {
+    if let Ok(extension) = account_state.get_extension::<CpiGuard>() {
+        return extension.lock_cpi.into();
+    }
+    false
+}
+```
+
+The second helper function is called `in_cpi()` and determines whether or not
+the current instruction is within a CPI. The function is able to determine if
+it's currently in a CPI by calling
+[`get_stack_height()` from the `solana_program` rust crate](https://docs.rs/solana-program/latest/solana_program/instruction/fn.get_stack_height.html).
+This function returns the current stack height of instructions. Instructions
+created at the initial transaction level will have a height of
+[`TRANSACTION_LEVEL_STACK_HEIGHT`](https://docs.rs/solana-program/latest/solana_program/instruction/constant.TRANSACTION_LEVEL_STACK_HEIGHT.html)
+or 1. The first inner invoked transaction, or CPI, will have a height of
+`TRANSACTION_LEVEL_STACK_HEIGHT` + 1 and so on.
With this information, we know +that if `get_stack_height()` returns a value greater than +`TRANSACTION_LEVEL_STACK_HEIGHT`, we're currently in a CPI! This is exactly what +the `in_cpi()` function checks. If +`get_stack_height() > TRANSACTION_LEVEL_STACK_HEIGHT`, it returns `True`. +Otherwise, it returns `False`. + +```rust +/// Determine if we are in CPI +pub fn in_cpi() -> bool { + get_stack_height() > TRANSACTION_LEVEL_STACK_HEIGHT +} +``` + +Using these two helper functions, the `Token Extensions Program` can easily +determine if it should reject an instruction or not. + +### Toggle CPI Guard + +To toggle the CPI Guard on/off, a Token Account must have been initialized for +this specific extension. Then, an instruction can be sent to enable the CPI +Guard. This can only be done from a client. _You cannot toggle the CPI Guard via +CPI_. The `Enable` instruction +[checks if it was invoked via CPI and will return an error if so](https://github.com/solana-labs/solana-program-library/blob/ce8e4d565edcbd26e75d00d0e34e9d5f9786a646/token/program-2022/src/extension/cpi_guard/processor.rs#L44). +This means only the end user can toggle the CPI Guard. + +```rust +// inside process_toggle_cpi_guard() +if in_cpi() { + return Err(TokenError::CpiGuardSettingsLocked.into()); +} +``` + +You can enable the CPI using the +[`@solana/spl-token` Typescript package](https://solana-labs.github.io/solana-program-library/token/js/modules.html). +Here is an example. + +```typescript +// create token account with the CPI Guard extension +const tokenAccount = tokenAccountKeypair.publicKey; +const extensions = [ExtensionType.CpiGuard]; +const tokenAccountLen = getAccountLen(extensions); +const lamports = + await connection.getMinimumBalanceForRentExemption(tokenAccountLen); + +const createTokenAccountInstruction = SystemProgram.createAccount({ + fromPubkey: payer.publicKey, + newAccountPubkey: tokenAccount, + space: tokenAccountLen, + lamports, + programId: TOKEN_2022_PROGRAM_ID, +}); + +// create 'enable CPI Guard' instruction +const enableCpiGuardInstruction = createEnableCpiGuardInstruction( + tokenAccount, + owner.publicKey, + [], + TOKEN_2022_PROGRAM_ID, +); + +const initializeAccountInstruction = createInitializeAccountInstruction( + tokenAccount, + mint, + owner.publicKey, + TOKEN_2022_PROGRAM_ID, +); + +// construct transaction with these instructions +const transaction = new Transaction().add( + createTokenAccountInstruction, + initializeAccountInstruction, + enableCpiGuardInstruction, +); + +transaction.feePayer = payer.publicKey; +// Send transaction +await sendAndConfirmTransaction(connection, transaction, [ + payer, + owner, + tokenAccountKeypair, +]); +``` + +You can also use the +[`enableCpiGuard`](https://solana-labs.github.io/solana-program-library/token/js/functions/enableCpiGuard.html) +and +[`disableCpiGuard`](https://solana-labs.github.io/solana-program-library/token/js/functions/disableCpiGuard.html) +helper functions from the `@solana/spl-token` API after the account as been +initialized. 
+ +```typescript +// enable CPI Guard +await enableCpiGuard( + connection, // connection + payer, // payer + userTokenAccount.publicKey, // account + payer, // owner + [], // multiSigners +); + +// disable CPI Guard +await disableCpiGuard( + connection, // connection + payer, // payer + userTokenAccount.publicKey, // account + payer, // owner + [], // multiSigners +); +``` + +### CPI Guard Protections + +#### Transfer + +The transfer feature of the CPI Guard prevents anyone but the account delegate +from authorizing a transfer instruction. This is enforced in the various +transfer functions in the `Token Extensions Program`. For example, +[looking at the `transfer` instruction](https://github.com/solana-labs/solana-program-library/blob/ce8e4d565edcbd26e75d00d0e34e9d5f9786a646/token/program-2022/src/processor.rs#L428) +we can see a check that will return an error if the required circumstances are +met. + +Using the helper functions we discussed above, the program is able to determine +if it should throw an error or not. + +```rust +// inside process_transfer in the token extensions program +if let Ok(cpi_guard) = source_account.get_extension::() { + if cpi_guard.lock_cpi.into() && in_cpi() { + return Err(TokenError::CpiGuardTransferBlocked.into()); + } +} +``` + +This guard means that not even the owner of a token account can transfer tokens +out of the account while another account is an authorized delegate. + +#### Burn + +This CPI Guard also ensures only the account delegate can burn tokens from a +token account, just like the transfer protection. + +The `process_burn` function in the `Token Extension Program` functions in the +same way as the transfer instructions. It will +[return an error under the same circumstances](https://github.com/solana-labs/solana-program-library/blob/ce8e4d565edcbd26e75d00d0e34e9d5f9786a646/token/program-2022/src/processor.rs#L1076). + +```rust +// inside process_burn in the token extensions program +if let Ok(cpi_guard) = source_account.get_extension::() { + if cpi_guard.lock_cpi.into() && in_cpi() { + return Err(TokenError::CpiGuardBurnBlocked.into()); + } +} +``` + +This guard means that not even the owner of a token account can burn tokens out +of the account while another account is an authorized delegate. + +#### Approve + +The CPI Guard prevents from approving a delegate of a token account via CPI. You +can approve a delegate via a client instruction, but not CPI. The +`process_approve` function of the `Token Extension Program` runs the +[same checks to determine if the guard is enabled and its currently in a CPI](https://github.com/solana-labs/solana-program-library/blob/ce8e4d565edcbd26e75d00d0e34e9d5f9786a646/token/program-2022/src/processor.rs#L583). + +This means an end user is not at risk of signing a transaction that indirectly +approves a delegate over their token account without the knowledge of the user. +Before, the user was at the mercy of their wallet to notify them of transactions +like this ahead of time. + +#### Close + +To close a token account via CPI, having the guard enabled means that the +`Token Extensions Program` will check that the +[destination account receiving the token account's lamports is the account owner](https://github.com/solana-labs/solana-program-library/blob/ce8e4d565edcbd26e75d00d0e34e9d5f9786a646/token/program-2022/src/processor.rs#L1128). + +Here is the exact code block from the `process_close_account` function. 
+ +```rust +if !source_account + .base + .is_owned_by_system_program_or_incinerator() +{ + if let Ok(cpi_guard) = source_account.get_extension::() { + if cpi_guard.lock_cpi.into() + && in_cpi() + && !cmp_pubkeys(destination_account_info.key, &source_account.base.owner) + { + return Err(TokenError::CpiGuardCloseAccountBlocked.into()); + } + } +... +} +``` + +This guard protects the user from signing a transaction that closes a token +account they own and transferring that account's lamports to another account via +CPI. This would be hard to detect from an end user's perspective without +inspecting the instructions themselves. This guard ensures those lamports are +transferred only to their owner when closing a token account via CPI. + +#### Set Close Authority + +The CPI Guard prevents from setting the `CloseAccount` authority via CPI, you +can unset a previously set `CloseAccount` authority however. The +`Token Extension Program` enforces this by +[checking if a value has been passed in the `new_authority` parameter](https://github.com/solana-labs/solana-program-library/blob/ce8e4d565edcbd26e75d00d0e34e9d5f9786a646/token/program-2022/src/processor.rs#L697) +to the `process_set_authority` function. + +```rust +AuthorityType::CloseAccount => { + let authority = account.base.close_authority.unwrap_or(account.base.owner); + Self::validate_owner( + program_id, + &authority, + authority_info, + authority_info_data_len, + account_info_iter.as_slice(), + )?; + + if let Ok(cpi_guard) = account.get_extension::() { + if cpi_guard.lock_cpi.into() && in_cpi() && new_authority.is_some() { + return Err(TokenError::CpiGuardSetAuthorityBlocked.into()); + } + } + + account.base.close_authority = new_authority; +} +``` + +This guard prevents the user from signing a transaction that gives another +account the ability to close their Token account behind the scenes. + +#### Set Owner + +The CPI Guard prevents from changing the account owner in all circumstances, +whether via CPI or not. The account authority is updated in the same +`process_set_authority` function as the `CloseAccount` authority in the previous +section. If the instruction is attempting to update the authority of an account +with the CPI Guard enabled, the +[function will return one of two possible errors](https://github.com/solana-labs/solana-program-library/blob/ce8e4d565edcbd26e75d00d0e34e9d5f9786a646/token/program-2022/src/processor.rs#L662). + +If the instruction is being executed in a CPI, the function will return a +`CpiGuardSetAuthorityBlocked` error. Otherwise it will return a +`CpiGuardOwnerChangeBlocked` error. + +```rust +if let Ok(cpi_guard) = account.get_extension::() { + if cpi_guard.lock_cpi.into() && in_cpi() { + return Err(TokenError::CpiGuardSetAuthorityBlocked.into()); + } else if cpi_guard.lock_cpi.into() { + return Err(TokenError::CpiGuardOwnerChangeBlocked.into()); + } +} +``` + +This guard prevents from changing the ownership of a Token account at all times +when enabled. + +## Lab + +This lab will primarily focus on writing tests in TypeScript, but we'll need to +run a program locally against these tests. For this reason, we'll need to go +through a few steps to ensure a proper environment on your machine for the +program to run. The onchain program has already been written for you and is +included in the lab starter code. + +The onchain program contains a few instructions that showcase what the CPI Guard +can protect against. We'll write tests invoking these instructions both with a +CPI Guard enabled and disabled. 
+ +The tests have been broken up into individual files in the `/tests` directory. +Each file serves as its own unit test that will invoke a specific instruction on +our program and illustrate a specific CPI Guard. + +The program has five instructions: `malicious_close_account`, +`prohibited_approve_account`, `prohibited_set_authority`, `unauthorized_burn`, +`set_owner`. + +Each of these instructions makes a CPI to the `Token Extensions Program` and +attempts to take an action on the given token account that is potentially +malicious unknowingly to the signer of the original transaction. We won't test +the `Transfer` guard as it is same as the `Burn` guard. + +#### 1. Verify Solana/Anchor/Rust Versions + +We'll be interacting with the `Token Extensions Program` in this lab and that +requires you to have Solana CLI version ≥ 1.18.0. + +To check your version run: + +```bash +solana --version +``` + +If the version printed out after running `solana --version` is less than +`1.18.0` then you can update the CLI version manually. Note, at the time of +writing this, you cannot simply run the `solana-install update` command. This +command will not update the CLI to the correct version for us, so we have to +explicitly download version `1.18.0`. You can do so with the following command: + +```bash +solana-install init 1.18.0 +``` + +If you run into this error at any point attempting to build the program, that +likely means you do not have the correct version of the Solana CLI installed. + +```bash +anchor build +error: package `solana-program v1.18.0` cannot be built because it requires rustc 1.72.0 or newer, while the currently active rustc version is 1.68.0-dev +Either upgrade to rustc 1.72.0 or newer, or use +cargo update -p solana-program@1.18.0 --precise ver +where `ver` is the latest version of `solana-program` supporting rustc 1.68.0-dev +``` + +You will also want the `0.29.0` version of the Anchor CLI installed. You can +follow the steps listed here to update via avm +https://www.anchor-lang.com/docs/avm + +or simply run + +```bash +avm install 0.29.0 +avm use 0.29.0 +``` + +At the time of writing, the latest version of the Anchor CLI is `0.29.0` + +Now, we can check our rust version. + +```bash +rustc --version +``` + +At the time of writing, version `1.26.0` was used for the Rust compiler. If you +would like to update, you can do so via `rustup` +https://doc.rust-lang.org/book/ch01-01-installation.html + +```bash +rustup update +``` + +Now, we should have all the correct versions installed. + +#### 2. Get starter code and add dependencies + +Let's grab the starter branch. + +```bash +git clone https://github.com/Unboxed-Software/solana-lab-cpi-guard +cd solana-lab-cpi-guard +git checkout starter +``` + +#### 3. Update Program ID and Anchor Keypair + +Once in the starter branch, run + +`anchor keys sync` + +This will replace the program ID in various locations with your new program +keypair. + +Then set your developer keypair path in `Anchor.toml`. + +```toml +[provider] +cluster = "Localnet" +wallet = "~/.config/solana/id.json" +``` + +"~/.config/solana/id.json" is the most common keypair path, but if you're +unsure, just run: + +```bash +solana config get +``` + +#### 4. Confirm the program builds + +Let's build the starter code to confirm we have everything configured correctly. +If it does not build, please revisit the steps above. + +```bash +anchor build +``` + +You can safely ignore the warnings of the build script, these will go away as we +add in the necessary code. 
+ +Feel free to run the provided tests to make sure the rest of the dev environment +is setup correctly. You'll have to install the node dependencies using `npm` or +`yarn`. The tests should run, but they do not do anything currently. + +```bash +yarn install +anchor test +``` + +#### 5. Create token with CPI Guard + +Before we write any tests, let's create a helper function that will create a +Token account with the CPI Guard extension. Let's do this in a new file +`tests/token-helper.ts` and a new function called +`createTokenAccountWithCPIGuard`. + +Internally, this function will call: + +- `SystemProgram.createAccount`: Allocates space for the token account +- `createInitializeAccountInstruction`: Initializes the token account +- `createEnableCpiGuardInstruction`: Enables the CPI Guard + +```ts +import { + ExtensionType, + TOKEN_2022_PROGRAM_ID, + createEnableCpiGuardInstruction, + createInitializeAccountInstruction, + getAccountLen, +} from "@solana/spl-token"; +import { + Connection, + Keypair, + PublicKey, + SystemProgram, + Transaction, + sendAndConfirmTransaction, +} from "@solana/web3.js"; + +export async function createTokenAccountWithCPIGuard( + connection: Connection, + payer: Keypair, + owner: Keypair, + tokenAccountKeypair: Keypair, + mint: PublicKey, +): Promise { + const tokenAccount = tokenAccountKeypair.publicKey; + + const extensions = [ExtensionType.CpiGuard]; + + const tokenAccountLen = getAccountLen(extensions); + const lamports = + await connection.getMinimumBalanceForRentExemption(tokenAccountLen); + + const createTokenAccountInstruction = SystemProgram.createAccount({ + fromPubkey: payer.publicKey, + newAccountPubkey: tokenAccount, + space: tokenAccountLen, + lamports, + programId: TOKEN_2022_PROGRAM_ID, + }); + + const initializeAccountInstruction = createInitializeAccountInstruction( + tokenAccount, + mint, + owner.publicKey, + TOKEN_2022_PROGRAM_ID, + ); + + const enableCpiGuardInstruction = createEnableCpiGuardInstruction( + tokenAccount, + owner.publicKey, + [], + TOKEN_2022_PROGRAM_ID, + ); + + const transaction = new Transaction().add( + createTokenAccountInstruction, + initializeAccountInstruction, + enableCpiGuardInstruction, + ); + + transaction.feePayer = payer.publicKey; + + // Send transaction + return await sendAndConfirmTransaction(connection, transaction, [ + payer, + owner, + tokenAccountKeypair, + ]); +} +``` + +#### 5. Approve delegate + +The first CPI Guard we'll test is the approve delegate functionality. The CPI +Guard prevents approving a delegate of a token account with the CPI Guard +enabled via CPI completely. It's important to note that you can approve a +delegate on a CPI Guarded account, just not with a CPI. To do so, you must send +an instruction directly to the `Token Extensions Program` from a client rather +than via another program. + +Before we write our test, we need to take a look at the program code we are +testing. The `prohibited_approve_account` instruction is what we'll be targeting +here. 
+ +```rust +// inside src/lib.rs +pub fn prohibited_approve_account(ctx: Context, amount: u64) -> Result<()> { + msg!("Invoked ProhibitedApproveAccount"); + + msg!( + "Approving delegate: {} to transfer up to {} tokens.", + ctx.accounts.delegate.key(), + amount + ); + + approve( + CpiContext::new( + ctx.accounts.token_program.to_account_info(), + Approve { + to: ctx.accounts.token_account.to_account_info(), + delegate: ctx.accounts.delegate.to_account_info(), + authority: ctx.accounts.authority.to_account_info(), + }, + ), + amount, + ) +} +... + +#[derive(Accounts)] +pub struct ApproveAccount<'info> { + #[account(mut)] + pub authority: Signer<'info>, + #[account( + mut, + token::token_program = token_program, + token::authority = authority + )] + pub token_account: InterfaceAccount<'info, token_interface::TokenAccount>, + /// CHECK: delegate to approve + #[account(mut)] + pub delegate: AccountInfo<'info>, + pub token_program: Interface<'info, token_interface::TokenInterface>, +} +``` + +If you are familiar with Solana programs, then this should look like a pretty +simple instruction. The instruction expects an `authority` account as a `Signer` +and a `token_account` that `authority` is the authority of. + +The instruction then invokes the `Approve` instruction on the +`Token Extensions Program` and attempts to assign `delegate` as the delegate +over the given `token_account`. + +Let's open the `/tests/approve-delegate-example.ts` file to begin testing this +instruction. Take a look at the starting code. We have a payer, some test +keypairs and an `airdropIfRequired` function that will run before the tests. + +Once you feel comfortable with the starting code, we can move on to the 'Approve +Delegate' tests. We will make tests that invoke the same exact instruction in +our target program, with and without CPI guard. + +To test our instruction, we first need to create our token mint and a token +account with extensions. + +```typescript +it("stops 'Approve Delegate' when CPI guard is enabled", async () => { + await createMint( + provider.connection, + payer, + provider.wallet.publicKey, + undefined, + 6, + testTokenMint, + undefined, + TOKEN_2022_PROGRAM_ID, + ); + await createTokenAccountWithCPIGuard( + provider.connection, + payer, + payer, + userTokenAccount, + testTokenMint.publicKey, + ); +}); +``` + +Now let's send a transaction to our program that will attempt to invoke the +'Approve delegate' instruction on the `Token Extensions Program`. + +```typescript +// inside "allows 'Approve Delegate' when CPI guard is disabled" test block +try { + const tx = await program.methods + .prohibitedApproveAccount(new anchor.BN(1000)) + .accounts({ + authority: payer.publicKey, + tokenAccount: userTokenAccount.publicKey, + delegate: maliciousAccount.publicKey, + tokenProgram: TOKEN_2022_PROGRAM_ID, + }) + .signers([payer]) + .rpc(); + + console.log("Your transaction signature", tx); +} catch (e) { + assert( + e.message == + "failed to send transaction: Transaction simulation failed: Error processing Instruction 0: custom program error: 0x2d", + ); + console.log( + "CPI Guard is enabled, and a program attempted to approve a delegate", + ); +} +``` + +Notice we wrap this in a try/catch block. This is because this instruction +should fail if the CPI Guard works correctly. We catch the error and assert that +the error message is what we expect. 
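+
+As a side note, the exact error string can vary between `@solana/web3.js` and
+Anchor versions, so matching the full message is somewhat brittle. One option,
+shown as a sketch only (the helper name is hypothetical, and it assumes the
+tests use `assert` from chai as Anchor test scaffolds typically do), is to
+assert only on the custom program error code, such as the `0x2d` code above:
+
+```typescript
+import { assert } from "chai";
+
+// Hypothetical helper: assert that a thrown error contains a given custom
+// program error code (e.g. "0x2d"), regardless of the surrounding message text.
+export function assertCustomProgramError(error: unknown, code: string) {
+  const message = error instanceof Error ? error.message : String(error);
+  assert(
+    message.includes(`custom program error: ${code}`),
+    `expected custom program error ${code}, got: ${message}`,
+  );
+}
+```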
+ +Now, we essentially do the same thing for the +`"allows 'Approve Delegate' when CPI guard is disabled"` test, except we want to +pass in a token account without a CPI Guard. To do this, we can simply disable +the CPI Guard on the `userTokenAccount` and resend the transaction. + +```typescript +it("allows 'Approve Delegate' when CPI guard is disabled", async () => { + await disableCpiGuard( + provider.connection, + payer, + userTokenAccount.publicKey, + payer, + [], + undefined, + TOKEN_2022_PROGRAM_ID, + ); + + await program.methods + .prohibitedApproveAccount(new anchor.BN(1000)) + .accounts({ + authority: payer.publicKey, + tokenAccount: userTokenAccount.publicKey, + delegate: maliciousAccount.publicKey, + tokenProgram: TOKEN_2022_PROGRAM_ID, + }) + .signers([payer]) + .rpc(); +}); +``` + +This transaction will succeed and the `delegate` account will now have the +authority to transfer the given amount of tokens from the `userTokenAccount`. + +Feel free to save your work and run `anchor test`. All of the tests will run, +but these two are the only ones that are doing anything yet. They should both +pass at this point. + +#### 6. Close Account + +The close account instruction invokes the `close_account` instruction on the +`Token Extensions Program`. This closes the given token account. However, you +have the ability to define which account the returned rent lamports should be +transferred to. The CPI Guard ensures that this account is always the account +owner. + +```rust +pub fn malicious_close_account(ctx: Context) -> Result<()> { + msg!("Invoked MaliciousCloseAccount"); + + msg!( + "Token account to close : {}", + ctx.accounts.token_account.key() + ); + + close_account(CpiContext::new( + ctx.accounts.token_program.to_account_info(), + CloseAccount { + account: ctx.accounts.token_account.to_account_info(), + destination: ctx.accounts.destination.to_account_info(), + authority: ctx.accounts.authority.to_account_info(), + }, + )) +} + +... + +#[derive(Accounts)] +pub struct MaliciousCloseAccount<'info> { + #[account(mut)] + pub authority: Signer<'info>, + #[account( + mut, + token::token_program = token_program, + token::authority = authority + )] + pub token_account: InterfaceAccount<'info, token_interface::TokenAccount>, + /// CHECK: malicious account + #[account(mut)] + pub destination: AccountInfo<'info>, + pub token_program: Interface<'info, token_interface::TokenInterface>, + pub system_program: Program<'info, System>, +} +``` + +Our program just invokes the `close_account` instruction, but a potentially +malicious client could pass in a different account than the token account owner +as the `destination` account. This would be hard to see from a user's +perspective unless the wallet notified them. With CPI Guards enabled, the +`Token Extension Program` will simply reject the instruction if that is the +case. + +To test this, we'll open up the `/tests/close-account-example.ts` file. The +starting code here is the same as our previous test. + +First, let's create our mint and CPI guarded token account: + +```typescript +it("stops 'Close Account' when CPI guard in enabled", async () => { + await createMint( + provider.connection, + payer, + provider.wallet.publicKey, + undefined, + 6, + testTokenMint, + undefined, + TOKEN_2022_PROGRAM_ID, + ); + await createTokenAccountWithCPIGuard( + provider.connection, + payer, + payer, + userTokenAccount, + testTokenMint.publicKey, + ); +}); +``` + +Now let's send a transaction to our `malicious_close_account` instruction. 
Since +we have the CPI Guard enabled on this token account, the transaction should +fail. Our test verifies it fails for the expected reason. + +```typescript +// inside "stops 'Close Account' when CPI guard in enabled" test block +try { + const tx = await program.methods + .maliciousCloseAccount() + .accounts({ + authority: payer.publicKey, + tokenAccount: userTokenAccount.publicKey, + destination: maliciousAccount.publicKey, + tokenProgram: TOKEN_2022_PROGRAM_ID, + }) + .signers([payer]) + .rpc(); + + console.log("Your transaction signature", tx); +} catch (e) { + assert( + e.message == + "failed to send transaction: Transaction simulation failed: Error processing Instruction 0: custom program error: 0x2c", + ); + console.log( + "CPI Guard is enabled, and a program attempted to close an account without returning lamports to owner", + ); +} +``` + +Now, we can disable the CPI Guard and send the same exact transaction in the +`"Close Account without CPI Guard"` test. This transaction should succeed this +time. + +```typescript +it("Close Account without CPI Guard", async () => { + await disableCpiGuard( + provider.connection, + payer, + userTokenAccount.publicKey, + payer, + [], + undefined, + TOKEN_2022_PROGRAM_ID, + ); + + await program.methods + .maliciousCloseAccount() + .accounts({ + authority: payer.publicKey, + tokenAccount: userTokenAccount.publicKey, + destination: maliciousAccount.publicKey, + tokenProgram: TOKEN_2022_PROGRAM_ID, + }) + .signers([payer]) + .rpc(); +}); +``` + +#### 7. Set Authority + +Moving on to the `prohibited_set_authority` instruction, the CPI Guard protects +against a CPI setting the `CloseAccount` authority. + +```rust +pub fn prohibited_set_authority(ctx: Context) -> Result<()> { + msg!("Invoked ProhibitedSetAuthority"); + + msg!( + "Setting authority of token account: {} to address: {}", + ctx.accounts.token_account.key(), + ctx.accounts.new_authority.key() + ); + + set_authority( + CpiContext::new( + ctx.accounts.token_program.to_account_info(), + SetAuthority { + current_authority: ctx.accounts.authority.to_account_info(), + account_or_mint: ctx.accounts.token_account.to_account_info(), + }, + ), + spl_token_2022::instruction::AuthorityType::CloseAccount, + Some(ctx.accounts.new_authority.key()), + ) +} + +#[derive(Accounts)] +pub struct SetAuthorityAccount<'info> { + #[account(mut)] + pub authority: Signer<'info>, + #[account( + mut, + token::token_program = token_program, + token::authority = authority + )] + pub token_account: InterfaceAccount<'info, token_interface::TokenAccount>, + /// CHECK: delegate to approve + #[account(mut)] + pub new_authority: AccountInfo<'info>, + pub token_program: Interface<'info, token_interface::TokenInterface>, +} +``` + +Our program instruction simply invokes the `SetAuthority` instruction and +indicates we want to set the +`spl_token_2022::instruction::AuthorityType::CloseAccount` authority of the +given token account. + +Open the `/tests/set-authority-example.ts` file. The starter code is the same as +the previous tests. + +Let's create our mint and CPI-guarded token account. Then, we can send a +transaction to our `prohibited_set_authority` instruction. 
+ +```typescript +it("sets authority when CPI guard in enabled", async () => { + await createMint( + provider.connection, + payer, + provider.wallet.publicKey, + undefined, + 6, + testTokenMint, + undefined, + TOKEN_2022_PROGRAM_ID, + ); + await createTokenAccountWithCPIGuard( + provider.connection, + payer, + payer, + userTokenAccount, + testTokenMint.publicKey, + ); + + try { + const tx = await program.methods + .prohibitedSetAuthority() + .accounts({ + authority: payer.publicKey, + tokenAccount: userTokenAccount.publicKey, + newAuthority: maliciousAccount.publicKey, + tokenProgram: TOKEN_2022_PROGRAM_ID, + }) + .signers([payer]) + .rpc(); + + console.log("Your transaction signature", tx); + } catch (e) { + assert( + e.message == + "failed to send transaction: Transaction simulation failed: Error processing Instruction 0: custom program error: 0x2e", + ); + console.log( + "CPI Guard is enabled, and a program attempted to add or change an authority", + ); + } +}); +``` + +For the `"Set Authority Example"` test, we can disable the CPI Guard and re-send +the transaction. + +```typescript +it("Set Authority Example", async () => { + await disableCpiGuard( + provider.connection, + payer, + userTokenAccount.publicKey, + payer, + [], + undefined, + TOKEN_2022_PROGRAM_ID, + ); + + await program.methods + .prohibitedSetAuthority() + .accounts({ + authority: payer.publicKey, + tokenAccount: userTokenAccount.publicKey, + newAuthority: maliciousAccount.publicKey, + tokenProgram: TOKEN_2022_PROGRAM_ID, + }) + .signers([payer]) + .rpc(); +}); +``` + +#### 8. Burn + +The next instruction we'll test is the `unauthorized_burn` instruction from our +test program. This instruction invokes the `burn` instruction from the +`Token Extensions Program` and attempts to burn a given amount of tokens from +the given token account. + +The CPI Guard ensures that this is only possible if the signing authority is the +token account delegate. + +```rust +pub fn unauthorized_burn(ctx: Context, amount: u64) -> Result<()> { + msg!("Invoked Burn"); + + msg!( + "Burning {} tokens from address: {}", + amount, + ctx.accounts.token_account.key() + ); + + burn( + CpiContext::new( + ctx.accounts.token_program.to_account_info(), + Burn { + mint: ctx.accounts.token_mint.to_account_info(), + from: ctx.accounts.token_account.to_account_info(), + authority: ctx.accounts.authority.to_account_info(), + }, + ), + amount, + ) +} + +... + +#[derive(Accounts)] +pub struct BurnAccounts<'info> { + #[account(mut)] + pub authority: Signer<'info>, + #[account( + mut, + token::token_program = token_program, + token::authority = authority + )] + pub token_account: InterfaceAccount<'info, token_interface::TokenAccount>, + #[account( + mut, + mint::token_program = token_program + )] + pub token_mint: InterfaceAccount<'info, token_interface::Mint>, + pub token_program: Interface<'info, token_interface::TokenInterface>, +} +``` + +To test this, open up the `tests/burn-example.ts` file. The starter code is the +same as the previous, except we swapped `maliciousAccount` to `delegate`. + +Then, we can create our mint and CPI-guarded token account. 
+ +```typescript +it("stops 'Burn' without a delegate signature", async () => { + await createMint( + provider.connection, + payer, + provider.wallet.publicKey, + undefined, + 6, + testTokenMint, + undefined, + TOKEN_2022_PROGRAM_ID, + ); + + await createTokenAccountWithCPIGuard( + provider.connection, + payer, + payer, + userTokenAccount, + testTokenMint.publicKey, + ); +}); +``` + +Now, let's mint some tokens to our test account. + +```typescript +// inside "stops 'Burn' without a delegate signature" test block +const mintToTx = await mintTo( + provider.connection, + payer, + testTokenMint.publicKey, + userTokenAccount.publicKey, + payer, + 1000, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, +); +``` + +Now let's approve a delegate over our token account. This token account has a +CPI Guard enabled currently, but we are still able to approve a delegate. This +is because we are doing so by invoking the `Token Extensions Program` directly +and not via a CPI like our earlier example. + +```typescript +// inside "stops 'Burn' without a delegate signature" test block +const approveTx = await approve( + provider.connection, + payer, + userTokenAccount.publicKey, + delegate.publicKey, + payer, + 500, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, +); +``` + +Now that we have a delegate over our token account, we can send a transaction to +our program to attempt to burn some tokens. We'll be passing in the `payer` +account as the authority. This account is the owner over the `userTokenAccount`, +but since we have approved the `delegate` account as the delegate, the CPI Guard +will prevent this transaction from going through. + +```typescript +// inside "stops 'Burn' without a delegate signature" test block +try { + const tx = await program.methods + .unauthorizedBurn(new anchor.BN(500)) + .accounts({ + // payer is not the delegate + authority: payer.publicKey, + tokenAccount: userTokenAccount.publicKey, + tokenMint: testTokenMint.publicKey, + tokenProgram: TOKEN_2022_PROGRAM_ID, + }) + .signers([payer]) + .rpc(); + + console.log("Your transaction signature", tx); +} catch (e) { + assert( + e.message == + "failed to send transaction: Transaction simulation failed: Error processing Instruction 0: custom program error: 0x2b", + ); + console.log( + "CPI Guard is enabled, and a program attempted to burn user funds without using a delegate.", + ); +} +``` + +For the `"Burn without Delegate Signature Example"` test, we'll simply disable +the CPI Guard and re-send the transaction. + +```typescript +it("Burn without Delegate Signature Example", async () => { + await disableCpiGuard( + provider.connection, + payer, + userTokenAccount.publicKey, + payer, + [], + undefined, + TOKEN_2022_PROGRAM_ID, + ); + + const tx = await program.methods + .unauthorizedBurn(new anchor.BN(500)) + .accounts({ + // payer is not the delegate + authority: payer.publicKey, + tokenAccount: userTokenAccount.publicKey, + tokenMint: testTokenMint.publicKey, + tokenProgram: TOKEN_2022_PROGRAM_ID, + }) + .signers([payer]) + .rpc(); +}); +``` + +#### 9. Set Owner + +The last CPI Guard we'll test is the `SetOwner` protection. With the CPI Guard +enabled, this action is always prohibited even outside of a CPI. To test this, +we'll attempt to set the owner of a token account from the client side, as well +as CPI via our test program. + +Here is the program instruction. 
+ +```rust +pub fn set_owner(ctx: Context) -> Result<()> { + msg!("Invoked SetOwner"); + + msg!( + "Setting owner of token account: {} to address: {}", + ctx.accounts.token_account.key(), + ctx.accounts.new_owner.key() + ); + + set_authority( + CpiContext::new( + ctx.accounts.token_program.to_account_info(), + SetAuthority { + current_authority: ctx.accounts.authority.to_account_info(), + account_or_mint: ctx.accounts.token_account.to_account_info(), + }, + ), + spl_token_2022::instruction::AuthorityType::AccountOwner, + Some(ctx.accounts.new_owner.key()), + ) +} + +#[derive(Accounts)] +pub struct SetOwnerAccounts<'info> { + #[account(mut)] + pub authority: Signer<'info>, + #[account( + mut, + token::token_program = token_program, + token::authority = authority + )] + pub token_account: InterfaceAccount<'info, token_interface::TokenAccount>, + /// CHECK: delegate to approve + #[account(mut)] + pub new_owner: AccountInfo<'info>, + pub token_program: Interface<'info, token_interface::TokenInterface>, +} +``` + +Open up the `/tests/set-owner-example.ts` file. There are four tests we'll write +for this one. Two for setting the Owner without a CPI and two for setting the +owner via CPI. + +Notice we've taken out `delegate` and added `firstNonCPIGuardAccount`, +`secondNonCPIGuardAccount`, and `newOwner`. + +Starting with the first +`"stops 'Set Authority' without CPI on a CPI-guarded account"` test, we'll +create the mint and CPI-guarded token account. + +```typescript +it("stops 'Set Authority' without CPI on a CPI-guarded account", async () => { + await createMint( + provider.connection, + payer, + provider.wallet.publicKey, + undefined, + 6, + testTokenMint, + undefined, + TOKEN_2022_PROGRAM_ID, + ); + + await createTokenAccountWithCPIGuard( + provider.connection, + payer, + payer, + userTokenAccount, + testTokenMint.publicKey, + ); +}); +``` + +Then, we'll try to send a transaction to the `set_authority` instruction of the +`Token Extensions Program` with the `setAuthority` function from the +`@solana/spl-token` library. + +```typescript +// inside the "stops 'Set Authority' without CPI on a CPI-guarded account" test block +try { + await setAuthority( + provider.connection, + payer, + userTokenAccount.publicKey, + payer, + AuthorityType.AccountOwner, + newOwner.publicKey, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, + ); +} catch (e) { + assert( + e.message == + "failed to send transaction: Transaction simulation failed: Error processing Instruction 0: custom program error: 0x2f", + ); + console.log( + "Account ownership cannot be changed while CPI Guard is enabled.", + ); +} +``` + +This transaction should fail, so we wrap the call in a try/catch block and +ensure the error is the expected error. + +Next, we'll create another token account without the CPI Guard enabled and +attempt the same thing. + +```typescript +it("Set Authority without CPI on Non-CPI Guarded Account", async () => { + await createAccount( + provider.connection, + payer, + testTokenMint.publicKey, + payer.publicKey, + firstNonCPIGuardAccount, + undefined, + TOKEN_2022_PROGRAM_ID, + ); + + await setAuthority( + provider.connection, + payer, + firstNonCPIGuardAccount.publicKey, + payer, + AuthorityType.AccountOwner, + newOwner.publicKey, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, + ); +}); +``` + +This test should succeed. + +Now, let's test this out using a CPI. To do that, we just have to send a +transaction to the `set_owner` instruction of our program. 
+ +```typescript +it("[CPI Guard] Set Authority via CPI on CPI Guarded Account", async () => { + try { + await program.methods + .setOwner() + .accounts({ + authority: payer.publicKey, + tokenAccount: userTokenAccount.publicKey, + newOwner: newOwner.publicKey, + tokenProgram: TOKEN_2022_PROGRAM_ID, + }) + .signers([payer]) + .rpc(); + } catch (e) { + assert( + e.message == + "failed to send transaction: Transaction simulation failed: Error processing Instruction 0: custom program error: 0x2e", + ); + console.log( + "CPI Guard is enabled, and a program attempted to add or change an authority.", + ); + } +}); +``` + +Lastly, we can create another token account without the CPI Guard enabled and +pass this to the program instruction. This time, the CPI should go through. + +```typescript +it("Set Authority via CPI on Non-CPI Guarded Account", async () => { + await createAccount( + provider.connection, + payer, + testTokenMint.publicKey, + payer.publicKey, + secondNonCPIGuardAccount, + undefined, + TOKEN_2022_PROGRAM_ID, + ); + + await program.methods + .setOwner() + .accounts({ + authority: payer.publicKey, + tokenAccount: secondNonCPIGuardAccount.publicKey, + newOwner: newOwner.publicKey, + tokenProgram: TOKEN_2022_PROGRAM_ID, + }) + .signers([payer]) + .rpc(); +}); +``` + +And that is it! You should be able to save your work and run `anchor test`. All +of the tests we have written should pass. + +## Challenge + +Write some tests for the Transfer functionality. diff --git a/content/courses/token-extensions/default-account-state.mdx b/content/courses/token-extensions/default-account-state.mdx new file mode 100644 index 000000000..dcbea482c --- /dev/null +++ b/content/courses/token-extensions/default-account-state.mdx @@ -0,0 +1,784 @@ +--- +title: Default Account State +objectives: + - Create mint account with default account state of frozen + - Explain the use cases of default account state + - Experiment with the rules of the extension +description: + "Create token that requires interaction with a specific service to use." +--- + +## Summary + +- The `default state` extension enables developers to set new token accounts for + a mint with this extension to be frozen by default, requiring interaction with + a specific service to unfreeze and utilize the tokens. +- There are three states of token accounts: Initialized, Uninitialized, and + Frozen, which determine how a token account can be interacted with. +- When a token account is frozen, the balance cannot change. +- The `freezeAuthority` is the only address that can freeze and thaw a token + account +- The `default state` can be updated using `updateDefaultAccountState` +- The lab demonstrates creating a mint with the `default state` extension and + creating a new token account which is set to a frozen state upon creation. The + lab includes tests to ensure the extension works as intended for both minting + and transferring tokens in frozen and thawed states. + +## Overview + +The `default state` extension allows developers to force all new token accounts +to be in one of two states: "Initialized" or "Frozen". Most usefully, with this +extension any new token accounts created can be set to frozen. When a token +account is frozen, it's balance cannot change. Meaning it cannot be minted to, +transferred from or burned. Only the `freezeAuthority` can thaw a frozen +account. + +Imagine you're a Solana game dev, and you only want players of your game to +interact with your in-game token. 
You can make the player, sign up for the game +to thaw their token account and allow them to play and trade with other players. +This works because of the `default state` extension, where it is set that all +new token accounts are frozen. + +#### Types of States + +There are 3 types of state with the default account state extension: + +- Uninitialized: This state indicates that the token account has been created + but not yet initialized through the Token Program. +- Initialized: An account in the Initialized state has been properly set up + through the Token Program. This means it has a specified mint and an owner has + been assigned. +- Frozen: A Frozen account is one that has been temporarily disabled from + performing certain operations, specifically transferring and minting tokens. + +```ts +/** Token account state as stored by the program */ +export enum AccountState { + Uninitialized = 0, + Initialized = 1, + Frozen = 2, +} +``` + +However, `default state` only deals with the latter two: `Initialized` and +`Frozen`. When you freeze an account, the state is `Frozen`, when you thaw, it +is `Initialized`. + +### Adding default account state + +Initializing a mint with the default account state extension involves three +instructions: + +- `SystemProgram.createAccount` +- `createInitializeDefaultAccountStateInstruction` +- `createInitializeMintInstruction` + +The first instruction `SystemProgram.createAccount` allocates space on the +blockchain for the mint account. This instruction accomplishes three things: + +- Allocates `space` +- Transfers `lamports` for rent +- Assigns to it's owning program + +To grab the size of the mint account, we call `getMintLen`, and to grab the +lamports needed for the space, we call `getMinimumBalanceForRentExemption`. + +```typescript +const mintLen = getMintLen([ExtensionType.DefaultAccountState]); +// Minimum lamports required for Mint Account +const lamports = await connection.getMinimumBalanceForRentExemption(mintLen); + +const createAccountInstruction = SystemProgram.createAccount({ + fromPubkey: payer.publicKey, + newAccountPubkey: mintKeypair.publicKey, + space: mintLen, + lamports, + programId: TOKEN_2022_PROGRAM_ID, +}); +``` + +The second +instruction `createInitializeDefaultAccountStateInstruction` initializes the +default account state extension. + +```typescript +const initializeDefaultAccountStateInstruction = + createInitializeDefaultAccountStateInstruction( + mintKeypair.publicKey, // Mint + defaultState, // Default State + TOKEN_2022_PROGRAM_ID, + ); +``` + +The third instruction `createInitializeMintInstruction` initializes the mint. + +```typescript +const initializeMintInstruction = createInitializeMintInstruction( + mintKeypair.publicKey, + decimals, + payer.publicKey, + payer.publicKey, + TOKEN_2022_PROGRAM_ID, +); +``` + +Lastly, add all of these instructions to a transaction and send it to the +blockchain. + +```ts +const transaction = new Transaction().add( + createAccountInstruction, + initializeDefaultAccountStateInstruction, + initializeMintInstruction, +); + +return await sendAndConfirmTransaction(connection, transaction, [ + payer, + mintKeypair, +]); +``` + +### Updating the Default Account State + +You can always change the default account state assuming you have the authority +to do so. To do this, simply call `updateDefaultAccountState`. 
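For example, a minimal sketch of switching the default for future token accounts
from `Frozen` to `Initialized` could look like this (assuming `connection`,
`payer`, `mint`, and the mint's `freezeAuthority` signer already exist in your
script):

```ts
import {
  AccountState,
  TOKEN_2022_PROGRAM_ID,
  updateDefaultAccountState,
} from "@solana/spl-token";

// Token accounts created after this call will start out Initialized instead
// of Frozen. The mint's freeze authority must sign (assumed signer here).
const signature = await updateDefaultAccountState(
  connection,
  payer, // fee payer
  mint, // mint with the default state extension
  AccountState.Initialized, // new default state for future token accounts
  freezeAuthority, // freeze authority of the mint
  [], // multisig signers, if any
  undefined,
  TOKEN_2022_PROGRAM_ID,
);
```

For reference, here is the function's signature from the `@solana/spl-token`
library: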
+ +```ts +/** + * Update the default account state on a mint + * + * @param connection Connection to use + * @param payer Payer of the transaction fees + * @param mint Mint to modify + * @param state New account state to set on created accounts + * @param freezeAuthority Freeze authority of the mint + * @param multiSigners Signing accounts if `freezeAuthority` is a multisig + * @param confirmOptions Options for confirming the transaction + * @param programId SPL Token program account + * + * @return Signature of the confirmed transaction + */ +export async function updateDefaultAccountState( + connection: Connection, + payer: Signer, + mint: PublicKey, + state: AccountState, + freezeAuthority: Signer | PublicKey, + multiSigners: Signer[] = [], + confirmOptions?: ConfirmOptions, + programId = TOKEN_2022_PROGRAM_ID, +): Promise; +``` + +### Updating the Freeze Authority + +Lastly, you may want to update the `freezeAuthority` to another account. Say you +want to handle the freezing and thawing by a program for example. You can do +this, by calling `setAuthority`, adding in the correct accounts and passing in +the `authorityType`, which in this case would be `AuthorityType.FreezeAccount`. + +```ts +/** + * Assign a new authority to the account + * + * @param connection Connection to use + * @param payer Payer of the transaction fees + * @param account Address of the account + * @param currentAuthority Current authority of the specified type + * @param authorityType Type of authority to set + * @param newAuthority New authority of the account + * @param multiSigners Signing accounts if `currentAuthority` is a multisig + * @param confirmOptions Options for confirming the transaction + * @param programId SPL Token program account + * + * @return Signature of the confirmed transaction + */ + +await setAuthority( + connection, + payer, + mint, + currentAuthority, + AuthorityType.FreezeAccount, + newAuthority, + [], + undefined, + TOKEN_2022_PROGRAM_ID, +); +``` + +## Lab + +In this lab we will be creating a mint which all new token accounts are frozen +upon creation by using the `default state` extension. We will then write tests +to check if the extension is working as intended by attempting to mint and +transfer the tokens in a frozen and thawed account state. + +#### 1. Setup Environment + +To get started, create an empty directory named `default-account-state` and +navigate to it. We'll be initializing a brand new project. Run `npm init` and +follow through the prompts. + +Next, we'll need to add our dependencies. Run the following to install the +required packages: + +```bash +npm i @solana-developers/helpers@2 @solana/spl-token @solana/web3.js@1 esrun dotenv typescript +``` + +Create a directory named `src`. In this directory, create a file named +`index.ts`. This is where we will run checks against the rules of this +extension. 
Paste the following code in `index.ts`: + +```ts +import { + AccountState, + TOKEN_2022_PROGRAM_ID, + getAccount, + mintTo, + thawAccount, + transfer, + createAccount, +} from "@solana/spl-token"; +import { Connection, Keypair, PublicKey } from "@solana/web3.js"; +// import { createTokenExtensionMintWithDefaultState } from "./mint-helper"; // This will be uncommented later +import { initializeKeypair, makeKeypairs } from "@solana-developers/helpers"; + +const connection = new Connection("http://127.0.0.1:8899", "confirmed"); +const payer = await initializeKeypair(connection); + +const [mintKeypair, ourTokenAccountKeypair, otherTokenAccountKeypair] = + makeKeypairs(3); +const mint = mintKeypair.publicKey; +const decimals = 2; +const defaultState = AccountState.Frozen; + +const ourTokenAccount = ourTokenAccountKeypair.publicKey; + +// To satisfy the transferring tests +const otherTokenAccount = otherTokenAccountKeypair.publicKey; + +const amountToMint = 1000; +const amountToTransfer = 50; + +// CREATE MINT WITH DEFAULT STATE + +// CREATE TEST TOKEN ACCOUNTS + +// TEST: MINT WITHOUT THAWING + +// TEST: MINT WITH THAWING + +// TEST: TRANSFER WITHOUT THAWING + +// TEST: TRANSFER WITH THAWING +``` + +`index.ts` creates a connection to the specified validator node and calls +`initializeKeypair`. It also has a few variables we will be using in the rest of +this lab. The `index.ts` is where we'll end up calling the rest of our script +once we've written it. + +If you run into an error in `initializeKeypair` with airdropping, follow the +next step. + +#### 2. Run validator node + +For the sake of this guide, we'll be running our own validator node. + +In a separate terminal, run the following command: `solana-test-validator`. This +will run the node and also log out some keys and values. The value we need to +retrieve and use in our connection is the JSON RPC URL, which in this case is +`http://127.0.0.1:8899`. We then use that in the connection to specify to use +the local RPC URL. + +```typescript +const connection = new Connection("http://127.0.0.1:8899", "confirmed"); +``` + +Alternatively, if you'd like to use testnet or devnet, import the +`clusterApiUrl` from `@solana/web3.js` and pass it to the connection as such: + +```typescript +const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); +``` + +If you decide to use devnet, and have issues with airdropping sol. Feel free to +add the `keypairPath` parameter to `initializeKeypair`. You can get this from +running `solana config get` in your terminal. And then go to +[faucet.solana.com](https://faucet.solana.com/) and airdrop some sol to your +address. You can get your address from running `solana address` in your +terminal. + +#### 3. Helpers + +When we pasted the `index.ts` code from earlier, we added the following helpers: + +- `initializeKeypair`: This function creates the keypair for the `payer` and + also airdrops some SOL to it +- `makeKeypairs`: This function creates keypairs without airdropping any SOL + +Additionally we have some initial accounts: + +- `payer`: Used to pay for and be the authority for everything +- `mintKeypair`: Our mint that will have the `default state` extension +- `ourTokenAccountKeypair`: The token account owned by payer that we'll use for + testing +- `otherTokenAccountKeypair`: Another token used for testing + +#### 4. 
Create Mint with Default Account State

When creating a mint with the default state extension, we must create the
account instruction, initialize the default account state for the mint account,
and initialize the mint itself.

Create an asynchronous function named `createTokenExtensionMintWithDefaultState`
in `src/mint-helpers.ts`. This function will create the mint such that all new
token accounts will be “frozen” to start. The function will take the following
arguments:

- `connection`: The connection object
- `payer`: Payer for the transaction
- `mintKeypair`: Keypair for the new mint
- `decimals`: Mint decimals
- `defaultState`: The default state for new token accounts, e.g.
  `AccountState.Frozen`

The first step in creating a mint is reserving space on Solana with the
`SystemProgram.createAccount` method. This requires specifying the payer's
keypair (the account that will fund the creation and provide SOL for rent
exemption), the new mint account's public key (`mintKeypair.publicKey`), the
space required to store the mint information on the blockchain, the amount of
SOL (lamports) necessary to exempt the account from rent, and the ID of the
token program that will manage this mint account (`TOKEN_2022_PROGRAM_ID`).

```typescript
const mintLen = getMintLen([ExtensionType.DefaultAccountState]);
// Minimum lamports required for Mint Account
const lamports = await connection.getMinimumBalanceForRentExemption(mintLen);

const createAccountInstruction = SystemProgram.createAccount({
  fromPubkey: payer.publicKey,
  newAccountPubkey: mintKeypair.publicKey,
  space: mintLen,
  lamports,
  programId: TOKEN_2022_PROGRAM_ID,
});
```

After the mint account creation, the next step involves initializing it with the
default state. The `createInitializeDefaultAccountStateInstruction` function is
used to generate an instruction that enables the mint to set the `defaultState`
of any new token accounts.

```typescript
const initializeDefaultAccountStateInstruction =
  createInitializeDefaultAccountStateInstruction(
    mintKeypair.publicKey,
    defaultState,
    TOKEN_2022_PROGRAM_ID,
  );
```

Next, let's add the mint instruction by calling
`createInitializeMintInstruction` and passing in the required arguments. This
function, provided by the SPL Token package, constructs a transaction
instruction that initializes a new mint.
+ +```typescript +const initializeMintInstruction = createInitializeMintInstruction( + mintKeypair.publicKey, + decimals, + payer.publicKey, // Designated Mint Authority + payer.publicKey, // Designated Freeze Authority + TOKEN_2022_PROGRAM_ID, +); +``` + +Lastly, let's add all of the instructions to a transaction and send it to the +blockchain: + +```typescript +const transaction = new Transaction().add( + createAccountInstruction, + initializeDefaultAccountStateInstruction, + initializeMintInstruction, +); + +return await sendAndConfirmTransaction(connection, transaction, [ + payer, + mintKeypair, +]); +``` + +Putting it all together, the final `src/mint-helpers.ts` file will look like +this: + +```ts +import { + AccountState, + ExtensionType, + TOKEN_2022_PROGRAM_ID, + createInitializeDefaultAccountStateInstruction, + createInitializeMintInstruction, + getMintLen, +} from "@solana/spl-token"; +import { + Connection, + Keypair, + SystemProgram, + Transaction, + sendAndConfirmTransaction, +} from "@solana/web3.js"; + +/** + * Creates the token mint with the default state + * @param connection + * @param payer + * @param mintKeypair + * @param decimals + * @param defaultState + * @returns signature of the transaction + */ +export async function createTokenExtensionMintWithDefaultState( + connection: Connection, + payer: Keypair, + mintKeypair: Keypair, + decimals: number = 2, + defaultState: AccountState, +): Promise { + const mintLen = getMintLen([ExtensionType.DefaultAccountState]); + // Minimum lamports required for Mint Account + const lamports = await connection.getMinimumBalanceForRentExemption(mintLen); + + const createAccountInstruction = SystemProgram.createAccount({ + fromPubkey: payer.publicKey, + newAccountPubkey: mintKeypair.publicKey, + space: mintLen, + lamports, + programId: TOKEN_2022_PROGRAM_ID, + }); + + const initializeDefaultAccountStateInstruction = + createInitializeDefaultAccountStateInstruction( + mintKeypair.publicKey, + defaultState, + TOKEN_2022_PROGRAM_ID, + ); + + const initializeMintInstruction = createInitializeMintInstruction( + mintKeypair.publicKey, + decimals, + payer.publicKey, // Designated Mint Authority + payer.publicKey, // Designated Freeze Authority + TOKEN_2022_PROGRAM_ID, + ); + + const transaction = new Transaction().add( + createAccountInstruction, + initializeDefaultAccountStateInstruction, + initializeMintInstruction, + ); + + return await sendAndConfirmTransaction(connection, transaction, [ + payer, + mintKeypair, + ]); +} +``` + +#### 6. Test Setup + +Now that we have the ability to create a mint with a default state for all of +it's new token accounts, let's write some tests to see how it functions. + +#### 6.1 Create Mint with Default State + +Let's first create a mint with the default state of `frozen`. To do this we call +the `createTokenExtensionMintWithDefaultState` function we just created in out +`index.ts` file: + +```ts +// CREATE MINT WITH DEFAULT STATE +await createTokenExtensionMintWithDefaultState( + connection, + payer, + mintKeypair, + decimals, + defaultState, +); +``` + +#### 6.2 Create Test Token Accounts + +Now, let's create two new Token accounts to test with. We can accomplish this by +calling the `createAccount` helper provided by the SPL Token library. We will +use the keypairs we generated at the beginning: `ourTokenAccountKeypair` and +`otherTokenAccountKeypair`. 
+ +```typescript +// CREATE TEST TOKEN ACCOUNTS +// Transferring from account +await createAccount( + connection, + payer, + mint, + payer.publicKey, + ourTokenAccountKeypair, + undefined, + TOKEN_2022_PROGRAM_ID, +); +// Transferring to account +await createAccount( + connection, + payer, + mint, + payer.publicKey, + otherTokenAccountKeypair, + undefined, + TOKEN_2022_PROGRAM_ID, +); +``` + +#### 7 Tests + +Now let's write some tests to show the interactions that can be had with the +`default state` extension. + +We'll write four tests in total: + +- Minting without thawing the recipient's account +- Minting with thawing the recipient's account +- Transferring without thawing the recipient's account +- Transferring with thawing the recipient's account + +#### 7.1 Minting without thawing the recipient's account + +This test will attempt to mint a token to `ourTokenAccount` without thawing the +account. This test is expected to fail as the account will be frozen on the mint +attempt. Remember: when a token account is frozen, the balance cannot change. + +To do this, let's wrap a `mintTo` function in a `try catch` and print out the +respected result: + +```typescript +// TEST: MINT WITHOUT THAWING +try { + // Attempt to mint without thawing + await mintTo( + connection, + payer, + mint, + ourTokenAccount, + payer.publicKey, + amountToMint, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, + ); + + console.error("Should not have minted..."); +} catch (error) { + console.log( + "✅ - We expected this to fail because the account is still frozen.", + ); +} +``` + +Test this by running the script: + +```bash +esrun src/index.ts +``` + +We should see the following error logged out in the terminal, meaning the +extension is working as intended. +`✅ - We expected this to fail because the account is still frozen.` + +#### 7.2 Minting with thawing the recipient's account + +This test will attempt to mint a token after thawing the token account. This +test is expected to pass as the account will be thawed on the mint attempt. + +We can create this test by calling `thawAccount` and then `mintTo`: + +```typescript +// TEST: MINT WITH THAWING +// Unfreeze frozen token +await thawAccount( + connection, + payer, + ourTokenAccount, + mint, + payer.publicKey, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, +); +// Mint tokens to tokenAccount +await mintTo( + connection, + payer, + mint, + ourTokenAccount, + payer.publicKey, + amountToMint, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, +); + +const ourTokenAccountWithTokens = await getAccount( + connection, + ourTokenAccount, + undefined, + TOKEN_2022_PROGRAM_ID, +); + +console.log( + `✅ - The new account balance is ${Number(ourTokenAccountWithTokens.amount)} after thawing and minting.`, +); +``` + +Go ahead and run the script, the transaction should succeed. + +```bash +esrun src/index.ts +``` + +#### 7.3 Transferring without thawing the recipient's account + +Now that we've tested minting, we can test transferring our tokens frozen and +not. First lets test a transfer without thawing the recipient's token account. +Remember, by default, the `otherTokenAccountKeypair` is frozen due to the +extension. + +Again, we expect this test to fail, since the `otherTokenAccountKeypair` is +frozen and it's balance cannot change. 
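If you want to double-check that assumption first, you can fetch the recipient
account and inspect its state. This is an optional sketch (not required for the
test) that uses the `getAccount` helper already imported in `index.ts`:

```typescript
// Optional sanity check: the recipient account should start out frozen
const recipientAccount = await getAccount(
  connection,
  otherTokenAccount,
  undefined,
  TOKEN_2022_PROGRAM_ID,
);
console.log("Recipient frozen?", recipientAccount.isFrozen); // expected: true
```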
+ +To test this, let's wrap a `transfer` function in a `try catch`: + +```typescript +// TEST: TRANSFER WITHOUT THAWING +try { + await transfer( + connection, + payer, + ourTokenAccount, + otherTokenAccount, + payer, + amountToTransfer, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, + ); + + console.error("Should not have minted..."); +} catch (error) { + console.log( + "✅ - We expected this to fail because the account is still frozen.", + ); +} +``` + +Run the test and see the results: + +```bash +esrun src/index.ts +``` + +#### 7.4 Transferring with thawing the recipient's account + +The last test we'll create tests transferring tokens after thawing the token +account we will be transferring to. This test is expected to pass, since all +token accounts will now be thawed. + +We'll do this by calling `thawAccount` and then `transfer`: + +```typescript +// TEST: TRANSFER WITH THAWING +// Unfreeze frozen token +await thawAccount( + connection, + payer, + otherTokenAccount, + mint, + payer.publicKey, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, +); + +await transfer( + connection, + payer, + ourTokenAccount, + otherTokenAccount, + payer, + amountToTransfer, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, +); + +const otherTokenAccountWithTokens = await getAccount( + connection, + otherTokenAccount, + undefined, + TOKEN_2022_PROGRAM_ID, +); + +console.log( + `✅ - The new account balance is ${Number( + otherTokenAccountWithTokens.amount, + )} after thawing and transferring.`, +); +``` + +Run all of the tests one last time and see the results: + +```bash +esrun src/index.ts +``` + +Remember the key takeaways: + +- The `default state` extension, enforces the default state on _all_ new token + accounts. +- Frozen account's balance cannot change. + +Congratulations! We've just created and tested a mint using the default account +extension! + +## Challenge + +Add tests for burning tokens from frozen and thawed token accounts (hint, one +will fail, one will succeed). + +To get you started: + +```ts +// TEST: Burn tokens in frozen account +await freezeAccount( + connection, + payer, + ourTokenAccount, + mint, + payer.publicKey, + [], + undefined, + TOKEN_2022_PROGRAM_ID, +); + +await burn( + connection, + payer, + ourTokenAccount, + mint, + payer.publicKey, + 1, + [], + undefined, + TOKEN_2022_PROGRAM_ID, +); +``` diff --git a/content/courses/token-extensions/group-member.mdx b/content/courses/token-extensions/group-member.mdx new file mode 100644 index 000000000..96eb49ed9 --- /dev/null +++ b/content/courses/token-extensions/group-member.mdx @@ -0,0 +1,1076 @@ +--- +title: Group, Group Pointer, Member, Member Pointer +objectives: + - Create an NFT collection using the group, group pointer, member, and member + pointer extensions. + - Update the authority and max size of a group. +description: "Make an NFT collection using token extensions." +--- + +## Summary + +- 'token groups' are commonly used to implement NFT collections. +- The `group pointer` extension sets a group account on the token mint, to hold + token group information. +- The `group` extension allows us to save group data within the mint itself. +- The `member pointer` extension sets an individual member account on the token + mint, to hold information about the token's membership within a group. +- The `member` extension allows us to save member data within the mint itself. + +## Overview + +SPL tokens are valuable alone but can be combined for extra functionality. 
We +can do this in the Token Extensions Program by combining the `group`, +`group pointer`, `member`, and `member pointer` extensions. The most common use +case for these extensions is to create a collection of NFTs. + +To create a collection of NFTs we need two parts: the "collection" NFT and all +of the NFTs within the collection. We can do this entirely using token +extensions. The "collection" NFT can be a single mint combining the `metadata`, +`metadata pointer`, `group`, and `group pointer` extensions. And then each +individual NFT within the collection can be an array of mints combining the +`metadata`, `metadata pointer`, `member`, and `member pointer` extensions. + +Although NFT collections are a common use-case, groups and members can be +applied to any token type. + +A quick note on `group pointer` vs `group`. The `group pointer` extension saves +the address of any onchain account that follows to the +[Token-Group Interface](https://github.com/solana-labs/solana-program-library/tree/master/token-group/interface). +While the `group` extension saves the Token-Group Interface data directly within +the mint account. Generally, these are used together where the `group pointer` +points to the mint itself. The same is true for `member pointer` vs `member`, +but with the member data. + +NOTE: A group can have many members, but a member can only belong to one group. + +### Group and Group Pointer + +The `group` and `group pointer` extensions define a token group. The onchain +data is as follows: + +- `update_authority`: The authority that can sign to update the group. +- `mint`: The mint of the group token. +- `size`: The current number of group members. +- `max_size`: The maximum number of group members. + +```rust +type OptionalNonZeroPubkey = Pubkey; // if all zeroes, interpreted as `None` +type PodU32 = [u8; 4]; +type Pubkey = [u8; 32]; + +/// Type discriminant: [214, 15, 63, 132, 49, 119, 209, 40] +/// First 8 bytes of `hash("spl_token_group_interface:group")` +pub struct TokenGroup { + /// The authority that can sign to update the group + pub update_authority: OptionalNonZeroPubkey, + /// The associated mint, used to counter spoofing to be sure that group + /// belongs to a particular mint + pub mint: Pubkey, + /// The current number of group members + pub size: PodU32, + /// The maximum number of group members + pub max_size: PodU32, +} +``` + +#### Creating a mint with group and group pointer + +Creating a mint with the `group` and `group pointer` involves four instructions: + +- `SystemProgram.createAccount` +- `createInitializeGroupPointerInstruction` +- `createInitializeMintInstruction` +- `createInitializeGroupInstruction` + +The first instruction `SystemProgram.createAccount` allocates space on the +blockchain for the mint account. However like all Token Extensions Program +mints, we need to calculate the size and cost of the mint. This can be +accomplished by using `getMintLen` and `getMinimumBalanceForRentExemption`. In +this case, we'll call `getMintLen` with only the `ExtensionType.GroupPointer`. +Then we add `TOKEN_GROUP_SIZE` to the mint length to account for the group data. 
+ +To get the mint length and create account instruction, do the following: + +```ts +// get mint length +const extensions = [ExtensionType.GroupPointer]; +const mintLength = getMintLen(extensions) + TOKEN_GROUP_SIZE; + +const mintLamports = + await connection.getMinimumBalanceForRentExemption(mintLength); + +const createAccountInstruction = SystemProgram.createAccount({ + fromPubkey: payer.publicKey, + newAccountPubkey: mintKeypair.publicKey, + space: mintLength, + lamports: mintLamports, + programId: TOKEN_2022_PROGRAM_ID, +}); +``` + +The second instruction `createInitializeGroupPointerInstruction` initializes the +group pointer. It takes the mint, optional authority that can set the group +address, address that holds the group and the owning program as it's arguments. + +```ts +const initializeGroupPointerInstruction = + createInitializeGroupPointerInstruction( + mintKeypair.publicKey, + payer.publicKey, + mintKeypair.publicKey, + TOKEN_2022_PROGRAM_ID, + ); +``` + +The third instruction `createInitializeMintInstruction` initializes the mint. + +```ts +const initializeMintInstruction = createInitializeMintInstruction( + mintKeypair.publicKey, + decimals, + payer.publicKey, + payer.publicKey, + TOKEN_2022_PROGRAM_ID, +); +``` + +The fourth instruction `createInitializeGroupInstruction` actually initializes +the group and stores the configuration on the group account. + +```ts +const initializeGroupInstruction = createInitializeGroupInstruction({ + group: mintKeypair.publicKey, + maxSize: maxMembers, + mint: mintKeypair.publicKey, + mintAuthority: payer.publicKey, + programId: TOKEN_2022_PROGRAM_ID, + updateAuthority: payer.publicKey, +}); +``` + +Finally, we add the instructions to the transaction and submit it to the Solana +network. + +```ts +const mintTransaction = new Transaction().add( + createAccountInstruction, + initializeGroupPointerInstruction, + initializeMintInstruction, + initializeGroupInstruction, +); + +const signature = await sendAndConfirmTransaction( + connection, + mintTransaction, + [payer, mintKeypair], + { commitment: "finalized" }, +); +``` + +### Update group authority + +To update the authority of a group, we just need the +`tokenGroupUpdateGroupAuthority` function. + +```ts +import { tokenGroupUpdateGroupAuthority } from "@solana/spl-token"; + +const signature = await tokenGroupUpdateGroupAuthority( + connection, //connection - Connection to use + payer, // payer - Payer for the transaction fees + mint.publicKey, // mint - Group mint + oldAuthority, // account - Public key of the old update authority + newAuthority, // account - Public key of the new update authority + undefined, // multiSigners - Signing accounts if `authority` is a multisig + { commitment: "finalized" }, // confirmOptions - Options for confirming thr transaction + TOKEN_2022_PROGRAM_ID, // programId - SPL Token program account +); +``` + +### Update max size of a group + +To update the max size of a group we just need the +`tokenGroupUpdateGroupMaxSize` function. 
+ +```ts +import { tokenGroupUpdateGroupMaxSize } from "@solana/spl-token"; + +const signature = tokenGroupUpdateGroupMaxSize( + connection, //connection - Connection to use + payer, // payer - Payer for the transaction fees + mint.publicKey, // mint - Group mint + updateAuthority, // account - Update authority of the group + 4, // maxSize - new max size of the group + undefined, // multiSigners — Signing accounts if `authority` is a multisig + { commitment: "finalized" }, // confirmOptions - Options for confirming thr transaction + TOKEN_2022_PROGRAM_ID, // programId - SPL Token program account +); +``` + +### Member and Member Pointer + +The `member` and `member pointer` extensions define a token member. The onchain +data is as follows: + +- `mint`: The mint of the member token. +- `group`: The address of the group account. +- `member_number`: The member number (index within the group). + +```rust +/// Type discriminant: [254, 50, 168, 134, 88, 126, 100, 186] +/// First 8 bytes of `hash("spl_token_group_interface:member")` +pub struct TokenGroupMember { + /// The associated mint, used to counter spoofing to be sure that member + /// belongs to a particular mint + pub mint: Pubkey, + /// The pubkey of the `TokenGroup` + pub group: Pubkey, + /// The member number + pub member_number: PodU32, +} +``` + +#### Creating a mint with member pointer + +Creating a mint with the `member pointer` and `member` extensions involves four +instructions: + +- `SystemProgram.createAccount` +- `createInitializeGroupMemberPointerInstruction` +- `createInitializeMintInstruction` +- `createInitializeMemberInstruction` + +The first instruction `SystemProgram.createAccount` allocates space on the +blockchain for the mint account. However, like all Token Extensions Program +mints, we need to calculate the size and cost of the mint. This can be +accomplished by using `getMintLen` and `getMinimumBalanceForRentExemption`. In +this case, we'll call `getMintLen` with the `ExtensionType.GroupMemberPointer`. +Then we have to add `TOKEN_GROUP_MEMBER_SIZE` to the mint length to account for +the member data. + +To get the mint length and create account instruction, do the following: + +```ts +// get mint length +const extensions = [ExtensionType.GroupMemberPointer]; +const mintLength = getMintLen(extensions) + TOKEN_GROUP_MEMBER_SIZE; + +const mintLamports = + await connection.getMinimumBalanceForRentExemption(mintLength); + +const createAccountInstruction = SystemProgram.createAccount({ + fromPubkey: payer.publicKey, + newAccountPubkey: mintKeypair.publicKey, + space: mintLength, + lamports: mintLamports, + programId: TOKEN_2022_PROGRAM_ID, +}); +``` + +The second instruction `createInitializeGroupMemberPointerInstruction` +initializes the group member pointer. It takes the mint, optional authority that +can set the group address, address that holds the group, and the owning program +as its arguments. + +```ts +const initializeGroupMemberPointerInstruction = + createInitializeGroupMemberPointerInstruction( + mintKeypair.publicKey, + payer.publicKey, + mintKeypair.publicKey, + TOKEN_2022_PROGRAM_ID, + ); +``` + +The third instruction `createInitializeMintInstruction` initializes the mint. + +```ts +const initializeMintInstruction = createInitializeMintInstruction( + mintKeypair.publicKey, + decimals, + payer.publicKey, + payer.publicKey, + TOKEN_2022_PROGRAM_ID, +); +``` + +The fourth instruction `createInitializeMemberInstruction` actually initializes +the member and stores the configuration on the member account. 
This function +takes the group address as an argument and associates the member with that +group. + +```ts +const initializeMemberInstruction = createInitializeMemberInstruction({ + group: groupAddress, + groupUpdateAuthority: payer.publicKey, + member: mintKeypair.publicKey, + memberMint: mintKeypair.publicKey, + memberMintAuthority: payer.publicKey, + programId: TOKEN_2022_PROGRAM_ID, +}); +``` + +Finally, we add the instructions to the transaction and submit it to the Solana +network. + +```ts +const mintTransaction = new Transaction().add( + createAccountInstruction, + initializeGroupMemberPointerInstruction, + initializeMintInstruction, + initializeMemberInstruction, +); + +const signature = await sendAndConfirmTransaction( + connection, + mintTransaction, + [payer, mintKeypair], + { commitment: "finalized" }, +); +``` + +### Fetch group and member data + +#### Get group pointer state + +To retrieve the state of the `group pointer` for a mint, we need to fetch the +account using `getMint` and then parse this data using the +`getGroupPointerState` function. This returns us the `GroupPointer` struct. + +```ts +/** GroupPointer as stored by the program */ +export interface GroupPointer { + /** Optional authority that can set the group address */ + authority: PublicKey | null; + /** Optional account address that holds the group */ + groupAddress: PublicKey | null; +} +``` + +To get the `GroupPointer` data, call the following: + +```ts +const groupMint = await getMint( + connection, + mint, + "confirmed", + TOKEN_2022_PROGRAM_ID, +); + +const groupPointerData: GroupPointer = getGroupPointerState(groupMint); +``` + +#### Get group state + +To retrieve the group state for a mint, we need to fetch the account using +`getMint` and then parse this data using the `getTokenGroupState` function. This +returns the `TokenGroup` struct. + +```ts +export interface TokenGroup { + /** The authority that can sign to update the group */ + updateAuthority?: PublicKey; + /** The associated mint, used to counter spoofing to be sure that group belongs to a particular mint */ + mint: PublicKey; + /** The current number of group members */ + size: number; + /** The maximum number of group members */ + maxSize: number; +} +``` + +To get the `TokenGroup` data, call the following: + +```ts +const groupMint = await getMint( + connection, + mint, + "confirmed", + TOKEN_2022_PROGRAM_ID, +); + +const groupData: TokenGroup = getTokenGroupState(groupMint); +``` + +#### Get group member pointer state + +To retrieve the `member pointer` state for a mint, we fetch the mint with +`getMint` and then parse with `getGroupMemberPointerState`. This returns us the +`GroupMemberPointer` struct. + +```ts +/** GroupMemberPointer as stored by the program */ +export interface GroupMemberPointer { + /** Optional authority that can set the member address */ + authority: PublicKey | null; + /** Optional account address that holds the member */ + memberAddress: PublicKey | null; +} +``` + +To get the `GroupMemberPointer` data, call the following: + +```ts +const memberMint = await getMint( + connection, + mint, + "confirmed", + TOKEN_2022_PROGRAM_ID, +); + +const memberPointerData = getGroupMemberPointerState(memberMint); +``` + +#### Get group member state + +To retrieve a mint's `member` state, we fetch the mint with `getMint` and then +parse with `getTokenGroupMemberState`. This returns the `TokenGroupMember` +struct. 
+

```ts
export interface TokenGroupMember {
  /** The associated mint, used to counter spoofing to be sure that member belongs to a particular mint */
  mint: PublicKey;
  /** The pubkey of the `TokenGroup` */
  group: PublicKey;
  /** The member number */
  memberNumber: number;
}
```

To get the `TokenGroupMember` data, call the following:

```ts
const memberMint = await getMint(
  connection,
  mint,
  "confirmed",
  TOKEN_2022_PROGRAM_ID,
);
const memberData = getTokenGroupMemberState(memberMint);
```

## Lab

In this lab, we'll create a Cool Cats NFT collection using the `group`,
`group pointer`, `member`, and `member pointer` extensions in conjunction with
the `metadata` and `metadata pointer` extensions.

The Cool Cats NFT collection will have a group NFT with three member NFTs within
it.

#### 1. Getting started

To get started, clone
[this](https://github.com/Unboxed-Software/solana-lab-group-member) repository's
`starter` branch.

```bash
git clone https://github.com/Unboxed-Software/solana-lab-group-member.git
cd solana-lab-group-member
git checkout starter
npm install
```

The `starter` code comes with:

- `index.ts`: creates a connection object and calls `initializeKeypair`. This is
  where we will write our script.
- `assets`: a folder containing the images for our NFT collection.
- `helper.ts`: helper functions for uploading metadata.

#### 2. Run validator node

For the sake of this guide, we'll be running our own validator node.

In a separate terminal, run the following command: `solana-test-validator`. This
will run the node and also log out some keys and values. The value we need to
retrieve and use in our connection is the JSON RPC URL, which in this case is
`http://127.0.0.1:8899`. We then use that URL in our connection so the script
talks to the local validator.

`const connection = new Connection("http://127.0.0.1:8899", "confirmed");`

With the validator set up correctly, you may run `index.ts` and confirm
everything is working.

```bash
npx esrun src/index.ts
```

#### 3. Setup group metadata

Before creating our group NFT, we must prepare and upload the group metadata. We
are using devnet Irys (Arweave) to upload the image and metadata. This
functionality is provided for you in `helpers.ts`.

For ease of this lesson, we've provided assets for the NFTs in the `assets`
directory.

If you'd like to use your own files and metadata, feel free!

To get our group metadata ready, we have to do the following:

1. Format our metadata for upload using the `LabNFTMetadata` interface from
   `helper.ts`
2. Call `uploadOffChainMetadata` from `helpers.ts`
3. Format everything, including the resulting URI from the previous step, into
   the `TokenMetadata` interface from the `@solana/spl-token-metadata` library

Note: We are using devnet Irys, which is free for uploads under 100kb.
+ +```ts +// Create group metadata + +const groupMetadata: LabNFTMetadata = { + mint: groupMintKeypair, + imagePath: "assets/collection.png", + tokenName: "cool-cats-collection", + tokenDescription: "Collection of Cool Cat NFTs", + tokenSymbol: "MEOW", + tokenExternalUrl: "https://solana.com/", + tokenAdditionalMetadata: {}, + tokenUri: "", +}; + +// Upload offchain metadata +groupMetadata.tokenUri = await uploadOffChainMetadata(payer, groupMetadata); + +// Format group token metadata +const collectionTokenMetadata: TokenMetadata = { + name: groupMetadata.tokenName, + mint: groupMintKeypair.publicKey, + symbol: groupMetadata.tokenSymbol, + uri: groupMetadata.tokenUri, + updateAuthority: payer.publicKey, + additionalMetadata: Object.entries( + groupMetadata.tokenAdditionalMetadata || [], + ).map(([trait_type, value]) => [trait_type, value]), +}; +``` + +Feel free to run the script and make sure everything uploads. + +```bash +npx esrun src/index.ts +``` + +#### 3. Create a mint with group and group pointer + +Let's create the group NFT by creating a mint with the `metadata`, +`metadata pointer`, `group` and `group pointer` extensions. + +This NFT is the visual representation of our collection. + +Let's first define the inputs to our new function `createTokenGroup`: + +- `connection`: Connection to the blockchain +- `payer`: The keypair paying for the transaction +- `mintKeypair`: The mint keypair +- `decimals`: The mint decimals ( 0 for NFTs ) +- `maxMembers`: The maximum number of members allowed in the group +- `metadata`: The metadata for the group mint + +```ts +export async function createTokenGroup( + connection: Connection, + payer: Keypair, + mintKeypair: Keypair, + decimals: number, + maxMembers: number, + metadata: TokenMetadata, +): Promise; +``` + +To make our NFT, we will store the metadata directly on the mint account using +the `metadata` and `metadata pointer` extensions. We'll also save some info +about the group with the `group` and `group pointer` extensions. + +To create our group NFT, we need the following instructions: + +- `SystemProgram.createAccount`: Allocates space on Solana for the mint account. + We can get the `mintLength` and `mintLamports` using `getMintLen` and + `getMinimumBalanceForRentExemption` respectively. +- `createInitializeGroupPointerInstruction`: Initializes the group pointer +- `createInitializeMetadataPointerInstruction`: Initializes the metadata pointer +- `createInitializeMintInstruction`: Initializes the mint +- `createInitializeGroupInstruction`: Initializes the group +- `createInitializeInstruction`: Initializes the metadata + +Finally, we need to add all of these instructions to a transaction and send it +to the Solana network, and return the signature. We can do this by calling +`sendAndConfirmTransaction`. 
+ +```ts +import { + sendAndConfirmTransaction, + Connection, + Keypair, + SystemProgram, + Transaction, + TransactionSignature, +} from "@solana/web3.js"; + +import { + ExtensionType, + createInitializeMintInstruction, + getMintLen, + TOKEN_2022_PROGRAM_ID, + createInitializeGroupInstruction, + createInitializeGroupPointerInstruction, + TYPE_SIZE, + LENGTH_SIZE, + createInitializeMetadataPointerInstruction, + TOKEN_GROUP_SIZE, +} from "@solana/spl-token"; +import { + TokenMetadata, + createInitializeInstruction, + pack, +} from "@solana/spl-token-metadata"; + +export async function createTokenGroup( + connection: Connection, + payer: Keypair, + mintKeypair: Keypair, + decimals: number, + maxMembers: number, + metadata: TokenMetadata, +): Promise { + const extensions: ExtensionType[] = [ + ExtensionType.GroupPointer, + ExtensionType.MetadataPointer, + ]; + + const metadataLen = TYPE_SIZE + LENGTH_SIZE + pack(metadata).length + 500; + const mintLength = getMintLen(extensions); + const totalLen = mintLength + metadataLen + TOKEN_GROUP_SIZE; + + const mintLamports = + await connection.getMinimumBalanceForRentExemption(totalLen); + + const mintTransaction = new Transaction().add( + SystemProgram.createAccount({ + fromPubkey: payer.publicKey, + newAccountPubkey: mintKeypair.publicKey, + space: mintLength, + lamports: mintLamports, + programId: TOKEN_2022_PROGRAM_ID, + }), + createInitializeGroupPointerInstruction( + mintKeypair.publicKey, + payer.publicKey, + mintKeypair.publicKey, + TOKEN_2022_PROGRAM_ID, + ), + createInitializeMetadataPointerInstruction( + mintKeypair.publicKey, + payer.publicKey, + mintKeypair.publicKey, + TOKEN_2022_PROGRAM_ID, + ), + createInitializeMintInstruction( + mintKeypair.publicKey, + decimals, + payer.publicKey, + payer.publicKey, + TOKEN_2022_PROGRAM_ID, + ), + createInitializeGroupInstruction({ + group: mintKeypair.publicKey, + maxSize: maxMembers, + mint: mintKeypair.publicKey, + mintAuthority: payer.publicKey, + programId: TOKEN_2022_PROGRAM_ID, + updateAuthority: payer.publicKey, + }), + createInitializeInstruction({ + metadata: mintKeypair.publicKey, + mint: mintKeypair.publicKey, + mintAuthority: payer.publicKey, + name: metadata.name, + programId: TOKEN_2022_PROGRAM_ID, + symbol: metadata.symbol, + updateAuthority: payer.publicKey, + uri: metadata.uri, + }), + ); + + const signature = await sendAndConfirmTransaction( + connection, + mintTransaction, + [payer, mintKeypair], + ); + + return signature; +} +``` + +Now that we have our function, let's call it in our `index.ts` file. + +```ts +// Create group +const signature = await createTokenGroup( + connection, + payer, + groupMintKeypair, + decimals, + maxMembers, + collectionTokenMetadata, +); + +console.log( + `Created collection mint with metadata:\n${getExplorerLink("tx", signature, "localnet")}\n`, +); +``` + +Before we run the script, lets fetch the newly created group NFT and print it's +contents. 
Let's do this in `index.ts`: + +```ts +// Fetch the group +const groupMint = await getMint( + connection, + groupMintKeypair.publicKey, + "confirmed", + TOKEN_2022_PROGRAM_ID, +); +const fetchedGroupMetadata = await getTokenMetadata( + connection, + groupMintKeypair.publicKey, +); +const metadataPointerState = getMetadataPointerState(groupMint); +const groupData = getGroupPointerState(groupMint); + +console.log("\n---------- GROUP DATA -------------\n"); +console.log("Group Mint: ", groupMint.address.toBase58()); +console.log( + "Metadata Pointer Account: ", + metadataPointerState?.metadataAddress?.toBase58(), +); +console.log("Group Pointer Account: ", groupData?.groupAddress?.toBase58()); +console.log("\n--- METADATA ---\n"); +console.log("Name: ", fetchedGroupMetadata?.name); +console.log("Symbol: ", fetchedGroupMetadata?.symbol); +console.log("Uri: ", fetchedGroupMetadata?.uri); +console.log("\n------------------------------------\n"); +``` + +Now we can run the script and see the group NFT we created. + +```bash +npx esrun src/index.ts +``` + +#### 4. Setup member NFT Metadata + +Now that we've created our group NFT, we can create the member NFTs. But before +we actually create them, we need to prepare their metadata. + +The flow is the exact same to what we did with the group NFT. + +1. We need to format our metadata for upload using the `LabNFTMetadata` + interface from `helper.ts` +2. Call the `uploadOffChainMetadata` from `helpers.ts` +3. Format everything including the resulting uri from the previous step into the + `TokenMetadata` interface from the `@solana/spl-token-metadata` library. + +However, since we have three members, we'll loop through each step for each +member. + +First, let's define the metadata for each member: + +```ts +// Define member metadata +const membersMetadata: LabNFTMetadata[] = [ + { + mint: cat0Mint, + imagePath: "assets/cat_0.png", + tokenName: "Cat 1", + tokenDescription: "Adorable cat", + tokenSymbol: "MEOW", + tokenExternalUrl: "https://solana.com/", + tokenAdditionalMetadata: {}, + tokenUri: "", + }, + { + mint: cat1Mint, + imagePath: "assets/cat_1.png", + tokenName: "Cat 2", + tokenDescription: "Sassy cat", + tokenSymbol: "MEOW", + tokenExternalUrl: "https://solana.com/", + tokenAdditionalMetadata: {}, + tokenUri: "", + }, + { + mint: cat2Mint, + imagePath: "assets/cat_2.png", + tokenName: "Cat 3", + tokenDescription: "Silly cat", + tokenSymbol: "MEOW", + tokenExternalUrl: "https://solana.com/", + tokenAdditionalMetadata: {}, + tokenUri: "", + }, +]; +``` + +Now let's loop through each member and upload their metadata. + +```ts +// Upload member metadata +for (const member of membersMetadata) { + member.tokenUri = await uploadOffChainMetadata(payer, member); +} +``` + +Finally, let's format the metadata for each member into the `TokenMetadata` +interface: + +Note: We'll want to carry over the keypair since we'll need it to create the +member NFTs. + +```ts +// Format token metadata +const memberTokenMetadata: { mintKeypair: Keypair; metadata: TokenMetadata }[] = + membersMetadata.map(member => ({ + mintKeypair: member.mint, + metadata: { + name: member.tokenName, + mint: member.mint.publicKey, + symbol: member.tokenSymbol, + uri: member.tokenUri, + updateAuthority: payer.publicKey, + additionalMetadata: Object.entries( + member.tokenAdditionalMetadata || [], + ).map(([trait_type, value]) => [trait_type, value]), + } as TokenMetadata, + })); +``` + +#### 5. Create member NFTs + +Just like the group NFT, we need to create the member NFTs. 
Let's do this in a +new file called `create-member.ts`. It will look very similar to the +`create-group.ts` file, except we'll use the `member` and `member pointer` +extensions instead of the `group` and `group pointer` extensions. + +First, let's define the inputs to our new function `createTokenMember`: + +- `connection`: Connection to the blockchain +- `payer`: The keypair paying for the transaction +- `mintKeypair`: The mint keypair +- `decimals`: The mint decimals ( 0 for NFTs ) +- `metadata`: The metadata for the group mint +- `groupAddress`: The address of the group account - in this case it's the group + mint itself + +```ts +export async function createTokenMember( + connection: Connection, + payer: Keypair, + mintKeypair: Keypair, + decimals: number, + metadata: TokenMetadata, + groupAddress: PublicKey, +): Promise; +``` + +Just like the group NFT, we need the following instructions: + +- `SystemProgram.createAccount`: Allocates space on Solana for the mint account. + We can get the `mintLength` and `mintLamports` using `getMintLen` and + `getMinimumBalanceForRentExemption` respectively. +- `createInitializeGroupMemberPointerInstruction`: Initializes the member + pointer +- `createInitializeMetadataPointerInstruction`: Initializes the metadata pointer +- `createInitializeMintInstruction`: Initializes the mint +- `createInitializeMemberInstruction`: Initializes the member +- `createInitializeInstruction`: Initializes the metadata + +Finally, we need to add these instructions to a transaction, send it to the +Solana network, and return the signature. We can do this by calling +`sendAndConfirmTransaction`. + +```ts +import { + sendAndConfirmTransaction, + Connection, + Keypair, + SystemProgram, + Transaction, + TransactionSignature, + PublicKey, +} from "@solana/web3.js"; + +import { + ExtensionType, + createInitializeMintInstruction, + getMintLen, + TOKEN_2022_PROGRAM_ID, + TYPE_SIZE, + LENGTH_SIZE, + createInitializeMetadataPointerInstruction, + TOKEN_GROUP_MEMBER_SIZE, + createInitializeGroupMemberPointerInstruction, + createInitializeMemberInstruction, +} from "@solana/spl-token"; +import { + TokenMetadata, + createInitializeInstruction, + pack, +} from "@solana/spl-token-metadata"; + +export async function createTokenMember( + connection: Connection, + payer: Keypair, + mintKeypair: Keypair, + decimals: number, + metadata: TokenMetadata, + groupAddress: PublicKey, +): Promise { + const extensions: ExtensionType[] = [ + ExtensionType.GroupMemberPointer, + ExtensionType.MetadataPointer, + ]; + + const metadataLen = TYPE_SIZE + LENGTH_SIZE + pack(metadata).length; + const mintLength = getMintLen(extensions); + const totalLen = mintLength + metadataLen + TOKEN_GROUP_MEMBER_SIZE; + + const mintLamports = + await connection.getMinimumBalanceForRentExemption(totalLen); + + const mintTransaction = new Transaction().add( + SystemProgram.createAccount({ + fromPubkey: payer.publicKey, + newAccountPubkey: mintKeypair.publicKey, + space: mintLength, + lamports: mintLamports, + programId: TOKEN_2022_PROGRAM_ID, + }), + createInitializeGroupMemberPointerInstruction( + mintKeypair.publicKey, + payer.publicKey, + mintKeypair.publicKey, + TOKEN_2022_PROGRAM_ID, + ), + createInitializeMetadataPointerInstruction( + mintKeypair.publicKey, + payer.publicKey, + mintKeypair.publicKey, + TOKEN_2022_PROGRAM_ID, + ), + createInitializeMintInstruction( + mintKeypair.publicKey, + decimals, + payer.publicKey, + payer.publicKey, + TOKEN_2022_PROGRAM_ID, + ), + createInitializeMemberInstruction({ + group: 
groupAddress, + groupUpdateAuthority: payer.publicKey, + member: mintKeypair.publicKey, + memberMint: mintKeypair.publicKey, + memberMintAuthority: payer.publicKey, + programId: TOKEN_2022_PROGRAM_ID, + }), + createInitializeInstruction({ + metadata: mintKeypair.publicKey, + mint: mintKeypair.publicKey, + mintAuthority: payer.publicKey, + name: metadata.name, + programId: TOKEN_2022_PROGRAM_ID, + symbol: metadata.symbol, + updateAuthority: payer.publicKey, + uri: metadata.uri, + }), + ); + + const signature = await sendAndConfirmTransaction( + connection, + mintTransaction, + [payer, mintKeypair], + ); + + return signature; +} +``` + +Let's add our new function to `index.ts` and call it for each member: + +```ts +// Create member mints +for (const memberMetadata of memberTokenMetadata) { + const signature = await createTokenMember( + connection, + payer, + memberMetadata.mintKeypair, + decimals, + memberMetadata.metadata, + groupMintKeypair.publicKey, + ); + + console.log( + `Created ${memberMetadata.metadata.name} NFT:\n${getExplorerLink("tx", signature, "localnet")}\n`, + ); +} +``` + +Let's fetch our newly created member NFTs and display their contents. + +```ts +for (const member of membersMetadata) { + const memberMint = await getMint( + connection, + member.mint.publicKey, + "confirmed", + TOKEN_2022_PROGRAM_ID, + ); + const memberMetadata = await getTokenMetadata( + connection, + member.mint.publicKey, + ); + const metadataPointerState = getMetadataPointerState(memberMint); + const memberPointerData = getGroupMemberPointerState(memberMint); + const memberData = getTokenGroupMemberState(memberMint); + + console.log("\n---------- MEMBER DATA -------------\n"); + console.log("Member Mint: ", memberMint.address.toBase58()); + console.log( + "Metadata Pointer Account: ", + metadataPointerState?.metadataAddress?.toBase58(), + ); + console.log("Group Account: ", memberData?.group?.toBase58()); + console.log( + "Member Pointer Account: ", + memberPointerData?.memberAddress?.toBase58(), + ); + console.log("Member Number: ", memberData?.memberNumber); + console.log("\n--- METADATA ---\n"); + console.log("Name: ", memberMetadata?.name); + console.log("Symbol: ", memberMetadata?.symbol); + console.log("Uri: ", memberMetadata?.uri); + console.log("\n------------------------------------\n"); +} +``` + +Lastly, let's run the script and see our full collection of NFTs! + +```bash +npx esrun src/index.ts +``` + +That's it! If you're having troubles feel free to check out the `solution` +[branch in the repository](https://github.com/Unboxed-Software/solana-lab-group-member/tree/solution). + +## Challenge + +Go create a NFT collection of your own using the the `group`, `group pointer`, +`member` and `member pointer` extensions. diff --git a/content/courses/token-extensions/immutable-owner.mdx b/content/courses/token-extensions/immutable-owner.mdx new file mode 100644 index 000000000..bfd83f4f3 --- /dev/null +++ b/content/courses/token-extensions/immutable-owner.mdx @@ -0,0 +1,485 @@ +--- +title: Immutable Owner +objectives: + - Create token accounts with an immutable owner + - Explain the use cases of the immutable owner extension + - Experiment with the rules of the extension +description: + "Make a token that ensures the account storing the tokens cannot change owner." +--- + +## Summary + +- The `immutable owner` extension ensures that once a token account is created, + its owner is unchangeable, securing the ownership against any modifications. 
+- Token accounts with this extension can have only one permanent state regarding + ownership: **Immutable**. +- Associated Token Accounts (ATAs) have the `immutable owner` extension enabled + by default. +- The `immutable owner` extension is a token account extension; enabled on each + token account, not the mint. + +## Overview + +Associated Token Accounts (ATAs) are uniquely determined by the owner and the +mint, streamlining the process of identifying the correct Token Account for a +specific owner. Initially, any token account could change its owner, even ATAs. +This led to security concerns, as users could mistakenly send funds to an +account no longer owned by the expected recipient. This can unknowingly lead to +the loss of funds should the owner change. + +The `immutable owner` extension, which is automatically applied to ATAs, +prevents any changes in ownership. This extension can also be enabled for new +Token Accounts created through the Token Extensions Program, guaranteeing that +once ownership is set it is permanent. This secures accounts against +unauthorized access and transfer attempts. + +It is important to note that this extension is a Token Account extension, +meaning it's on the token account, not the mint. + +### Creating token account with immutable owner + +All Token Extensions Program ATAs have immutable owners enabled by default. If +you want to create an ATA you may use `createAssociatedTokenAccount`. + +Outside of ATAs, which enable the immutable owner extension by default, you can +enable it manually on any Token Extensions Program token account. + +Initializing a token account with immutable owner involves three instructions: + +- `SystemProgram.createAccount` +- `createInitializeImmutableOwnerInstruction` +- `createInitializeAccountInstruction` + +Note: We are assuming a mint has already been created. + +The first instruction `SystemProgram.createAccount` allocates space on the +blockchain for the token account. This instruction accomplishes three things: + +- Allocates `space` +- Transfers `lamports` for rent +- Assigns to its owning program + +```typescript +const tokenAccountKeypair = Keypair.generate(); +const tokenAccount = tokenAccountKeypair.publicKey; +const extensions = [ExtensionType.ImmutableOwner]; + +const tokenAccountLen = getAccountLen(extensions); +const lamports = + await connection.getMinimumBalanceForRentExemption(tokenAccountLen); + +const createTokenAccountInstruction = SystemProgram.createAccount({ + fromPubkey: payer.publicKey, + newAccountPubkey: tokenAccount, + space: tokenAccountLen, + lamports, + programId: TOKEN_2022_PROGRAM_ID, +}); +``` + +The second instruction `createInitializeImmutableOwnerInstruction` initializes +the immutable owner extension. + +```typescript +const initializeImmutableOwnerInstruction = + createInitializeImmutableOwnerInstruction( + tokenAccount, + TOKEN_2022_PROGRAM_ID, + ); +``` + +The third instruction `createInitializeAccountInstruction` initializes the token +account. + +```typescript +const initializeAccountInstruction = createInitializeAccountInstruction( + tokenAccount, + mint, + owner.publicKey, + TOKEN_2022_PROGRAM_ID, +); +``` + +Lastly, add all of these instructions to a transaction and send it to the +blockchain. 
+ +```ts +const transaction = new Transaction().add( + createTokenAccountInstruction, + initializeImmutableOwnerInstruction, + initializeAccountInstruction, +); + +transaction.feePayer = payer.publicKey; + +return await sendAndConfirmTransaction(connection, transaction, [ + payer, + owner, + tokenAccountKeypair, +]); +``` + +When the transaction with these three instructions is sent, a new token account +is created with the immutable owner extension. + +## Lab + +In this lab, we'll be creating a token account with an immutable owner. We'll +then write tests to check if the extension is working as intended by attempting +to transfer ownership of the token account. + +#### 1. Setup Environment + +To get started, create an empty directory named `immutable-owner` and navigate +to it. We'll be initializing a brand new project. Run `npm init -y` to make a +project with defaults. + +Next, we'll need to add our dependencies. Run the following to install the +required packages: + +```bash +npm i @solana-developers/helpers@2 @solana/spl-token @solana/web3.js@1 esrun dotenv typescript +``` + +Create a directory named `src`. In this directory, create a file named +`index.ts`. This is where we will run checks against the rules of this +extension. Paste the following code in `index.ts`: + +```ts +import { + AuthorityType, + TOKEN_2022_PROGRAM_ID, + createMint, + setAuthority, +} from "@solana/spl-token"; +import { + Connection, + Keypair, + LAMPORTS_PER_SOL, + PublicKey, +} from "@solana/web3.js"; +import { initializeKeypair, makeKeypairs } from "@solana-developers/helpers"; + +const connection = new Connection("http://127.0.0.1:8899", "confirmed"); +const payer = await initializeKeypair(connection); + +const [otherOwner, mintKeypair, ourTokenAccountKeypair] = makeKeypairs(3); +const ourTokenAccount = ourTokenAccountKeypair.publicKey; +``` + +#### 2. Run validator node + +For the sake of this guide, we'll be running our own validator node. + +In a separate terminal, run the following command: `solana-test-validator`. This +will run the node and also log out some keys and values. The value we need to +retrieve and use in our connection is the JSON RPC URL, which in this case is +`http://127.0.0.1:8899`. We then use that in the connection to specify to use of +the local RPC URL. + +```typescript +const connection = new Connection("http://127.0.0.1:8899", "confirmed"); +``` + +Alternatively, if you'd like to use testnet or devnet, import the +`clusterApiUrl` from `@solana/web3.js` and pass it to the connection as such: + +```typescript +const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); +``` + +#### 3. Helpers + +When we pasted the `index.ts` code from earlier, we added the following helpers: + +- `initializeKeypair`: This function creates the keypair for the `payer` and + also airdrops 2 testnet SOL to it +- `makeKeypairs`: This function creates keypairs without airdropping any SOL + +Additionally, we have some initial accounts: + +- `payer`: Used to pay for and be the authority for everything +- `mintKeypair`: Our mint +- `ourTokenAccountKeypair`: The token account owned by the payer that we'll use + for testing +- `otherOwner`: The token account we'll try to transfer ownership of the two + immutable accounts to + +#### 4. Create mint + +Let's create the mint we'll be using for our token accounts. + +Inside of `src/index.ts`, the required dependencies will already be imported, +along with the aforementioned accounts. 
Add the following `createMint` function +beneath the existing code: + +```typescript +// CREATE MINT +const mint = await createMint( + connection, + payer, + mintKeypair.publicKey, + null, + 2, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, +); +``` + +#### 5. Create Token Account with immutable owner + +Remember, all ATAs come with the `immutable owner` extension. However, we're +going to create a token account using a keypair. This requires us to create the +account, initialize the immutable owner extension, and initialize the account. + +Inside the `src` directory, create a new file named `token-helper.ts` and create +a new function within it called `createTokenAccountWithImmutableOwner`. This +function is where we'll be creating the associated token account with the +immutable owner. The function will take the following arguments: + +- `connection`: The connection object +- `mint`: Public key for the new mint +- `payer`: Payer for the transaction +- `owner`: Owner of the associated token account +- `tokenAccountKeypair`: The token account keypair associated with the token + account + +```ts +import { + ExtensionType, + TOKEN_2022_PROGRAM_ID, + createInitializeAccountInstruction, + createInitializeImmutableOwnerInstruction, + getAccountLen, +} from "@solana/spl-token"; +import { + Connection, + Keypair, + PublicKey, + SystemProgram, + Transaction, + sendAndConfirmTransaction, +} from "@solana/web3.js"; + +export async function createTokenAccountWithImmutableOwner( + connection: Connection, + mint: PublicKey, + payer: Keypair, + owner: Keypair, + tokenAccountKeypair: Keypair, +): Promise { + // Create account instruction + + // Enable immutable owner instruction + + // Initialize account instruction + + // Send to blockchain + + return "TODO Replace with signature"; +} +``` + +The first step in creating the token account is reserving space on Solana with +the **`SystemProgram.createAccount`** method. This requires specifying the +payer's keypair, (the account that will fund the creation and provide SOL for +rent exemption), the new token account's public key +(`tokenAccountKeypair.publicKey`), the space required to store the token +information on the blockchain, the amount of SOL (lamports) necessary to exempt +the account from rent and the ID of the token program that will manage this +token account (**`TOKEN_2022_PROGRAM_ID`**). + +```typescript +// CREATE ACCOUNT INSTRUCTION +const tokenAccount = tokenAccountKeypair.publicKey; + +const extensions = [ExtensionType.ImmutableOwner]; + +const tokenAccountLen = getAccountLen(extensions); +const lamports = + await connection.getMinimumBalanceForRentExemption(tokenAccountLen); + +const createTokenAccountInstruction = SystemProgram.createAccount({ + fromPubkey: payer.publicKey, + newAccountPubkey: tokenAccount, + space: tokenAccountLen, + lamports, + programId: TOKEN_2022_PROGRAM_ID, +}); +``` + +After the token account creation, the next instruction initializes the +`immutable owner` extension. The `createInitializeImmutableOwnerInstruction` +function is used to generate this instruction. + +```typescript +// ENABLE IMMUTABLE OWNER INSTRUCTION +const initializeImmutableOwnerInstruction = + createInitializeImmutableOwnerInstruction( + tokenAccount, + TOKEN_2022_PROGRAM_ID, + ); +``` + +We then add the initialize account instruction by calling +`createInitializeAccountInstruction` and passing in the required arguments. 
This +function is provided by the SPL Token package and it constructs a transaction +instruction that initializes a new token account. + +```typescript +// INITIALIZE ACCOUNT INSTRUCTION +const initializeAccountInstruction = createInitializeAccountInstruction( + tokenAccount, + mint, + owner.publicKey, + TOKEN_2022_PROGRAM_ID, +); +``` + +Now that the instructions have been created, the token account can be created +with an immutable owner. + +```typescript +// SEND TO BLOCKCHAIN +const transaction = new Transaction().add( + createTokenAccountInstruction, + initializeImmutableOwnerInstruction, + initializeAccountInstruction, +); + +transaction.feePayer = payer.publicKey; + +const signature = await sendAndConfirmTransaction(connection, transaction, [ + payer, + owner, + tokenAccountKeypair, +]); + +return signature; +``` + +Now that we've added the functionality for `token-helper`, we can create our +test token accounts. One of the two test token accounts will be created by +calling `createTokenAccountWithImmutableOwner`. The other will be created with +the baked-in SPL helper function `createAssociatedTokenAccount`. This helper +will create an associated token account which by default includes an immutable +owner. For the sake of this guide, we'll be testing against both of these +approaches. + +Back in `index.ts` underneath the mint variable, create the following two token +accounts: + +``` +// CREATE TEST TOKEN ACCOUNTS: Create explicitly with immutable owner instructions +const createOurTokenAccountSignature = await createTokenAccountWithImmutableOwner( + connection, + mint, + payer, + payer, + ourTokenAccountKeypair +); + +// CREATE TEST TOKEN ACCOUNTS: Create an associated token account with default immutable owner +const associatedTokenAccount = await createAssociatedTokenAccount( + connection, + payer, + mint, + payer.publicKey, + undefined, + TOKEN_2022_PROGRAM_ID, +); +``` + +That's it for the token accounts! Now we can move on and start testing that the +extension rules are applied correctly by running a few tests against it. + +If you'd like to test that everything is working, feel free to run the script. + +```bash +npx esrun src/index.ts +``` + +#### 6. Tests + +**Test trying to transfer owner** + +The first token account that is being created is the account is tied to +`ourTokenAccountKeypair`. We'll be attempting to transfer ownership of the +account to `otherOwner` which was generated earlier. This test is expected to +fail as the new authority is not the owner of the account upon creation. + +Add the following code to your `src/index.ts` file: + +```typescript +// TEST TRANSFER ATTEMPT ON IMMUTABLE ACCOUNT +try { + await setAuthority( + connection, + payer, + ourTokenAccount, + payer.publicKey, + AuthorityType.AccountOwner, + otherOwner.publicKey, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, + ); + + console.error("You should not be able to change the owner of the account."); +} catch (error) { + console.log( + `✅ - We expected this to fail because the account is immutable, and cannot change owner.`, + ); +} +``` + +We can now invoke the `setAuthority` function by running +`npx esrun src/index.ts`. We should see the following error logged out in the +terminal, meaning the extension is working as we need it to: +`✅ - We expected this to fail because the account is immutable, and cannot change owner.` + +**Test trying to transfer owner with associated token account** + +This test will attempt to transfer ownership to the Associated Token Account. 
+This test is also expected to fail as the new authority is not the owner of the +account upon creation. + +Below the previous test, add the following try/catch: + +```typescript +// TEST TRANSFER ATTEMPT ON ASSOCIATED IMMUTABLE ACCOUNT +try { + await setAuthority( + connection, + payer, + associatedTokenAccount, + payer.publicKey, + AuthorityType.AccountOwner, + otherOwner.publicKey, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, + ); + + console.error("You should not be able to change the owner of the account."); +} catch (error) { + console.log( + `✅ - We expected this to fail because the associated token account is immutable, and cannot change owner.`, + ); +} +``` + +Now we can run `npx esrun src/index.ts`. This test should log a failure message +similar to the one from the previous test. This means that both of our token +accounts are in fact immutable and working as intended. + +Congratulations! We've just created token accounts and tested the immutable +owner extension! If you are stuck at any point, you can find the working code on +the `solution` branch of +[this repository](https://github.com/Unboxed-Software/solana-lab-immutable-owner/tree/solution). + +## Challenge + +Go create your own token account with an immutable owner. diff --git a/content/courses/token-extensions/index.mdx b/content/courses/token-extensions/index.mdx new file mode 100644 index 000000000..e714c439b --- /dev/null +++ b/content/courses/token-extensions/index.mdx @@ -0,0 +1,5 @@ +--- +author: unboxed +title: Token Extensions +description: Create tokens with features like non-transferability, transfer hooks, and more. +--- diff --git a/content/courses/token-extensions/interest-bearing-token.mdx b/content/courses/token-extensions/interest-bearing-token.mdx new file mode 100644 index 000000000..316d440b6 --- /dev/null +++ b/content/courses/token-extensions/interest-bearing-token.mdx @@ -0,0 +1,698 @@ +--- +title: Interest Bearing Token +objectives: + - Create a mint account with the interest bearing extension + - Explain the use cases of interest bearing tokens + - Experiment with the rules of the extension +description: "Make a token that earns interest over time." +--- + +## Summary + +- Creators can set an interest rate and store it directly on the mint account. +- The underlying token quantity for interest bearing tokens remains unchanged. +- The accrued interest can be displayed for UI purposes without the need to + frequently rebase or update to adjust for accrued interest. +- The lab demonstrates configuring a mint account that is set to mint with an + interest rate. The test case also shows how to update the interest rate, along + with retrieving the rate from the token. + +## Overview + +Tokens with values that either increase or decrease over time have practical +applications in the real world, with bonds being a prime example. Previously, +the ability to reflect this dynamic in tokens was limited to the use of proxy +contracts, necessitating frequent rebasing or updates. + +The `interest bearing token` extension helps with this. By leveraging the +`interest bearing token` extension and the `amount_to_ui_amount` function, users +can apply an interest rate to their tokens and retrieve the updated total, +including interest, at any given moment. + +The calculation of interest is done continuously, factoring in the network's +timestamp. However, discrepancies in the network's time could result in accrued +interest being slightly less than anticipated, though this situation is +uncommon. 
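+
+As a rough mental model, the displayed amount grows with continuous compounding
+over the elapsed time. Below is a minimal sketch of that idea; it is
+illustrative only, and the constants and function name are assumptions for this
+example rather than the Token Extensions Program's actual implementation.
+
+```typescript
+// Illustrative sketch only: approximate how a displayed (UI) amount grows
+// under continuous compounding. Constants here are assumptions for this
+// example, not values taken from the program.
+const SECONDS_PER_YEAR = 60 * 60 * 24 * 365.25; // assumed annualization window
+const BASIS_POINTS_DIVISOR = 10_000; // rates are expressed in basis points
+
+function approximateUiAmount(
+  rawAmount: number, // token amount in base units
+  decimals: number, // mint decimals
+  rateBasisPoints: number, // e.g. 32_767 for the maximum rate
+  elapsedSeconds: number, // time elapsed since the rate was set
+): number {
+  const rate = rateBasisPoints / BASIS_POINTS_DIVISOR;
+  const growth = Math.exp(rate * (elapsedSeconds / SECONDS_PER_YEAR));
+  return (rawAmount / 10 ** decimals) * growth;
+}
+
+// 100 base units on a 9-decimal mint, at the maximum rate, after one day
+console.log(approximateUiAmount(100, 9, 32_767, 60 * 60 * 24));
+```
+
+In practice, rely on `amountToUiAmount` (covered below) rather than computing
+this yourself, since the program also accounts for rate changes over time.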
+
+It's important to note that this mechanism does not generate new tokens; the
+displayed amount simply includes the accumulated interest, making the change
+purely aesthetic. That being said, this value is stored within the mint
+account, and programs can take advantage of it to create functionality beyond
+pure aesthetics.
+
+### Adding interest rate to token
+
+Initializing an interest bearing token involves three instructions:
+
+- `SystemProgram.createAccount`
+- `createInitializeInterestBearingMintInstruction`
+- `createInitializeMintInstruction`
+
+The first instruction `SystemProgram.createAccount` allocates space on the
+blockchain for the mint account. This instruction accomplishes three things:
+
+- Allocates `space`
+- Transfers `lamports` for rent
+- Assigns it to its owning program
+
+```typescript
+SystemProgram.createAccount({
+  fromPubkey: payer.publicKey,
+  newAccountPubkey: mint,
+  space: mintLen,
+  lamports: mintLamports,
+  programId: TOKEN_2022_PROGRAM_ID,
+}),
+```
+
+The second instruction `createInitializeInterestBearingMintInstruction`
+initializes the interest bearing token extension. The defining argument that
+dictates the interest rate will be a variable we create named `rate`. The
+`rate` is defined in
+[basis points](https://www.investopedia.com/terms/b/basispoint.asp).
+
+```typescript
+createInitializeInterestBearingMintInstruction(
+  mint,
+  rateAuthority.publicKey,
+  rate,
+  TOKEN_2022_PROGRAM_ID,
+),
+```
+
+The third instruction `createInitializeMintInstruction` initializes the mint.
+
+```typescript
+createInitializeMintInstruction(
+  mint,
+  decimals,
+  mintAuthority.publicKey,
+  null,
+  TOKEN_2022_PROGRAM_ID,
+);
+```
+
+When the transaction with these three instructions is sent, a new interest
+bearing token is created with the specified rate configuration.
+
+### Fetching accumulated interest
+
+To retrieve the accumulated interest on a token at any given point, first use
+the `getAccount` function to fetch token information, including the amount and
+any associated data, passing in the connection, payer's token account, and the
+relevant program ID, `TOKEN_2022_PROGRAM_ID`.
+
+Next, utilize the `amountToUiAmount` function with the obtained token
+information, along with additional parameters such as connection, payer, and
+mint, to convert the token amount to its corresponding UI amount, which
+inherently includes any accumulated interest.
+
+```typescript
+const tokenInfo = await getAccount(
+  connection,
+  payerTokenAccount,
+  undefined,
+  TOKEN_2022_PROGRAM_ID,
+);
+
+/**
+ * Get the amount as a string using mint-prescribed decimals
+ *
+ * @param connection Connection to use
+ * @param payer      Payer of the transaction fees
+ * @param mint       Mint for the account
+ * @param amount     Amount of tokens to be converted to Ui Amount
+ * @param programId  SPL Token program account
+ *
+ * @return Ui Amount generated
+ */
+const uiAmount = await amountToUiAmount(
+  connection,
+  payer,
+  mint,
+  tokenInfo.amount,
+  TOKEN_2022_PROGRAM_ID,
+);
+
+console.log("UI Amount: ", uiAmount);
+```
+
+The return value of `uiAmount` is a string representation of the UI amount and
+will look similar to this: `0.0000005000001557528245`.
+
+### Update rate authority
+
+Solana provides a helper function, `setAuthority`, to set a new authority on an
+interest bearing token.
+
+Use the `setAuthority` function to assign a new authority to the account. 
You'll +need to provide the `connection`, the account paying for transaction fees +(payer), the token account to update (mint), the current authority's public key, +the type of authority to update (in this case, 7 represents the `InterestRate` +authority type), and the new authority's public key. + +After setting the new authority, use the `updateRateInterestBearingMint` +function to update the interest rate for the account. Pass in the necessary +parameters: `connection`, `payer`, `mint`, the new authority's public key, the +updated interest rate, and the program ID. + +```typescript +/** + * Assign a new authority to the account + * + * @param connection Connection to use + * @param payer Payer of the transaction fees + * @param account Address of the account + * @param currentAuthority Current authority of the specified type + * @param authorityType Type of authority to set + * @param newAuthority New authority of the account + * @param multiSigners Signing accounts if `currentAuthority` is a multisig + * @param confirmOptions Options for confirming the transaction + * @param programId SPL Token program account + * + * @return Signature of the confirmed transaction + */ + +await setAuthority( + connection, + payer, + mint, + rateAuthority, + AuthorityType.InterestRate, // Rate type (InterestRate) + otherAccount.publicKey, // new rate authority, + [], + undefined, + TOKEN_2022_PROGRAM_ID, +); + +await updateRateInterestBearingMint( + connection, + payer, + mint, + otherAccount, // new rate authority + 10, // updated rate + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, +); +``` + +## Lab + +In this lab, we're establishing Interest Bearing Tokens via the Token-2022 +program on Solana. We'll initialize these tokens with a specific interest rate, +update the rate with proper authorization, and observe how interest accumulates +on tokens over time. + +#### 1. Setup Environment + +To get started, create an empty directory named `interest-bearing-token` and +navigate to it. Run `npm init -y` to initialize a brand new project. + +Next, we'll need to add our dependencies. Run the following to install the +required packages: + +```bash +npm i @solana-developers/helpers@2 @solana/spl-token @solana/web3.js@1 esrun dotenv typescript +``` + +Create a directory named `src`. In this directory, create a file named +`index.ts`. This is where we will run checks against the rules of this +extension. 
Paste the following code in `index.ts`: + +```ts +import { Connection, Keypair, PublicKey } from "@solana/web3.js"; + +import { + ExtensionType, + getMintLen, + TOKEN_2022_PROGRAM_ID, + getMint, + getInterestBearingMintConfigState, + updateRateInterestBearingMint, + amountToUiAmount, + mintTo, + createAssociatedTokenAccount, + getAccount, + AuthorityType, +} from "@solana/spl-token"; + +import { initializeKeypair, makeKeypairs } from "@solana-developers/helpers"; + +const connection = new Connection("http://127.0.0.1:8899", "confirmed"); +const payer = await initializeKeypair(connection); +const [otherAccount, mintKeypair] = makeKeypairs(2); +const mint = mintKeypair.publicKey; +const rateAuthority = payer; + +const rate = 32_767; + +// Create an interest-bearing token + +// Create an associated token account + +// Create the getInterestBearingMint function + +// Attempt to update the interest rate + +// Attempt to update the interest rate with the incorrect owner + +// Log the accrued interest + +// Log the interest-bearing mint configuration state + +// Update the rate authority and attempt to update the interest rate with the new authority +``` + +`index.ts` creates a connection to the specified validator node and calls +`initializeKeypair`. It also has a few variables we will be using in the rest of +this lab. The `index.ts` is where we'll end up calling the rest of our script +once we've written it. + +#### 2. Run validator node + +For the sake of this guide, we'll be running our own validator node. + +In a separate terminal, run the following command: `solana-test-validator`. This +will run the node and also log out some keys and values. The value we need to +retrieve and use in our connection is the JSON RPC URL, which in this case is +`http://127.0.0.1:8899`. We then use that in the connection to specify to use +the local RPC URL. + +`const connection = new Connection("http://127.0.0.1:8899", "confirmed");` + +#### 3. Helpers + +When we pasted the `index.ts` code from earlier, we added the following helpers: + +- `initializeKeypair`: This function creates the keypair for the `payer` and + also airdrops 1 testnet SOL to it +- `makeKeypairs`: This function creates keypairs without airdropping any SOL + +Additionally, we have some initial accounts: + +- `payer`: Used to pay for and be the authority for everything +- `mintKeypair`: Our mint that will have the `interest bearing token` extension +- `otherAccount`: The account we will use to attempt to update interest +- `otherTokenAccountKeypair`: Another token used for testing + +#### 4. Create Mint with interest bearing token + +This function is where we'll be creating the token such that all new tokens will +be created with an interest rate. Create a new file inside of `src` named +`token-helper.ts`. 
+
+```typescript
+import {
+  ExtensionType,
+  TOKEN_2022_PROGRAM_ID,
+  createInitializeInterestBearingMintInstruction,
+  createInitializeMintInstruction,
+  getMintLen,
+} from "@solana/spl-token";
+import {
+  sendAndConfirmTransaction,
+  Connection,
+  Keypair,
+  Transaction,
+  PublicKey,
+  SystemProgram,
+} from "@solana/web3.js";
+
+export async function createTokenWithInterestRateExtension(
+  connection: Connection,
+  payer: Keypair,
+  mint: PublicKey,
+  rateAuthority: Keypair,
+  rate: number,
+  mintKeypair: Keypair,
+) {
+  const mintAuthority = payer;
+  const decimals = 9;
+}
+```
+
+This function will take the following arguments:
+
+- `connection`: The connection object
+- `payer`: Payer for the transaction
+- `mint`: Public key for the new mint
+- `rateAuthority`: Keypair of the account that can modify the token; in this
+  case, it is `payer`
+- `rate`: Chosen interest rate for the token. In our case, this will be
+  `32_767` (i.e. 32767), the max rate for the interest bearing token extension
+- `mintKeypair`: Keypair for the new mint
+
+When creating an interest bearing token, we must create the account
+instruction, add the interest rate instruction, and initialize the mint itself.
+Inside of `createTokenWithInterestRateExtension` in `src/token-helper.ts` there
+are a few variables already created that will be used to create the interest
+bearing token. Add the following code beneath the declared variables:
+
+```ts
+const extensions = [ExtensionType.InterestBearingConfig];
+const mintLen = getMintLen(extensions);
+const mintLamports =
+  await connection.getMinimumBalanceForRentExemption(mintLen);
+
+const mintTransaction = new Transaction().add(
+  SystemProgram.createAccount({
+    fromPubkey: payer.publicKey,
+    newAccountPubkey: mint,
+    space: mintLen,
+    lamports: mintLamports,
+    programId: TOKEN_2022_PROGRAM_ID,
+  }),
+  createInitializeInterestBearingMintInstruction(
+    mint,
+    rateAuthority.publicKey,
+    rate,
+    TOKEN_2022_PROGRAM_ID,
+  ),
+  createInitializeMintInstruction(
+    mint,
+    decimals,
+    mintAuthority.publicKey,
+    null,
+    TOKEN_2022_PROGRAM_ID,
+  ),
+);
+
+await sendAndConfirmTransaction(
+  connection,
+  mintTransaction,
+  [payer, mintKeypair],
+  undefined,
+);
+```
+
+That's it for the token creation! Now we can move on and start adding tests.
+
+#### 5. Establish required accounts
+
+Inside of `src/index.ts`, the starting code already has some values related to
+the creation of the interest bearing token.
+
+Underneath the existing `rate` variable, add the following call to
+`createTokenWithInterestRateExtension` to create the interest bearing token.
+We'll also create an associated token account, which we'll use to mint the
+interest bearing tokens to and to run some tests checking that the accrued
+interest increases as expected.
+
+```typescript
+const rate = 32_767;
+
+// Create interest bearing token
+await createTokenWithInterestRateExtension(
+  connection,
+  payer,
+  mint,
+  rateAuthority,
+  rate,
+  mintKeypair,
+);
+
+// Create associated token account
+const payerTokenAccount = await createAssociatedTokenAccount(
+  connection,
+  payer,
+  mint,
+  payer.publicKey,
+  undefined,
+  TOKEN_2022_PROGRAM_ID,
+);
+```
+
+#### 6. Tests
+
+Before we start writing any tests, it would be helpful for us to have a
+function that takes in the `mint` and returns the current interest rate of that
+particular token.
+
+Let's utilize the `getInterestBearingMintConfigState` helper provided by the
+SPL library to do just that. 
We'll then create a function that is used in our tests +to log out the current interest rate of the mint. + +The return value of this function is an object with the following values: + +- `rateAuthority`: Keypair of the account that can modify the token +- `initializationTimestamp`: Timestamp of interest bearing token initialization +- `preUpdateAverageRate`: Last rate before update +- `lastUpdateTimestamp`: Timestamp of last update +- `currentRate`: Current interest rate + +Add the following types and function: + +```typescript +// Create getInterestBearingMint function +interface GetInterestBearingMint { + connection: Connection; + mint: PublicKey; +} + +async function getInterestBearingMint(inputs: GetInterestBearingMint) { + const { connection, mint } = inputs; + // retrieves information of the mint + const mintAccount = await getMint( + connection, + mint, + undefined, + TOKEN_2022_PROGRAM_ID, + ); + + // retrieves the interest state of mint + const interestBearingMintConfig = + await getInterestBearingMintConfigState(mintAccount); + + // returns the current interest rate + return interestBearingMintConfig?.currentRate; +} +``` + +**Updating interest rate** + +The Solana SPL library provides a helper function for updating the interest rate +of a token named `updateRateInterestBearingMint`. For this function to work +correctly, the `rateAuthority` of that token must be the same one of which the +token was created. If the `rateAuthority` is incorrect, updating the token will +result in a failure. + +Let's create a test to update the rate with the correct authority. Add the +following function calls: + +```typescript +// Attempt to update interest rate +const initialRate = await getInterestBearingMint({ connection, mint }); +try { + await updateRateInterestBearingMint( + connection, + payer, + mint, + payer, + 0, // updated rate + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, + ); + const newRate = await getInterestBearingMint({ connection, mint }); + + console.log( + `✅ - We expected this to pass because the rate has been updated. Old rate: ${initialRate}. New rate: ${newRate}`, + ); +} catch (error) { + console.error("You should be able to update the interest."); +} +``` + +Run `npx esrun src/index.ts`. We should see the following error logged out in +the terminal, meaning the extension is working as intended and the interest rate +has been updated: +`✅ - We expected this to pass because the rate has been updated. Old rate: 32767. New rate: 0` + +**Updating interest rate with incorrect rate authority** + +In this next test, let's try and update the interest rate with the incorrect +`rateAuthority`. Earlier we created a keypair named `otherAccount`. This will be +what we use as the `otherAccount` to attempt the change the interest rate. + +Below the previous test we created add the following code: + +```typescript +// Attempt to update the interest rate as the account other than the rate authority. +try { + await updateRateInterestBearingMint( + connection, + otherAccount, + mint, + otherAccount, // incorrect authority + 0, // updated rate + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, + ); + console.log("You should be able to update the interest."); +} catch (error) { + console.error( + `✅ - We expected this to fail because the owner is incorrect.`, + ); +} +``` + +Now run `npx esrun src/index.ts`. 
This is expected to fail and log out
+`✅ - We expected this to fail because the owner is incorrect.`
+
+**Mint tokens and read interest rate**
+
+So we've tested updating the interest rate. How do we check that the accrued
+interest increases when an account mints more tokens? We can use the
+`amountToUiAmount` and `getAccount` helpers from the SPL library to help us
+achieve this.
+
+Let's create a for loop that runs 5 times, minting 100 tokens per iteration and
+logging out the new accrued interest:
+
+```typescript
+// Log accrued interest
+{
+  // Logs out interest on token
+  for (let i = 0; i < 5; i++) {
+    const rate = await getInterestBearingMint({ connection, mint });
+    await mintTo(
+      connection,
+      payer,
+      mint,
+      payerTokenAccount,
+      payer,
+      100,
+      undefined,
+      undefined,
+      TOKEN_2022_PROGRAM_ID,
+    );
+
+    const tokenInfo = await getAccount(
+      connection,
+      payerTokenAccount,
+      undefined,
+      TOKEN_2022_PROGRAM_ID,
+    );
+
+    // Convert amount to UI amount with accrued interest
+    const uiAmount = await amountToUiAmount(
+      connection,
+      payer,
+      mint,
+      tokenInfo.amount,
+      TOKEN_2022_PROGRAM_ID,
+    );
+
+    console.log(
+      `Amount with accrued interest at ${rate}: ${tokenInfo.amount} tokens = ${uiAmount}`,
+    );
+  }
+}
+```
+
+You should see something similar to the logs below:
+
+```typescript
+Amount with accrued interest at 32767: 100 tokens = 0.0000001000000207670422
+Amount with accrued interest at 32767: 200 tokens = 0.0000002000000623011298
+Amount with accrued interest at 32767: 300 tokens = 0.0000003000001246022661
+Amount with accrued interest at 32767: 400 tokens = 0.00000040000020767045426
+Amount with accrued interest at 32767: 500 tokens = 0.0000005000003634233328
+```
+
+As you can see, the amount shown with accrued interest increases as more tokens
+are minted!
+
+**Log mint config**
+
+If for some reason you need to retrieve the mint config state, we can utilize
+the `getInterestBearingMintConfigState` helper we used earlier to display
+information about the interest bearing mint state.
+
+```ts
+// Log interest bearing mint config state
+const mintAccount = await getMint(
+  connection,
+  mint,
+  undefined,
+  TOKEN_2022_PROGRAM_ID,
+);
+
+// Get Interest Config for Mint Account
+const interestBearingMintConfig =
+  await getInterestBearingMintConfigState(mintAccount);
+
+console.log(
+  "\nMint Config:",
+  JSON.stringify(interestBearingMintConfig, null, 2),
+);
+```
+
+This should log out something that looks similar to this:
+
+```typescript
+Mint Config: {
+  "rateAuthority": "Ezv2bZZFTQEznBgTDmaPPwFCg7uNA5KCvMGBNvJvUmS",
+  "initializationTimestamp": 1709422265,
+  "preUpdateAverageRate": 32767,
+  "lastUpdateTimestamp": 1709422267,
+  "currentRate": 0
+}
+```
+
+#### 7. Update rate authority and interest rate
+
+Before we conclude this lab, let's set a new rate authority on the interest
+bearing token and attempt to update the interest rate. We do this by using the
+`setAuthority` function and passing in the original authority, specifying the
+rate type (in this case, 7 for `InterestRate`), and passing the new authority's
+public key.
+
+Once we set the new authority, we can attempt to update the interest rate. 
+ +```typescript +// Update rate authority and attempt to update interest rate with new authority +try { + await setAuthority( + connection, + payer, + mint, + rateAuthority, + AuthorityType.InterestRate, // Rate type (InterestRate) + otherAccount.publicKey, // new rate authority, + [], + undefined, + TOKEN_2022_PROGRAM_ID, + ); + + await updateRateInterestBearingMint( + connection, + payer, + mint, + otherAccount, // new authority + 10, // updated rate + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, + ); + + const newRate = await getInterestBearingMint({ connection, mint }); + + console.log( + `✅ - We expected this to pass because the rate can be updated with the new authority. New rate: ${newRate}`, + ); +} catch (error) { + console.error( + `You should be able to update the interest with new rate authority.`, + ); +} +``` + +This is expected to work and the new interest rate should be 10. + +Thats it! We've just created an interest bearing token, updated the interest +rate and logged the updated state of the token! + +## Challenge + +Create your own interest bearing token. diff --git a/content/courses/token-extensions/intro-to-token-extensions-program.mdx b/content/courses/token-extensions/intro-to-token-extensions-program.mdx new file mode 100644 index 000000000..545225bb9 --- /dev/null +++ b/content/courses/token-extensions/intro-to-token-extensions-program.mdx @@ -0,0 +1,483 @@ +--- +title: Intro to Token Extensions +objectives: + - Learn about the Token Extensions Program + - Understand that mints that use Token Extensions must be created with the + Token Extensions Program + - Learn about the wide variety of token extensions + - Use token extensions from the Solana CLI +description: + "Learn what token extensions are, and how to create tokens that use their + features." +--- + +## Summary + +- The existing Token Program on Solana provides interfaces for fungible and + non-fungible tokens. However as new features have been needed, various forks + of Token Program have been created to add features, posing adoption challenges + across the ecosystem. +- To introduce new token features without disrupting current users, wallets, and + decentralized applications (dApps), and to ensure the safety of existing + tokens, a new token program, Token Extensions Program (also called + Token-2022), has been developed. +- Token Extensions Program is a separate program with a separate address from + the original Token Program. It supports the exact same functions plus + additional ones through extensions. + +## Overview + +The Token Extensions Program, previously know as Token 2022, is a superset of +the functionality provided by the original Token Program. The Token Program +serves most needs for fungible and non-fungible tokens through a simple set of +interfaces and structures. Though simple and performant, the Token Program +lacked features that the developer community soon found need of. This +necessitated forks of the Token Program, potentially splitting the ecosystem. + +For example, say a university wants to send an NFT version of a diploma to a +graduate's wallet. How can we be sure that diploma is never transferred away to +a third party? In the current Token Program, this is not possible - we would +need a check in the transfer instruction that rejects all transactions. One +solution to this would be fork the Token Program, and add the check. However +this means it would be an entirely separate token program. 
The university would have to run a campaign for wallets and degree-checking
+dapps to adopt it. Additionally, what if different universities want different
+functionality? There would have to be some sort of University DAO just to
+manage these internal debates; maybe there would be several University DAOs...
+Or they could just use the `non-transferable token` extension in the new Token
+Extensions Program, which is a core Solana program adopted by everyone.
+
+This is why the Token Extensions Program was created: to vastly improve the
+functionality and customization of the original Token Program, starting with
+its most requested features. It does so while fully supporting all of the
+functions developers are used to in the original, and it leaves room for future
+improvements. Though it is a second program, two programs are much easier to
+adopt than dozens.
+
+That being said, the Token Extensions Program is deployed to a separate
+address. Even though the interfaces of these two programs are the same, the
+addresses of these programs are **not interchangeable** in any case. This means
+a token created with the Token Program cannot interact with the Token
+Extensions Program. As a result, if we want to support the Token Extensions
+Program, our client application will need some extra logic to differentiate
+between the tokens owned by these two programs.
+
+One last note: the Token Extensions Program does not completely replace the
+Token Program. If the use case of a particular token is very simple, it may not
+need extensions. In that case, the original Token Program would be
+ever-so-slightly preferable to use, since the program does not need to go
+through any of the additional extension checks.
+
+### Extensions
+
+The extensions of the Token Extensions Program are just that: extensions. Any
+extra data needed for an extension is tagged onto the end of the Mint and Token
+accounts we're already familiar with. This is crucial for the interfaces of the
+Token Program and Token Extensions Program to match up.
+
+As of this writing, there are
+[16 extensions](https://spl.solana.com/token-2022/extensions): four on Token
+accounts and 12 on Mint accounts.
+
+**Account extensions** currently include:
+
+- **Required memos** This extension makes it mandatory to have a memo on all
+  transfers, just like traditional banking systems.
+
+- **Immutable ownership** A token account's owner can normally transfer
+  ownership to any other address, which is useful in many scenarios but can
+  lead to security vulnerabilities, especially when dealing with Associated
+  Token Accounts (ATAs). To avoid these issues, we can use this extension,
+  which makes it impossible to reassign account ownership.
+
+All Token Extension Program ATAs have the immutable ownership extension baked
+in.
+
+- **Default account state** Mint creators can use this extension, which forces
+  all new token accounts to be frozen. This way, users must eventually interact
+  with some type of service to unfreeze their accounts and use the tokens.
+
+- **CPI guard** This extension safeguards users against authorizing actions
+  that are not visible to them, specifically targeting concealed programs that
+  are neither the System nor Token programs. It does this by restricting
+  certain activities within cross-program invocations.
+
+**Mint extensions** include:
+
+- **Transfer fees** The Token Extensions Program implements transfer fees at
+  the protocol level, deducting a certain amount from each transfer to the
+  recipient's account. This withheld amount is inaccessible to the recipient
+  and is redeemable by whatever address the mint creator dictates.
+
+- **Closing mint** Under the Token Program, only token accounts could be
+  closed. However, the introduction of the close authority extension now allows
+  for the closure of mint accounts as well.
+
+To close a mint account, the supply has to be 0, so all tokens minted must be
+burned.
+
+- **Interest-bearing tokens** Tokens whose values constantly fluctuate would
+  normally require proxies and regular rebase or update operations to show the
+  updated values in clients. With this extension, we can change how the UI
+  amount of tokens is represented by setting an interest rate on the token and
+  fetching its amount with interest at any time. Note that the interest on the
+  token is purely aesthetic and does not change the amount of tokens within an
+  account.
+
+- **Non-transferable tokens** This extension enables the creation of tokens
+  that are "bound" to their owner, meaning they cannot be transferred to
+  others.
+
+- **Permanent delegate** This extension allows us to specify a permanent
+  delegate for a mint. This authority has unlimited delegate privileges over
+  any token account of that mint, which means it can burn or transfer any
+  amount of tokens from any account. A permanent delegate can be used, for
+  example, by membership programs to revoke access tokens, or by stablecoin
+  issuers to revoke balances owned by sanctioned entities. This extension is
+  powerful and dangerous.
+
+- **Transfer hook** This extension allows token creators to have more control
+  over how their tokens are transferred, by allowing a callback "hook" function
+  onchain. The creators must develop and deploy a program that implements the
+  hook interface and then configure their token mint to use their program.
+  Then, on any transfer of that mint, the transfer hook will be called.
+
+- **Metadata pointer** A mint can have multiple different accounts claiming to
+  describe the mint. This extension allows the token creator to designate an
+  address that describes the canonical metadata. The pointer can be an external
+  account, like a Metaplex metadata account, or, if using the metadata
+  extension, self-pointing.
+
+- **Metadata** This extension allows a mint creator to include their token's
+  metadata directly in the mint account. It is always used in conjunction with
+  the metadata pointer extension.
+
+- **Group pointer** Think of a group of tokens much like a "collection" of
+  tokens. More specifically, in an NFT collection, the mint with the group
+  pointer extension would be considered the collection NFT. This extension
+  contains a pointer to an account that conforms to the
+  [Token-Group Interface](https://github.com/solana-labs/solana-program-library/tree/master/token-group/interface).
+
+- **Group** This stores the
+  [group information](https://github.com/solana-labs/solana-program-library/tree/master/token-group/interface)
+  within the mint itself. It is always used in conjunction with the group
+  pointer extension.
+
+- **Member pointer** The inverse of the group pointer is the member pointer.
+  This pointer points to an account that holds the member data, such as which
+  group it's a part of. In a collection of NFTs, these would be the NFTs in the
+  collection.
+
+- **Member** This stores the member information within the mint itself. It's
+  always used in conjunction with the member pointer extension. 
+ +- **Confidential transfers** This extension enhances privacy of the transactions + without revealing key details of the transaction such as the amount. + + + +These extensions can be mixes and matched to make a +plethora of highly functional tokens. + + + +We'll dig deeper into each extension in separate lessons. + +## Things to consider when working with both Token Program and Token Extension Program + +Although the interfaces for both of these programs remain consistent, they are +two different programs. The program IDs of these programs are not +interchangeable, and the addresses created by using them are different. If you +want to support both Token Program tokens and Token Extension Program tokens, +you must add extra logic on the client side and program side. We will dive into +these implementations in later lessons. + +## Lab + +Now, we will test out some of these extensions using the `spl-token-cli` CLI. + +#### 1. Getting Started + +Before we can use the extensions, we need to install the `spl-token-cli`. Follow +the instructions in [this guide](https://spl.solana.com/token#setup). After the +installation, verify it by running the following command: + +```bash +spl-token --version +``` + + + +Make sure you follow each step in the +[guide above](https://spl.solana.com/token#setup) as it also describes how to +initialize a local wallet and airdrop SOL. + + + +#### 2. Creating a mint with close authority + +Let's create a mint account with the close authority extension, and then, to +show that it works, we'll close the mint! + +Let's create a mint with close authority extension using the CLI: + +This extension requires following arguments: + +- `create-token` : The instruction that we want to execute. +- `--program-id` : This flag is used to specified which program ID to use. + `TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb` is the public address at which + the Token Extension Program is deployed. +- `--enable-close` : This flag specifies that we want to initialize the mint + with close authority. + +Run the following command: + +```bash +spl-token create-token --program-id TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb --enable-close +``` + +We will see output similar to as shown below: + +```bash +Creating token 3s6mQcPHXqwryufMDwknSmkDjtxwVujfovd5gPQLvKw9 under program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb + +Address: 3s6mQcPHXqwryufMDwknSmkDjtxwVujfovd5gPQLvKw9 +Decimals: 9 + +Signature: fGQQ1eAGsnKN11FUcFhGuacpuMTGYwYEfaAVBUys4gvH4pESttRgjVKzTLSfqjeQ5rNXP92qEyBMaFFNTVPMVAD +``` + +To view details about the newly created mint, we can use the `display` command. +This command will show relent details for a token mint, account or multisig. +Let's pass it mint address of the previous step. + +```bash +spl-token display +``` + +Now that we have a mint, we can close it with the following where +`` is the resulting address from the previous step. + +```bash +spl-token close-mint +``` + + + +By closing the account, we reclaim the rent lamports on the +mint account. Remember, the supply on the mint must be zero. + + + +As a challenge, repeat this process, but before closing the mint account, mint +some tokens and then try to close it - see what happens. (Spoiler, it'll fail) + +#### 3. Creating a token account with immutable owner + +Let's test out another extension, a Token account extension this time. We'll +create a new mint, and then we'll create an associated token account using the +immutable owner extension. 
+ +First let's create a new vanilla mint with no additional extensions: + +```bash +spl-token create-token --program-id TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb +``` + +You should get something similar to this: + +```bash +Creating token FXnaqGm42aQgz1zwjKrwfn4Jk6PJ8cvkkSc8ikMGt6EU under program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb + +Address: FXnaqGm42aQgz1zwjKrwfn4Jk6PJ8cvkkSc8ikMGt6EU +Decimals: 9 + +Signature: 3tX6FHvE24e8UHqSWbK5HRpBFxtCnDTRHASFZtipKkTzapgMGZEeNJ2zHAHSrSUs8L8wQGnLbvJiLrHuomyps39j +``` + +Save the resulting mint address, we'll use it in the next step. + +Now let's mint one of these tokens to an associated token account (ATA) that +uses the `immutable owner` extension. By default all ATAs enable the +`immutable owner` extension. And all token accounts made with the CLI will be +ATAs, so `immutable owner` will be enabled. + +This extension requires following arguments: + +- `create-account`: The instruction that we want to execute. +- `--program-id` (optional): The program ID we want to use. This is optional + because the CLI will figure out the owning program of the mint. +- `--owner` (optional): Public key of the owner's wallet. It will default to the + current working public key which we can get by running the command + `solana address`. +- `--fee-payer` (optional): Keypair of the wallet paying for the transaction. It + will default to the current working keypair, which can be found with + `solana config get`. +- ``: this is the mint account we got from the + `create-token` command. + +Run the following command to create the associated token account with the +immutable owner extension: + +```bash +spl-token create-account +``` + +After running this command, we will see out similar to as shown below. + +```bash +Creating account F8iDrVskLGwYo53SdJnvBKTpN1C7hobgnPQMq6hLivUn + +Signature: 5zX73E2aFVwcsvhCgBSF6AxWqydWYk3KJaTmeS4AY22FwCvgEvnodvJ7fzvBHZptqv3FMz6tbLFR5LbmiUHLUkne +``` + +Now we can mint some tokens to it with the `mint` function. Here are the +arguments we have to provide: + +- `mint`: The instruction +- ``: The address of the mint we got from the first step +- ``: Amount to mint in tokens +- ``(optional): This is the token account used + to hold the tokens we created in the previous step. However, this defaults to + the ATA of our current working keypair and mint. So this will automatically + use the account from our last step. + +```bash +spl-token mint +``` + +This will result in something like the following: + +```bash +Minting 1 tokens + Token: FXnaqGm42aQgz1zwjKrwfn4Jk6PJ8cvkkSc8ikMGt6EU + Recipient: 8r9VNjnLqjzrpgkcgCozgvCBDQwWWYUL7RKwatSWnd6B + +Signature: 54yREwGCH8YfYXqEf6gRKGou681F8NkToAJZvJqM5qZETJokRkdTb8s8HVkKPeVMQQcc8gCZkq4Kxx3YbLtY9Frk +``` + +Feel free to use the `spl-token display` command to get some info about the mint +and token account. + +#### 4. Creating a non-transferrable ("soul-bound") NFT + +Lastly, let's create an NFT which will be non-transferable, sometimes called a +'soul-bound' NFT. Think of it as a achievement token which is exclusively owned +by one person or account. For creating this token, we will use three extensions: +metadata pointer, metadata and non-transferable token. + +With the metadata extension, we can include metadata directly in the mint +account and the non-transferable extension makes the token exclusive to the +account. + +The command takes following arguments: + +- `create-token`: The instruction that we want to execute. +- `--program-id`: The program ID we want to use. 
+- `--decimals`: NFTs are usually whole, and have 0 decimals +- `--enable-metadata`: The metadata extension flag. (This initializes the + metadata and metadata pointer extensions) +- `--enable-non-transferable`: The non-transferable extension flag. + +Run the following command to create a token initialized with the metadata and +non-transferrable extensions. + +```bash +spl-token create-token --program-id TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb --decimals 0 --enable-metadata --enable-non-transferable +``` + +We will see output similar to as shown below. + +```bash +Creating token GVjznwtfPndL9RsBtAYDFT1H8vhQjx8ymAB1rbd17qPr under program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb +To initialize metadata inside the mint, please run `spl-token initialize-metadata GVjznwtfPndL9RsBtAYDFT1H8vhQjx8ymAB1rbd17qPr `, and sign with the mint authority. + +Address: GVjznwtfPndL9RsBtAYDFT1H8vhQjx8ymAB1rbd17qPr +Decimals: 0 + +Signature: 5EQ95NPTXg5reg9Ybcw9LQRjiWFZvfb9WqJidxu6kKbcKGajp1U999ioToC1qC88KUS4kdUi6rZbibqjgJbzYses +``` + +After creating the mint with metadata extension, we need to initialize the +metadata as specified in the output above. Initializing metadata takes the +following arguments: + +- Mint address : Address of the mint to initialize the metadata for. +- ``: Name of the token +- ``: Symbol by which the token will be identified. +- ``: URI for the token. +- `--update-authority` (optional): The address of the account with the authority + to update the metadata. This will default to the current working public key. + +Run the following command to initialize the metadata: + +```bash +spl-token initialize-metadata MyToken TOK http://my.tokn +``` + +Now, let's take a look at the metadata by calling our trusty `display` command. + +```bash +spl-token display +``` + +Next, let's update the metadata for that mint. We will be updating the name of +our token. Run the following command: + +```bash +spl-token update-metadata name MyAwesomeNFT +``` + +Now let's see how we can add a custom field to our mint's metadata. This command +takes the following arguments: + +- Mint address : Address of the mint to update metadata for. +- Custom field name : Name of the new custom field. +- Custom field value : Value of the new custom field. + +Run the following command: + +```bash +spl-token update-metadata new-field new-value +``` + +We can also remove the custom fields from the mint metadata. Run the following +command: + +```bash +spl-token update-metadata new-field --remove +``` + +Lastly, lets make it a real non-transferrable NFT. We do this by minting the NFT +to our ATA and then removing the mint authority. This way the supply will only +be one. + +```bash +spl-token create-account +spl-token mint 1 +spl-token authorize mint --disable +``` + +Now we have successfully created a non-transferrable NFT which is exclusively +owned by our ATA. + +That's it! This is how we can use the Solana CLI with Token Extension Program to +use the extensions. We will dive deeper into these extensions in separate +lessons and see how we can use them programmatically. + +## Challenge + +Go and try out different combinations of extensions using the CLI. 
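+
+For example, here is one combination you might try. It only uses flags that
+appeared earlier in this lesson, and the exact behavior of any combination is
+something you should verify yourself:
+
+```bash
+# A zero-decimal, non-transferable mint with metadata that can also be
+# closed once its supply is zero.
+spl-token create-token \
+  --program-id TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb \
+  --decimals 0 \
+  --enable-metadata \
+  --enable-non-transferable \
+  --enable-close
+```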
+ +Hint: Take a look at your options by calling commands with the `--help` flag: + +```bash +spl-token --create-token --help +``` diff --git a/content/courses/token-extensions/meta.json b/content/courses/token-extensions/meta.json new file mode 100644 index 000000000..a5a8f5020 --- /dev/null +++ b/content/courses/token-extensions/meta.json @@ -0,0 +1,19 @@ +{ + "pages": [ + "intro-to-token-extensions-program", + "token-extensions-in-the-client", + "token-extensions-onchain", + "token-extensions-metadata", + "non-transferable-token", + "transfer-fee", + "close-mint", + "default-account-state", + "cpi-guard", + "group-member", + "immutable-owner", + "interest-bearing-token", + "permanent-delegate", + "required-memo", + "transfer-hook" + ] +} diff --git a/content/courses/token-extensions/non-transferable-token.mdx b/content/courses/token-extensions/non-transferable-token.mdx new file mode 100644 index 000000000..34bc4dfe7 --- /dev/null +++ b/content/courses/token-extensions/non-transferable-token.mdx @@ -0,0 +1,456 @@ +--- +title: Non-Transferable Token +objectives: + - Create non-transferable token + - Mint a non-transferable token + - Attempt to transfer the non-transferable token +description: + "Create tokens that can't be transferred. Certificates, identity, ticketing + and more." +--- + +## Summary + +- In the original Token Program, creating non-transferrable (sometimes called + "soul-bound") tokens is impossible +- The Token Extension Program's `non-transferrable token` enables + non-transferrable tokens + +## Overview + +In the Token Program, it's impossible to create a token that cannot be +transferred away. While this may seem unimportant, there are several reasons one +may want to issue a non-transferrable (or "soul-bound") token. + +Take the following example: Say you are a Solana game dev, and your new game, +"Bits and Bytes", wants to award achievements to the players. Achievements are +not transferrable, and you want their hard work to be proudly displayed in their +wallet. The solution is to send them a non-transferable NFT. However, in the +Token Program, this is not possible. However, it is in the Token Extension +Program! Enter, the `non-transferable` extension. + +Token Extension Program has the `non-transferable` extension which can be used +to create non-transferable mints. These mints can be burned, but they can't be +transferred. + +### Creating non-transferable mint account + +Initializing a non-transferable mint involves three instruction: + +- `SystemProgram.createAccount` +- `createInitializeNonTransferableMintInstruction` +- `createInitializeMintInstruction` + +The first instruction `SystemProgram.createAccount` allocates space on the +blockchain for the mint account. This instruction accomplishes three things: + +- Allocates `space` +- Transfers `lamports` for rent +- Assigns to itself it's owning program + +Like all other extensions, you'll need to calculate the space and lamports +needed for the mint account. You can do this by calling: `getMintLen` and +`getMinimumBalanceForRentExemption`. 
+ +```ts +const extensions = [ExtensionType.NonTransferable]; +const mintLength = getMintLen(extensions); + +const mintLamports = + await connection.getMinimumBalanceForRentExemption(mintLength); + +const createAccountInstruction = SystemProgram.createAccount({ + fromPubkey: payer.publicKey, + newAccountPubkey: mintKeypair.publicKey, + space: mintLength, + lamports: mintLamports, + programId: TOKEN_2022_PROGRAM_ID, +}); +``` + +The second instruction `createInitializeNonTransferableMintInstruction` +initializes the non-transferable extension. + +```ts +const initializeNonTransferableMintInstruction = + createInitializeNonTransferableMintInstruction( + mintKeypair.publicKey, + TOKEN_2022_PROGRAM_ID, + ); +``` + +The third instruction `createInitializeMintInstruction` initializes the mint. + +```ts +const initializeMintInstruction = createInitializeMintInstruction( + mintKeypair.publicKey, + decimals, + payer.publicKey, + null, // Confirmation Config + TOKEN_2022_PROGRAM_ID, +); +``` + +Lastly, add all of the instructions to a transaction and send to Solana. + +```ts +const mintTransaction = new Transaction().add( + createAccountInstruction, + initializeNonTransferableMintInstruction, + initializeMintInstruction, +); + +await sendAndConfirmTransaction( + connection, + mintTransaction, + [payer, mintKeypair], + { commitment: "finalized" }, +); +``` + +And that's it! You now have a mint account, that when minted, cannot be +transferred. This extension gets more exciting when you mix it with the +`metadata` and `metadata-pointer` extensions to create soul-bound NFTs. + +## Lab + +In this lab, we will create a non-transferable token and then see what happens +when we try to transfer it (hint: it will fail the transfer). + +#### 1. Getting started + +To get started, create an empty directory named `non-transferable-token` and +navigate to it. We'll be initializing a brand new project. Run `npm init` and +follow through the prompts. + +Next, we'll need to add our dependencies. Run the following to install the +required packages: + +```bash +npm i @solana-developers/helpers@2 @solana/spl-token @solana/web3.js@1 esrun dotenv typescript +``` + +Create a directory named `src`. In this directory, create a file named +`index.ts`. This is where we will run checks against the rules of this +extension. Paste the following code in `index.ts`: + +```ts +import { Connection, Keypair } from "@solana/web3.js"; +import { initializeKeypair } from "@solana-developers/helpers"; +import dotenv from "dotenv"; +import { + createAccount, + mintTo, + TOKEN_2022_PROGRAM_ID, +} from "@solana/spl-token"; +// import { createNonTransferableMint } from './create-mint'; +dotenv.config(); + +/** + * Create a connection and initialize a keypair if one doesn't already exists. + * If a keypair exists, airdrop a sol if needed. + */ +const connection = new Connection("http://127.0.0.1:8899", "confirmed"); +const payer = await initializeKeypair(connection); + +console.log(`public key: ${payer.publicKey.toBase58()}`); + +const mintKeypair = Keypair.generate(); +const mint = mintKeypair.publicKey; +console.log("\nmint public key: " + mintKeypair.publicKey.toBase58() + "\n\n"); + +// CREATE MINT + +// CREATE SOURCE ACCOUNT AND MINT TOKEN + +// CREATE DESTINATION ACCOUNT FOR TRANSFER + +// TRY TRANSFER +``` + +This file has a main function that creates a connection to the specified +validator node and calls `initializeKeypair`. This main function is where we'll +end up calling the rest of our script once we've written it. 
+ +Go ahead and run the script. You should see the `mint` public key logged to your +terminal. + +```bash +esrun src/index.ts +``` + +If you run into an error in `initializeKeypair` with airdropping, follow the +next step. + +#### 2. Setting up dev environment (optional) + +If you are having issues with airdropping devnet SOL. You can either: + +1. Add the `keypairPath` parameter to `initializeKeypair` and get some devnet + SOL from [Solana's faucet.](https://faucet.solana.com/) +2. Run a local validator by doing the following: + +In a separate terminal, run the following command: `solana-test-validator`. This +will run the node and also log out some keys and values. The value we need to +retrieve and use in our connection is the JSON RPC URL, which in this case is +`http://127.0.0.1:8899`. We then use that in the connection to specify to use +the local RPC URL. + +```typescript +const connection = new Connection("http://127.0.0.1:8899", "confirmed"); +``` + +#### 3. Create a non-transferable mint + +Let's create the function `createNonTransferableMint` in a new file +`src/create-mint.ts`. + +Inside the file, create the function `createNonTransferableMint` with the +following arguments: + +- `connection` : The connection object +- `payer` : Payer for the transaction +- `mintKeypair` : Keypair for new mint +- `decimals` : Mint decimals + +Inside the function, we'll call the following: + +- `getMintLen` - to get the space needed for the mint account +- `getMinimumBalanceForRentExemption` - to get the amount of lamports needed for + the mint account +- `createAccount` - Allocates space on the blockchain for the mint account +- `createInitializeNonTransferableMintInstruction` - initializes the extension +- `createInitializeMintInstruction` - initializes the mint +- `sendAndConfirmTransaction` - sends the transaction to the blockchain + +```typescript +import { + sendAndConfirmTransaction, + Connection, + Keypair, + SystemProgram, + Transaction, + TransactionSignature, +} from "@solana/web3.js"; + +import { + ExtensionType, + createInitializeMintInstruction, + getMintLen, + TOKEN_2022_PROGRAM_ID, + createInitializeNonTransferableMintInstruction, +} from "@solana/spl-token"; + +export async function createNonTransferableMint( + connection: Connection, + payer: Keypair, + mintKeypair: Keypair, + decimals: number, +): Promise { + const extensions = [ExtensionType.NonTransferable]; + const mintLength = getMintLen(extensions); + + const mintLamports = + await connection.getMinimumBalanceForRentExemption(mintLength); + + console.log("Creating a transaction with non-transferable instruction..."); + const mintTransaction = new Transaction().add( + SystemProgram.createAccount({ + fromPubkey: payer.publicKey, + newAccountPubkey: mintKeypair.publicKey, + space: mintLength, + lamports: mintLamports, + programId: TOKEN_2022_PROGRAM_ID, + }), + createInitializeNonTransferableMintInstruction( + mintKeypair.publicKey, + TOKEN_2022_PROGRAM_ID, + ), + createInitializeMintInstruction( + mintKeypair.publicKey, + decimals, + payer.publicKey, + null, + TOKEN_2022_PROGRAM_ID, + ), + ); + + const signature = await sendAndConfirmTransaction( + connection, + mintTransaction, + [payer, mintKeypair], + { commitment: "finalized" }, + ); + + return signature; +} +``` + +Now let's invoke this function in `src/index.ts` to create the non-transferable +mint: + +```typescript +// CREATE MINT +const decimals = 9; + +await createNonTransferableMint(connection, payer, mintKeypair, decimals); +``` + +The script should run with no 
errors + +```bash +esrun src/index.ts +``` + +The non-transferable mint has been set up correctly and will be created when we +run `npm start`. Let's move on to the next step and create a source account and +mint a token to it. + +#### 4. Mint token + +Let's test that we can't actually transfer tokens created from this mint. To do +this, we need to mint a token to an account. + +Let's do this in `src/index.ts`. Let's create a source account and mint one +non-transferable token. + +We can accomplish this in two functions: + +- `getOrCreateAssociatedTokenAccount`: from the `@solana/spl-token` library, + this creates an associated token account (ATA) for the given mint and owner. +- `mintTo`: This function will mint an `amount` of tokens to the given token + account. + +```typescript +// CREATE PAYER ATA AND MINT TOKEN +console.log("Creating an Associated Token Account..."); +const ata = ( + await getOrCreateAssociatedTokenAccount( + connection, + payer, + mint, + payer.publicKey, + undefined, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, + ) +).address; + +console.log("Minting 1 token..."); + +const amount = 1 * 10 ** decimals; +await mintTo( + connection, + payer, + mint, + ata, + payer, + amount, + [payer], + { commitment: "finalized" }, + TOKEN_2022_PROGRAM_ID, +); +const tokenBalance = await connection.getTokenAccountBalance(ata, "finalized"); + +console.log( + `Account ${ata.toBase58()} now has ${tokenBalance.value.uiAmount} token.`, +); +``` + +Run the script and confirm a token has been minted to an account: + +```bash +esrun src/index.ts +``` + +#### 5. Attempt to transfer a non-transferable token + +Lastly, let's try and actually transfer the token somewhere else. First we need +to create a token account to transfer to, and then we want to try and transfer. + +In `src/index.ts`, we will create a destination account and try to transfer the +non-transferable token to this account. + +We can accomplish this in two functions: + +- `createAccount`: This will create a token account for a given mint and the + keypair of said account. So instead of using an ATA here, let's generate a new + keypair as the token account. We're doing this just to show different options + of accounts. +- `transferChecked`: This will attempt to transfer the token. + +First, the `createAccount` function: + +```typescript +// CREATE DESTINATION ACCOUNT FOR TRANSFER +console.log("Creating a destination account...\n\n"); +const destinationKeypair = Keypair.generate(); +const destinationAccount = await createAccount( + connection, + payer, + mintKeypair.publicKey, + destinationKeypair.publicKey, + undefined, + { commitment: "finalized" }, + TOKEN_2022_PROGRAM_ID, +); +``` + +Now, the `transferChecked` function: + +```typescript +// TRY TRANSFER +console.log("Attempting to transfer non-transferable mint..."); +try { + const signature = await transferChecked( + connection, + payer, + ata, + mint, + destinationAccount, + ata, + amount, + decimals, + [destinationKeypair], + { commitment: "finalized" }, + TOKEN_2022_PROGRAM_ID, + ); +} catch (e) { + console.log( + "This transfer is failing because the mint is non-transferable. Check out the program logs: ", + (e as any).logs, + "\n\n", + ); +} +``` + +Now let's run everything and see what happens: + +``` +esrun src/index.ts +``` + +You should get an error message at the very end that says +`Transfer is disabled for this mint`. This is indicating that the token we are +attempting to transfer is in fact non-transferable! 
+ +```bash +Attempting to transfer non-transferable mint... +This transfer is failing because the mint is non-transferable. Check out the program logs: [ + 'Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb invoke [1]', + 'Program log: Instruction: TransferChecked', + 'Program log: Transfer is disabled for this mint', + 'Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb consumed 3910 of 200000 compute units', + 'Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb failed: custom program error: 0x25' +] +``` + +That's it! We have successfully created a non-transferable mint. If you are +stuck at any point, you can find the working code on the `solution` branch of +[this repository](https://github.com/Unboxed-Software/solana-lab-non-transferable-token/tree/solution). + +## Challenge + +For the challenge, create your own non-transferable token with the metadata +extension and keep a “soulbound” NFT to yourself. diff --git a/content/courses/token-extensions/permanent-delegate.mdx b/content/courses/token-extensions/permanent-delegate.mdx new file mode 100644 index 000000000..2474c1cfe --- /dev/null +++ b/content/courses/token-extensions/permanent-delegate.mdx @@ -0,0 +1,1021 @@ +--- +title: Permanent Delegate +objectives: + - Create a mint with a permanent delegate + - Explain the use cases of permanent delegate + - Experiment with the rules of the extension +description: + "Create a token than can be permanently transferred or burned by a particular + account." +--- + +## Summary + +- The permanent delegate holds global ownership over all token accounts + associated with the mint +- The permanent delegate has unrestricted permissions to transfer and burn + tokens from any token account of that mint +- This delegate role designates a trusted entity with comprehensive control. + Common use cases include sanction compliance and revocable access tokens. +- With this level of access, the permanent delegate can carry out high-level + administrative functions, such as reassigning tokens, managing token supplies, + and directly implementing specific policies or rules on the token accounts. + +## Overview + +The `permanent delegate` extension allows a `permanent delegate` for all tokens +of the mint. This means one address is capable of transferring or burning any +token of that mint, from any token account. This makes the extension very +powerful but can also be very risky. It gives a single address complete control +over the token supply. This can be good for things like automatic payments, +recovering drained wallets, and refunds. However, it's a double-edged sword, the +`permanent delegate` could be stolen or abused. In the words of Uncle Ben, "With +great power, comes great responsibility." + +Imagine a Solana based AirBnb, where NFTs that use permanent delegate are used +as the keys to unlock the door. When you check-in, the NFT key will be +transferred to you and you'll be able to enjoy your stay. At the end of your +stay, the owner will transfer it back from you to them - since they are the +`permanent delegate`. What happens if your wallet gets drained, or you lose +access to the key? No worries, the owner can transfer the key from any account +back to you! But on the other end, say the owner doesn't want you staying there +anymore, they can revoke it at any time, and you'd be locked out. In this way, +the permanent delegate extension is a double-edged sword. + +This all being said - the `permanent delegate` is a very exciting extension that +adds a world of possibilities to Solana tokens. 
+ +### Initializing a permanent delegate to mint + +Initializing a permanent delegate token involves three instructions: + +- `SystemProgram.createAccount` +- `createInitializePermanentDelegateInstruction` +- `createInitializeMintInstruction` + +The first instruction `SystemProgram.createAccount` allocates space on the +blockchain for the mint account. This instruction accomplishes three things: + +- Allocates space +- Transfers lamports for rent +- Assigns to its owning program + +```typescript +SystemProgram.createAccount({ + fromPubkey: payer.publicKey, + newAccountPubkey: mint, + space: mintLen, + lamports: mintLamports, + programId: TOKEN_2022_PROGRAM_ID, +}), +``` + +The second instruction `createInitializePermanentDelegateInstruction` +initializes the permanent delegate extension. The defining argument that +dictates the permanent delegate will be a variable we create named +`permanentDelegate`. + +```typescript +createInitializePermanentDelegateInstruction( + mint, + permanentDelegate.publicKey, + TOKEN_2022_PROGRAM_ID, +), +``` + +The third instruction `createInitializeMintInstruction` initializes the mint. + +```typescript +createInitializeMintInstruction( + mint, + decimals, + mintAuthority.publicKey, + null, + TOKEN_2022_PROGRAM_ID, +); +``` + +When the transaction with these three instructions is sent, a new permanent +delegate token is created with the specified configuration. + +### Transferring tokens as delegate + +The `transferChecked` function enables the permanent delegate to securely +transfer tokens between accounts. This function makes sure that the token +transfer adheres to the mint's configured rules and requires the delegate to +sign the transaction. + +```ts +/** + * Approve a delegate to transfer up to a maximum number of tokens from an account, asserting the token mint and decimals + * + * @param connection Connection to use + * @param payer Payer of the transaction fees + * @param mint Address of the mint + * @param account Address of the account + * @param delegate Account authorized to perform a transfer tokens from the source account + * @param owner Owner of the source account + * @param amount Maximum number of tokens the delegate may transfer + * @param decimals Number of decimals in approved amount + * @param multiSigners Signing accounts if `owner` is a multisig + * @param confirmOptions Options for confirming the transaction + * @param programId SPL Token program account + * + * @return Signature of the confirmed transaction + */ +await transferChecked( + connection, + payer, + bobAccount, + mint, + carolAccount, + permanentDelegate, + amountToTransfer, + decimals, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, +); +``` + +### Burning tokens as delegate + +The `burnChecked` function allows the permanent delegate to burn tokens from any +token account of the mint. This function makes sure that the burn operation +complies with the mint's rules and requires the delegate to sign the +transaction. 
+ +```ts +/** + * Burn tokens from an account, asserting the token mint and decimals + * + * @param connection Connection to use + * @param payer Payer of the transaction fees + * @param account Account to burn tokens from + * @param mint Mint for the account + * @param owner Account owner + * @param amount Amount to burn + * @param decimals Number of decimals in amount to burn + * @param multiSigners Signing accounts if `owner` is a multisig + * @param confirmOptions Options for confirming the transaction + * @param programId SPL Token program account + * + * @return Signature of the confirmed transaction + */ +await burnChecked( + connection, + payer, + bobAccount, + mint, + permanentDelegate, + amountToBurn, + decimals, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, +); +``` + +### Assign permissions to a new delegate + +The `approveChecked` function approves a delegate to transfer or burn up to a +maximum number of tokens from an account. This allows the designated delegate to +perform token transfers on behalf of the account owner up to the specified +limit. + +```ts +/** + * Approve a delegate to transfer up to a maximum number of tokens from an account, asserting the token mint and + * decimals + * + * @param connection Connection to use + * @param payer Payer of the transaction fees + * @param mint Address of the mint + * @param account Address of the account + * @param delegate Account authorized to perform a transfer tokens from the source account + * @param owner Owner of the source account + * @param amount Maximum number of tokens the delegate may transfer + * @param decimals Number of decimals in approved amount + * @param multiSigners Signing accounts if `owner` is a multisig + * @param confirmOptions Options for confirming the transaction + * @param programId SPL Token program account + * + * @return Signature of the confirmed transaction + */ + +// Approve new delegate to perform actions +await approveChecked( + connection, + payer, + mint, + bobAccount, + delegate.publicKey, + bob, + amountToApprove, + decimals, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, +); + +// Newly assigned delegate can now transfer from an account +await transferChecked( + connection, + payer, + bobAccount, + mint, + carolAccount, + carol, + amountToTransfer, + decimals, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, +); +``` + +## Lab + +In this lab, we'll explore the functionality of the `permanent delegate` +extension by creating a mint account with a permanent delegate and testing +various interactions with token accounts associated with that mint. + +#### 1. Setup Environment + +To get started, create an empty directory named `permanent-delegate` and +navigate to it. We'll be initializing a brand new project. Run `npm init` and +follow through the prompts. + +Next, we'll need to add our dependencies. Run the following to install the +required packages: + +```bash +npm i @solana-developers/helpers@2 @solana/spl-token @solana/web3.js@1 esrun +``` + +Create a directory named `src`. In this directory, create a file named +`index.ts`. This is where we will run checks against the rules of this +extension. 
Paste the following code in `index.ts`: + +```ts +import { + sendAndConfirmTransaction, + Connection, + Keypair, + SystemProgram, + Transaction, + PublicKey, +} from "@solana/web3.js"; + +import { + ExtensionType, + createInitializeMintInstruction, + createInitializePermanentDelegateInstruction, + mintTo, + createAccount, + getMintLen, + TOKEN_2022_PROGRAM_ID, + transferChecked, +} from "@solana/spl-token"; +import { initializeKeypair } from "@solana-developers/helpers"; + +const connection = new Connection("http://127.0.0.1:8899", "confirmed"); +const payer = await initializeKeypair(connection); + +const mintAuthority = payer; +const mintKeypair = Keypair.generate(); +const mint = mintKeypair.publicKey; +const permanentDelegate = payer; + +const extensions = [ExtensionType.PermanentDelegate]; +const mintLen = getMintLen(extensions); + +const decimals = 9; +const amountToMint = 100; +const amountToTransfer = 10; +const amountToBurn = 5; + +// Create mint account with permanent delegate + +// Create delegate and destination token accounts + +// Mint tokens to accounts + +// Attempt to transfer with correct delegate + +// Attempt to transfer without correct delegate + +// Attempt to transfer from one account to another with correct delegate + +// Attempt to burn with correct delegate + +// Attempt to burn without correct delegate + +// Grant permission to an account to transfer tokens from a different token account + +// Try to transfer tokens again with Carol as the delegate, overdrawing her allotted control +``` + +`index.ts` creates a connection to the specified validator node and calls +`initializeKeypair`. It also has a few variables we will be using in the rest of +this lab. The `index.ts` is where we'll end up calling the rest of our script +once we've written it. + +If you run into an error in `initializeKeypair` with airdropping, follow the +next step. + +#### 2. Run validator node + +For the sake of this guide, we'll be running our own validator node. + +In a separate terminal, run the following command: `solana-test-validator`. This +will run the node and also log out some keys and values. The value we need to +retrieve and use in our connection is the JSON RPC URL, which in this case is +`http://127.0.0.1:8899`. We then use that in the connection to specify to use +the local RPC URL. + +```typescript +const connection = new Connection("http://127.0.0.1:8899", "confirmed"); +``` + +Alternatively, if you'd like to use testnet or devnet, import the +`clusterApiUrl` from `@solana/web3.js` and pass it to the connection as such: + +```typescript +const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); +``` + +#### 3. Helpers + +When we pasted the `index.ts` code from earlier, we added the following helpers: + +- `initializeKeypair`: This function creates the keypair for the `payer` and + also airdrops some SOL to it +- `makeKeypairs`: This function creates keypairs without airdropping any SOL + +Additionally we have some initial accounts and variables that will be used to +test the `permanent delegate` extension! + +#### 4. Create Mint with permanent delegate + +When creating a mint token with default state, we must create the account +instruction, initialize the default account state for the mint account and +initialize the mint itself. + +Create an asynchronous function named +`createTokenExtensionMintWithPermanentDelegate` in `src/mint-helper.ts`. This +function will create the mint such that all new mints will be created with a +permanent delegate. 
The function will take the following arguments: + +- `connection`: The connection object +- `payer`: Payer for the transaction +- `mintKeypair`: Keypair for the new mint +- `decimals`: Mint decimals +- `permanentDelegate`: Assigned delegate keypair + +The first step in creating a mint is reserving space on Solana with the +`SystemProgram.createAccount` method. This requires specifying the payer's +keypair, (the account that will fund the creation and provide SOL for rent +exemption), the new mint account's public key (`mintKeypair.publicKey`), the +space required to store the mint information on the blockchain, the amount of +SOL (lamports) necessary to exempt the account from rent and the ID of the token +program that will manage this mint account (`TOKEN_2022_PROGRAM_ID`). + +```typescript +const extensions = [ExtensionType.PermanentDelegate]; +const mintLen = getMintLen(extensions); +const mintLamports = + await connection.getMinimumBalanceForRentExemption(mintLen); + +const createAccountInstruction = SystemProgram.createAccount({ + fromPubkey: payer.publicKey, + newAccountPubkey: mint, + space: mintLen, + lamports: mintLamports, + programId: TOKEN_2022_PROGRAM_ID, +}); +``` + +After the mint account creation, the next step involves initializing it with a +permanent delegate. The `createInitializePermanentDelegateInstruction` function +is used to generate an instruction that enables the mint to set the permanent +delegate of any new mint accounts. + +```typescript +const initializePermanentDelegateInstruction = + createInitializePermanentDelegateInstruction( + mint, + permanentDelegate.publicKey, + TOKEN_2022_PROGRAM_ID, + ); +``` + +Next, let's add the mint instruction by calling +`createInitializeMintInstruction` and passing in the required arguments. This +function is provided by the SPL Token package and it constructs a transaction +instruction that initializes a new mint. 
+ +```typescript +const initializeMintInstruction = createInitializeMintInstruction( + mint, + decimals, + mintAuthority.publicKey, // Designated Mint Authority + null, // No Freeze Authority + TOKEN_2022_PROGRAM_ID, +); +``` + +Lastly, let's add all of the instructions to a transaction and send it to the +blockchain: + +```typescript +const transaction = new Transaction().add( + createAccountInstruction, + initializePermanentDelegateInstruction, + initializeMintInstruction, +); + +return await sendAndConfirmTransaction(connection, transaction, [ + payer, + mintKeypair, +]); +``` + +Putting it all together, the final `src/mint-helper.ts` file will look like +this: + +```ts +import { + ExtensionType, + TOKEN_2022_PROGRAM_ID, + createInitializeMintInstruction, + createInitializePermanentDelegateInstruction, + getMintLen, +} from "@solana/spl-token"; +import { + Connection, + Keypair, + SystemProgram, + Transaction, + sendAndConfirmTransaction, +} from "@solana/web3.js"; + +/** + * Creates the mint with a permanent delegate + * @param connection + * @param payer + * @param mintKeypair + * @param decimals + * @param permanentDelegate + * @returns signature of the transaction + */ +export async function createTokenExtensionMintWithPermanentDelegate( + connection: Connection, + payer: Keypair, + mintKeypair: Keypair, + decimals: number = 2, + permanentDelegate: Keypair, +): Promise { + const mintAuthority = payer; + const mint = mintKeypair.publicKey; + + const extensions = [ExtensionType.PermanentDelegate]; + const mintLen = getMintLen(extensions); + const mintLamports = + await connection.getMinimumBalanceForRentExemption(mintLen); + const createAccountInstruction = SystemProgram.createAccount({ + fromPubkey: payer.publicKey, + newAccountPubkey: mint, + space: mintLen, + lamports: mintLamports, + programId: TOKEN_2022_PROGRAM_ID, + }); + + const initializePermanentDelegateInstruction = + createInitializePermanentDelegateInstruction( + mint, + permanentDelegate.publicKey, + TOKEN_2022_PROGRAM_ID, + ); + + const initializeMintInstruction = createInitializeMintInstruction( + mint, + decimals, + mintAuthority.publicKey, // Designated Mint Authority + null, // No Freeze Authority + TOKEN_2022_PROGRAM_ID, + ); + + const transaction = new Transaction().add( + createAccountInstruction, + initializePermanentDelegateInstruction, + initializeMintInstruction, + ); + + return await sendAndConfirmTransaction(connection, transaction, [ + payer, + mintKeypair, + ]); +} +``` + +#### 6. Create printBalances function + +We're going to be creating multiple tests that modify a token account's balance. +To make it easier to follow along we should probably create a utility function +that prints all token account balances. + +At the bottom of the `src/index.ts` file add the following `printBalances` +function: + +```typescript +async function printBalances( + connection: Connection, + tokenAccounts: PublicKey[], + names: string[], +) { + if (tokenAccounts.length !== names.length) + throw new Error("Names needs to be one to one with accounts"); + + for (let i = 0; i < tokenAccounts.length; i++) { + const tokenInfo = await getAccount( + connection, + tokenAccounts[i], + undefined, + TOKEN_2022_PROGRAM_ID, + ); + + console.log(`${names[i]}: ${tokenInfo.amount}`); + } +} +``` + +#### 7. Test Setup + +Now that we have the ability to create a mint with a permanent delegate for all +of its new mint accounts, let's write some tests to see how it functions. 
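+
+The tests below also use `getAccount`, `burnChecked`, and `approveChecked` from
+`@solana/spl-token`, along with the `alice`, `bob`, and `carol` keypairs. If
+your `index.ts` doesn't declare these yet, here is a minimal sketch, treating
+`payer` as Alice, the permanent delegate, just as the tests do:
+
+```typescript
+import { getAccount, burnChecked, approveChecked } from "@solana/spl-token";
+import { makeKeypairs } from "@solana-developers/helpers";
+
+// Alice doubles as the payer and the permanent delegate;
+// Bob and Carol are ordinary token holders.
+const alice = payer;
+const [bob, carol] = makeKeypairs(2);
+```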
+ +#### 7.1 Create Mint with Permanent Delegate + +Let's first create a mint with `payer` as the permanent delegate. To do this we +call the `createTokenExtensionMintWithPermanentDelegate` function we just +created in our `index.ts` file: + +```ts +// Create mint account with permanent delegate +await createTokenExtensionMintWithPermanentDelegate( + connection, + payer, // Also known as alice + mintKeypair, + decimals, + defaultState, +); +``` + +#### 7.2 Create Test Token Accounts + +Now, let's create three new Token accounts to test with. We can accomplish this +by calling the `createAccount` helper provided by the SPL Token library. We will +use the keypairs we generated at the beginning: `alice`, `bob`, and `carol`. + +In this lab, `alice` will be the permanent delegate. + +```typescript +// Create delegate and destination token accounts +const aliceAccount = await createAccount( + connection, + payer, + mint, + alice.publicKey, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, +); + +const bobAccount = await createAccount( + connection, + payer, + mint, + bob.publicKey, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, +); + +const carolAccount = await createAccount( + connection, + payer, + mint, + carol.publicKey, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, +); +``` + +#### 7.3 Mint tokens to accounts + +In the previous step, we created the 3 accounts we need to test the +`permanent delegate` extension. Next, we need to mint tokens to those accounts +before we write the tests. + +Add the `tokenAccounts` and `names` variables and then create a for loop that +iterates over each account and mints 100 tokens to each account. Call the +`printBalances` function so we can display the token balance of each account: + +```typescript +// Mint tokens to accounts +const tokenAccounts = [aliceAccount, bobAccount, carolAccount]; +const names = ["Alice", "Bob", "Carol"]; + +for (const holder of tokenAccounts) { + await mintTo( + connection, + payer, + mint, + holder, + mintAuthority, + amountToMint, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, + ); +} + +console.log("Initial Balances: "); +await printBalances(connection, tokenAccounts, names); +``` + +Start your local validator and run `npx esrun src/index.ts`. You should see the +following in your terminal, indicating that our token accounts have had tokens +minted to them: + +```bash +Initial Balances: +Alice: 100 +Bob: 100 +Carol: 100 +``` + +#### 8. Tests + +Now let's write some tests to show the interactions that can be had with the +`permanent delegate` extension. + +We'll write the following tests: + +1. **Attempt to Transfer with Correct Delegate:** + + - Have Alice transfer tokens from Bob's account to herself successfully since + she is the permanent delegate. + - Print balances to verify the transfer. + +2. **Attempt to Transfer without Correct Delegate:** + + - Have Bob attempt to transfer tokens from Alice's account to himself (expect + this to fail since Bob isn't authorized). + - Print balances to verify the failure. + +3. **Attempt to Transfer from One Account to Another with Correct Delegate:** + + - Have Alice transfer tokens from Bob's account to Carol's account. + - Print balances to verify the transfer. + +4. **Attempt to Burn with Correct Delegate:** + + - Have Alice burn tokens from Bob's account successfully since she is the + permanent delegate. + - Print balances to verify the burning. + +5. 
**Attempt to Burn without Correct Delegate:** + + - Have Bob attempt to burn tokens from Carol's account (expect this to fail + since Bob isn't authorized). + - Print balances to verify the failure. + +6. **Grant Permission to an Account to Transfer Tokens from a Different Token + Account:** + + - Approve Carol to transfer tokens from Bob's account to herself. + - Transfer tokens from Bob's account to Carol's account. + - Print balances to verify the transfer. + +7. **Try to Transfer Tokens Again with Carol as the Delegate, Overdrawing Her + Allotted Control:** + - Attempt to transfer tokens from Bob's account to Carol's account with Carol + again, but overdraw her allotted control (expect this to fail). + +#### 8.1 Transfer tokens with the correct delegate + +In this test, `alice` attempts to transfer tokens from `bob` to herself. This +test is expected to pass as `alice` is the permanent delegate and has control +over the token accounts of that mint. + +To do this, let's wrap a `transferChecked` function in a `try catch` and print +out the balances of our accounts: + +```typescript +// Attempt to transfer with correct delegate +{ + // Have Alice transfer tokens from Bob to herself ( Will Succeed ) + try { + await transferChecked( + connection, + payer, + bobAccount, + mint, + aliceAccount, + alice, + amountToTransfer, + decimals, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, + ); + console.log( + "✅ Since Alice is the permanent delegate, she has control over all token accounts of this mint", + ); + await printBalances(connection, tokenAccounts, names); + } catch (error) { + console.log("Alice should be able to transfer Bob's tokens to Alice"); + } +} +``` + +Test this by running the script: + +```bash +npx esrun src/index.ts +``` + +We should see the following error logged out in the terminal, meaning the +extension is working as intended. +`✅ Since Alice is the permanent delegate, she has control over all token accounts of this mint` + +#### 8.2 Transfer tokens with incorrect delegate + +In this test, `bob` is going to try to transfer tokens from `alice` to himself. +Given that `bob` is not the permanent delegate, the attempt won't be successful. + +Similar to the previous test we can create this test by calling +`transferChecked` and then printing the balances: + +```typescript +// Attempt to transfer without correct delegate +{ + // Have Bob try to transfer tokens from Alice to himself ( Will Fail ) + try { + await transferChecked( + connection, + payer, + aliceAccount, // transfer from + mint, + bobAccount, + bob, // incorrect delegate + amountToTransfer, + decimals, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, + ); + console.log("Bob should not be able to transfer tokens"); + } catch (error) { + console.log( + "✅ We expect this to fail because Bob does not have authority over Alice's funds", + ); + await printBalances(connection, tokenAccounts, names); + } +} +``` + +Go ahead and run the script, the transaction should fail. + +```bash +npx esrun src/index.ts +``` + +#### 8.3 Transfer from one account to another with the correct delegate + +Lets use the power of the permanent delegate extension to have `alice` transfer +some tokens from `bob` to `carol`. + +We expect this test to succeed. Remember, the permanent delegate has control +over **all** token accounts of the mint. 
+ +To test this, let's wrap a `transferChecked` function in a `try catch` and print +the balances: + +```typescript +// Attempt to transfer from one account to another with correct delegate +{ + // Have Alice transfer tokens from Bob to Carol + try { + await transferChecked( + connection, + payer, + bobAccount, // transfer from + mint, + carolAccount, // transfer to + alice, + amountToTransfer, + decimals, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, + ); + console.log( + "✅ Since Alice is the permanent delegate, she has control and can transfer Bob's tokens to Carol", + ); + await printBalances(connection, tokenAccounts, names); + } catch (error) { + console.log("Alice should be able to transfer Bob's tokens to Alice"); + } +} +``` + +In our first test we wrote, `bob` had 10 of his tokens transferred to `carol`. +Up until this point `bob` has 90 tokens remaining. Run the test and see the +results. You will notice that `bob` now has 80 tokens: + +```bash +npx esrun src/index.ts +``` + +#### 8.4 Burn with correct delegate + +Now let's try and burn some of the tokens from `bob`. This test is expected to +pass. + +We'll do this by calling `burnChecked` and then printing out the balances: + +```typescript +// Attempt to burn with correct delegate +{ + // Have Alice burn Bob's tokens + try { + await burnChecked( + connection, + payer, + bobAccount, + mint, + alice, // correct permanent delegate + amountToBurn, // in this case is 5 + decimals, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, + ); + console.log( + "✅ Since Alice is the permanent delegate, she has control and can burn Bob's tokens", + ); + await printBalances(connection, tokenAccounts, names); + } catch (error) { + console.error("Alice should be able to burn Bob's tokens"); + } +} +``` + +Run the tests again: + +```bash +npx esrun src/index.ts +``` + +Bob had 5 tokens burned and now only has 75 tokens. Poor Bob! + +#### 8.5 Burn with incorrect delegate + +Let's try and burn tokens from an account using the incorrect delegate. This is +expected to fail as `bob` doesn't have any control over the token accounts. + +```typescript +// Attempt to burn without correct delegate +{ + // Have Bob try to burn tokens from Carol ( Will Fail ) + try { + await burnChecked( + connection, + payer, + carolAccount, + mint, + bob, // wrong permanent delegate + amountToBurn, + decimals, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, + ); + await printBalances(connection, tokenAccounts, names); + console.error("Bob should not be able to burn the tokens"); + } catch (error) { + console.log( + "✅ We expect this to fail since Bob is not the permanent delegate and has no control over the tokens", + ); + } +} +``` + +Run `npm start`. You will see the following message, indicating that the +extension is working as intended: +`✅ We expect this to fail since Bob is not the permanent delegate and has no control over the tokens` + +#### 8.6. Assign delegate permissions to Carol and transfer + +With the `permanent delegate` extension, the initial delegate can grant a token +account permission to hold a certain level of control over the mint tokens. In +this case, `alice` will allow `carol` to transfer some of the tokens from `bob` +account to herself. + +For this to work we will need to set some boundaries for `carol`. Using the +`approveChecked` function provided by the SPL Library, we can set the maximum +number of tokens that can be transferred or burned by `carol`. 
This ensures that +she can only transfer a specified amount, protecting the overall balance from +excessive or unauthorized transfers. + +Add the following test: + +```typescript +// Grant permission to an account to transfer tokens from a different token account +{ + // Approve Carol to transfer Bob's tokens to herself + await approveChecked( + connection, + payer, + mint, + bobAccount, + carol.publicKey, + bob, + amountToTransfer, // maximum amount to transfer + decimals, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, + ); + + await transferChecked( + connection, + payer, + bobAccount, + mint, + carolAccount, + carol, + amountToTransfer, + decimals, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, + ); + + console.log( + "✅ Since Alice is the permanent delegate, she can allow Carol to transfer Bob's tokens to Carol", + ); + await printBalances(connection, tokenAccounts, names); +} +``` + +Run the tests again. You will notice that `bob` now only has 65 tokens as +`carol` has just transferred 10 of his tokens to herself: +`npx esrun src/index.ts` + +#### 8.7. Attempt to transfer again + +In the previous test, we approved `carol` to be able to transfer 10 tokens to +herself. This means that she has reached the maximum amount of tokens to send +from another account. Let's write a test and attempt to transfer another 10 +tokens to herself. This is expected to fail. + +```typescript +// Try to transfer tokens again with Carol as the delegate, overdrawing her allotted control +{ + // Try to transfer again with Carol as the delegate overdrawing her allotted control + try { + await transferChecked( + connection, + payer, + bobAccount, + mint, + carolAccount, + carol, // Owner - whoever has the authority to transfer tokens on behalf of the destination account + amountToTransfer, + decimals, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, + ); + } catch (e) { + console.log( + `✅ We expect this to fail since Carol already transferred ${amountToTransfer} tokens and has no more allotted`, + ); + } +} +``` + +Run the tests one last time and you will see this message, meaning that the +`✅ We expect this to fail since Carol already transferred 10 tokens and has no more allotted` + +Thats it! You've just created a mint account with a permanent delegate and +tested that the functionality all works! + +## Challenge + +Create your own mint account with a permanent delegate. diff --git a/content/courses/token-extensions/required-memo.mdx b/content/courses/token-extensions/required-memo.mdx new file mode 100644 index 000000000..54baeaeb6 --- /dev/null +++ b/content/courses/token-extensions/required-memo.mdx @@ -0,0 +1,607 @@ +--- +title: Required Memo +objectives: + - Create a token account with required memo on transfer + - Transfer with memo + - Transfer without memo + - Disable required memo +description: "Create a token that requires a short note on every transfer." +--- + +## Summary + +- The `required memo` extension allows developers to mandate that all incoming + transfers to a token account include a memo, facilitating enhanced transaction + tracking and user identification. +- When a transfer is initiated without a memo, the transaction will fail. +- The `required memo` extension can be disabled by calling + `disableRequiredMemoTransfers`. + +## Overview + +For certain applications, such as exchanges or financial services, tracking the +purpose or origin of a transaction is crucial. 
The `required memo` extension +specifies that a memo is necessary for every incoming transfer to a token +account. This requirement ensures that each transaction is accompanied by +additional information, which can be used for compliance, auditing, or +user-specific purposes. If the need for strict tracking diminishes, the +requirement can be adjusted to make memos optional, offering flexibility in how +transactions are handled and recorded. + +It is important to note that this is a token account extension, not a mint +extension. This means individual token accounts need to enable this feature. And +like all extensions, this will only work with Token Extensions Program tokens. + +### Creating token with required memo + +Initializing a token account with required memo involves three instructions: + +- `SystemProgram.createAccount` +- `initializeAccountInstruction` +- `createEnableRequiredMemoTransfersInstruction` + +The first instruction `SystemProgram.createAccount` allocates space on the +blockchain for the token account. This instruction accomplishes three things: + +- Allocates `space` +- Transfers `lamports` for rent +- Assigns to it's owning program + +```tsx +const accountLen = getAccountLen([ExtensionType.MemoTransfer]); +const lamports = await connection.getMinimumBalanceForRentExemption(accountLen); + +const createAccountInstruction = SystemProgram.createAccount({ + fromPubkey: payer.publicKey, + newAccountPubkey: tokenAccountKeypair.publicKey, + space: accountLen, + lamports, + programId: TOKEN_2022_PROGRAM_ID, +}); +``` + +The second instruction `createInitializeAccountInstruction` initializes the +account instruction. + +```tsx +const initializeAccountInstruction = createInitializeAccountInstruction( + tokenAccountKeypair.publicKey, + mint, + payer.publicKey, + TOKEN_2022_PROGRAM_ID, +); +``` + +The third instruction `createEnableRequiredMemoTransfersInstruction` initializes +the token account with required memo. + +```tsx +const enableRequiredMemoTransfersInstruction = + createEnableRequiredMemoTransfersInstruction( + tokenAccountKeypair.publicKey, + payer.publicKey, + undefined, + TOKEN_2022_PROGRAM_ID, + ); +``` + +When the transaction with these three instructions is sent, a new token account +is created with the required memo extension. + +```tsx +const transaction = new Transaction().add( + createAccountInstruction, + initializeAccountInstruction, + enableRequiredMemoTransfersInstruction, +); + +const transactionSignature = await sendAndConfirmTransaction( + connection, + transaction, + [payer, tokenAccountKeypair], // Signers +); +``` + +### Transferring with required memo + +When transferring to a token account with the `required memo` instruction +enabled, you need to send a memo first within the same transaction. We do this +by creating a memo instruction to call the Memo program. Then, we add in our +transfer instruction. + +```ts +const message = "Hello, Solana"; + +const transaction = new Transaction().add( + new TransactionInstruction({ + keys: [{ pubkey: payer.publicKey, isSigner: true, isWritable: true }], + data: Buffer.from(message, "utf-8"), // Memo message. 
In this case it is "Hello, Solana" + programId: new PublicKey("MemoSq4gqABAXKb96qnH8TysNcWxMyWCqXgDLGmfcHr"), // Memo program that validates keys and memo message + }), + createTransferInstruction( + ourTokenAccount, + otherTokenAccount, // Has required memo + payer.publicKey, + amountToTransfer, + undefined, + TOKEN_2022_PROGRAM_ID, + ), +); +await sendAndConfirmTransaction(connection, transaction, [payer]); +``` + +### Disabling required memo + +The required memo extension can be disabled given you have the authority to +modify the token account. To do this, simply call the +`disableRequiredMemoTransfers` function and pass in the required arguments. + +```tsx +/** + * Disable memo transfers on the given account + * + * @param connection Connection to use + * @param payer Payer of the transaction fees + * @param account Account to modify + * @param owner Owner of the account + * @param multiSigners Signing accounts if `owner` is a multisig + * @param confirmOptions Options for confirming the transaction + * @param programId SPL Token program account + * + * @return Signature of the confirmed transaction + */ +await disableRequiredMemoTransfers( + connection, + payer, + otherTokenAccount, + payer, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, +); +``` + +## Lab + +In this lab, we'll create a token account with the required memo extension. +We'll then write tests to check if the extension is working as intended by +attempting to transfer funds with and without a memo. + +#### 1. Setup Environment + +To get started, create an empty directory named `required-memo` and navigate to +it. We'll be initializing a brand new project. Run `npm init` and follow through +the prompts. + +Next, we'll need to add our dependencies. Run the following to install the +required packages: + +```bash +npm i @solana-developers/helpers@2 @solana/spl-token @solana/web3.js@1 esrun dotenv typescript +``` + +Create a directory named `src`. In this directory, create a file named +`index.ts`. This is where we will run checks against the rules of this +extension. Paste the following code in `index.ts`: + +```ts +import { + TOKEN_2022_PROGRAM_ID, + getAccount, + mintTo, + createTransferInstruction, + createMint, + disableRequiredMemoTransfers, + enableRequiredMemoTransfers, +} from "@solana/spl-token"; +import { + sendAndConfirmTransaction, + Connection, + Transaction, + PublicKey, + TransactionInstruction, +} from "@solana/web3.js"; +// import { createTokenWithMemoExtension } from "./token-helper"; // We'll uncomment this later +import { initializeKeypair, makeKeypairs } from "@solana-developers/helpers"; + +require("dotenv").config(); + +const connection = new Connection("http://127.0.0.1:8899", "confirmed"); +const payer = await initializeKeypair(connection); +const mintDecimals = 9; + +const [ourTokenAccountKeypair, otherTokenAccountKeypair] = makeKeypairs(2); +const ourTokenAccount = ourTokenAccountKeypair.publicKey; +const otherTokenAccount = otherTokenAccountKeypair.publicKey; + +const amountToMint = 1000; +const amountToTransfer = 300; + +// CREATE MINT + +// CREATE TOKENS + +// MINT TOKENS + +// ATTEMPT TO TRANSFER WITHOUT MEMO + +// ATTEMPT TO TRANSFER WITH MEMO + +// DISABLE MEMO EXTENSION AND TRANSFER +``` + +#### 2. Run validator node + +For the sake of this guide, we'll be running our own validator node. + +In a separate terminal, run the following command: `solana-test-validator`. This +will run the node and also log out some keys and values. 
The value we need to +retrieve and use in our connection is the JSON RPC URL, which in this case is +`http://127.0.0.1:8899`. We then use that in the connection to specify to use +the local RPC URL. + +`const connection = new Connection("http://127.0.0.1:8899", "confirmed");` + +Alternatively, if you'd like to use testnet or devnet, import the +`clusterApiUrl` from `@solana/web3.js` and pass it to the connection as such: + +```typescript +const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); +``` + +#### 3. Helpers + +When we pasted the `index.ts` code from earlier, we added the following helpers +provided by the `@solana-developers/helpers` package and some starting +variables. + +- `initializeKeypair`: This function creates the keypair for the `payer` and + also airdrops 1 testnet SOL to it +- `makeKeypairs`: This function creates keypairs without airdropping any SOL + +#### 4. Create the mint + +First things first, since the `required memo` extension is a token extension, we +don't need to do anything fancy with the mint. It just needs to be a Token +Extensions Program mint. That being said, we can just create one using the +`createMint` function. + +Let's do this in `src/index.ts`: + +```tsx +// CREATE MINT +const mint = await createMint( + connection, + payer, + payer.publicKey, + null, + mintDecimals, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, +); +``` + +#### 5. Create Token Account with required memo + +Let's create a new file `src/token-helper.ts` and create a new function within +it called `createTokenWithMemoExtension`. As the name implies, we'll use this to +create our token accounts with the `required memo` extension enabled. The +function will take the following arguments: + +- `connection`: The connection object +- `mint`: Public key for the new mint +- `payer`: Payer for the transaction +- `tokenAccountKeypair`: The token account keypair associated with the token + account + +```ts +import { + TOKEN_2022_PROGRAM_ID, + getAccountLen, + ExtensionType, + createInitializeAccountInstruction, + createEnableRequiredMemoTransfersInstruction, +} from "@solana/spl-token"; +import { + sendAndConfirmTransaction, + Connection, + Keypair, + Transaction, + PublicKey, + SystemProgram, +} from "@solana/web3.js"; + +export async function createTokenWithMemoExtension( + connection: Connection, + payer: Keypair, + tokenAccountKeypair: Keypair, + mint: PublicKey, +): Promise { + // CREATE ACCOUNT INSTRUCTION + + // CREATE INITIALIZE ACCOUNT INSTRUCTION + + // CREATE ENABLE REQUIRED MEMO TRANSFER INSTRUCTION + + // SEND AND CONFIRM TRANSACTION + + return await "TODO FINISH FUNCTION"; +} +``` + +Let's start adding our code. + +The first step in creating the token account is reserving space on Solana with +the `SystemProgram.createAccount` method: + +```tsx +// CREATE ACCOUNT INSTRUCTION +const accountLen = getAccountLen([ExtensionType.MemoTransfer]); +const lamports = await connection.getMinimumBalanceForRentExemption(accountLen); + +const createAccountInstruction = SystemProgram.createAccount({ + fromPubkey: payer.publicKey, + newAccountPubkey: tokenAccountKeypair.publicKey, + space: accountLen, + lamports, + programId: TOKEN_2022_PROGRAM_ID, +}); +``` + +Now we need to initialize the token account. To create this instruction we call +`createInitializeAccountInstruction` and pass in the required arguments. This +function is provided by the SPL Token package and it constructs a transaction +instruction that initializes a new token account. 
+ +```tsx +// CREATE INITIALIZE ACCOUNT INSTRUCTION +const initializeAccountInstruction = createInitializeAccountInstruction( + tokenAccountKeypair.publicKey, + mint, + payer.publicKey, + TOKEN_2022_PROGRAM_ID, +); +``` + +The last instruction we need is the one that enables the required memo. We get +this by calling the `createEnableRequiredMemoTransfersInstruction` function. +When the required memos are enabled, any transfer of tokens into the account +must include a memo. + +```tsx +// CREATE ENABLE REQUIRED MEMO TRANSFERS INSTRUCTION +const enableRequiredMemoTransfersInstruction = + createEnableRequiredMemoTransfersInstruction( + tokenAccountKeypair.publicKey, + payer.publicKey, + undefined, + TOKEN_2022_PROGRAM_ID, + ); +``` + +Lastly, let's add all of the instructions to a transaction, send it to the +blockchain and return the signature + +```tsx +// SEND AND CONFIRM TRANSACTION +const transaction = new Transaction().add( + createAccountInstruction, + initializeAccountInstruction, + enableRequiredMemoTransfersInstruction, +); + +const transactionSignature = await sendAndConfirmTransaction( + connection, + transaction, + [payer, tokenAccountKeypair], // Signers +); + +return transactionSignature; +``` + +Let's go back to `index.ts` and create two new token accounts: +`ourTokenAccountKeypair` and `otherTokenAccountKeypair` using our newly created +function. + +```typescript +// CREATE TOKENS +await createTokenWithMemoExtension( + connection, + payer, + ourTokenAccountKeypair, + mint, +); + +await createTokenWithMemoExtension( + connection, + payer, + otherTokenAccountKeypair, + mint, +); +``` + +Lastly, let's call `mintTo` to mint some initial tokens to +`ourTokenAccountKeypair`: + +```ts +// MINT TOKENS +await mintTo( + connection, + payer, + mint, + ourTokenAccount, + payer, + amountToMint, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, +); +``` + +Note: The `required memo` extension only requires a memo on transferring, not +minting. + +#### 6. Tests + +Now that we've created some accounts with the `required memo` instruction. Let's +write some tests to see how they function. + +We'll write 3 tests in total: + +- Transferring without a memo +- Transferring with a memo +- Disabling Required Memo extension and transferring without a memo + +#### 6.1 Transfer without Memo + +This first test will attempt to transfer tokens from `ourTokenAccount` to +`otherTokenAccount`. This test is expected to fail as there is no memo attached +to the transaction. + +```tsx +// ATTEMPT TO TRANSFER WITHOUT MEMO +try { + const transaction = new Transaction().add( + createTransferInstruction( + ourTokenAccount, + otherTokenAccount, + payer.publicKey, + amountToTransfer, + undefined, + TOKEN_2022_PROGRAM_ID, + ), + ); + + await sendAndConfirmTransaction(connection, transaction, [payer]); + + console.error("You should not be able to transfer without a memo."); +} catch (error) { + console.log( + `✅ - We expected this to fail because you need to send a memo with the transfer.`, + ); +} +``` + +Run this test, you should see the following error logged out in the terminal, +meaning the extension is working as intended: +`✅ - We expected this to fail because you need to send a memo with the transfer.` + +```bash +npx esrun src/index.ts +``` + +#### 6.2 Test transfer with memo + +This test will attempt to transfer tokens with a memo. This test is expected to +pass. 
Pay extra attention to the first instruction - It is the part of the +transaction that adds the memo instruction to it: + +```tsx +// ATTEMPT TO TRANSFER WITH MEMO +const message = "Hello, Solana"; + +const transaction = new Transaction().add( + new TransactionInstruction({ + keys: [{ pubkey: payer.publicKey, isSigner: true, isWritable: true }], + data: Buffer.from(message, "utf-8"), // Memo message. In this case it is "Hello, Solana" + programId: new PublicKey("MemoSq4gqABAXKb96qnH8TysNcWxMyWCqXgDLGmfcHr"), // Memo program that validates keys and memo message + }), + + createTransferInstruction( + ourTokenAccount, + otherTokenAccount, + payer.publicKey, + amountToTransfer, + undefined, + TOKEN_2022_PROGRAM_ID, + ), +); +await sendAndConfirmTransaction(connection, transaction, [payer]); + +const accountAfterMemoTransfer = await getAccount( + connection, + otherTokenAccount, + undefined, + TOKEN_2022_PROGRAM_ID, +); + +console.log( + `✅ - We have transferred ${accountAfterMemoTransfer.amount} tokens to ${otherTokenAccount} with the memo: ${message}`, +); +``` + +Run the test and see that it passes: + +```bash +npx esrun src/index.ts +``` + +#### 6.3 Test transfer with disabled memo + +In our last test, we'll disable the `required memo` extension on the +`otherTokenAccount` and send it some tokens without a memo. We expect this to +pass. + +```tsx +// DISABLE MEMO EXTENSION AND TRANSFER +await disableRequiredMemoTransfers( + connection, + payer, + otherTokenAccount, + payer, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, +); + +// Transfer tokens to otherTokenAccount +const transfer = new Transaction().add( + createTransferInstruction( + ourTokenAccount, + otherTokenAccount, + payer.publicKey, + amountToTransfer, + undefined, + TOKEN_2022_PROGRAM_ID, + ), +); + +await sendAndConfirmTransaction(connection, transfer, [payer]); + +const accountAfterDisable = await getAccount( + connection, + otherTokenAccount, + undefined, + TOKEN_2022_PROGRAM_ID, +); + +// Re-enable memo transfers to show it exists +await enableRequiredMemoTransfers( + connection, + payer, + otherTokenAccount, + payer, + undefined, + undefined, + TOKEN_2022_PROGRAM_ID, +); + +console.log( + `✅ - We have transferred ${accountAfterDisable.amount} tokens to ${otherTokenAccount} without a memo.`, +); +``` + +Run the tests. You will notice that `otherTokenAccount` now has 600 tokens, +meaning it has successfully transferred without a memo after disabling the +extension. + +```bash +npx esrun src/index.ts +``` + +Congratulations! We've just tested the required memo extension! + +## Challenge + +Go create your own token account with required memo. diff --git a/content/courses/token-extensions/token-extensions-in-the-client.mdx b/content/courses/token-extensions/token-extensions-in-the-client.mdx new file mode 100644 index 000000000..0e3aebbd6 --- /dev/null +++ b/content/courses/token-extensions/token-extensions-in-the-client.mdx @@ -0,0 +1,602 @@ +--- +title: Use Token Extensions from a Client +objectives: + - Learn how to effectively integrate multiple Solana token programs within + client applications + - Become proficient in utilizing the SPL TypeScript library for comprehensive + token operations +description: + "Use mints with Token Extensions program from TS in web and mobile apps." 
+---
+
+## Summary
+
+- The `Token Extensions Program` has all of the same functions as the
+  `Token Program`, with added `extensions`
+- These two token programs: `Token Program` and `Token Extensions Program` use
+  separate addresses and are not directly compatible
+- Supporting both requires specifying the correct program ID in client-side
+  functions
+- By default, the SPL program library uses the original **`Token Program`**
+  unless another is specified
+- The `Token Extensions Program` may also be referred to by its technical spec
+  name, `Token22`
+
+## Overview
+
+The `Token Extensions Program` enhances the original `Token Program` by
+incorporating additional features known as extensions. These extensions are
+designed to address specific scenarios that previously required developers to
+fork and alter the Solana Program Library, leading to split ecosystems and
+challenges in adoption. The Token Extensions Program allows these scenarios to
+be handled without forking.
+
+Since the `Token Program` and `Token Extensions Program` are different onchain
+programs, they are not interoperable. For example, a token minted with the
+`Token Extensions Program` cannot be transferred with the `Token Program`. As a
+result, we'll have to support both programs in any client-side applications that
+need to display or otherwise support all SPL tokens. This means we'll want to
+explicitly handle mints from both the original Token Program (address:
+`TOKEN_PROGRAM_ID`) and the Extension Program (address:
+`TOKEN_2022_PROGRAM_ID`).
+
+Fortunately, the interfaces for the two programs remain consistent, allowing the
+use of `spl-token` helper functions with either program by simply swapping the
+program ID (the functions use the original Token Program by default if no
+program ID is provided). Most of the time, end users are not concerned with the
+specific token program being used. As such, implementing additional logic to
+track, assemble, and merge details from both token varieties is essential to
+guarantee a smooth user experience.
+
+Lastly, "Token 22" is often used as the technical name. If you see someone refer
+to the Token 22 Program, they are referring to the Token Extensions Program.
+
+### Differences between working with Token Program Tokens and Token Extensions Tokens
+
+When interacting with mints and tokens, we need to be sure we're using the
+correct token program. To create a mint without extensions, use the
+`Token Program`; to create a mint with extensions, use the
+`Token Extensions Program`.
+
+Fortunately, the `spl-token` package makes it simple to do this. It provides
+both the `TOKEN_PROGRAM_ID` and `TOKEN_2022_PROGRAM_ID` constants, and all of
+its helper functions for creating and minting tokens accept a program ID as
+input.
+
+
+
+`spl-token` defaults to using the `TOKEN_PROGRAM_ID` unless
+otherwise specified. Make sure to explicitly pass the `TOKEN_2022_PROGRAM_ID`
+for all function calls related to the Token Extensions Program. Otherwise, you
+will get the following error: `TokenInvalidAccountOwnerError`.
+
+
+
+### Considerations when working with both Token and Extension Tokens
+
+Although the interfaces for both of these programs remain consistent, they are
+two different programs. Their program IDs are unique and non-interchangeable,
+so accounts derived from them (such as Associated Token Accounts) end up with
+different addresses. If you want to support both `Token Program` tokens and
+`Token Extensions Program` tokens, you must add extra logic on the client side.
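+
+To make this concrete, here is a minimal sketch of that extra logic, assuming a
+hypothetical `transferAnySplToken` helper: the caller supplies whichever program
+ID owns the mint, and the helper forwards it to `spl-token`'s `transferChecked`
+function.
+
+```ts
+import {
+  transferChecked,
+  TOKEN_PROGRAM_ID,
+  TOKEN_2022_PROGRAM_ID,
+} from "@solana/spl-token";
+import { Connection, Keypair, PublicKey } from "@solana/web3.js";
+
+// Sketch: one code path for both token programs; only the program ID differs
+async function transferAnySplToken(
+  connection: Connection,
+  payer: Keypair,
+  source: PublicKey, // source token account
+  mint: PublicKey,
+  destination: PublicKey, // destination token account
+  amount: bigint,
+  decimals: number,
+  tokenProgramId: PublicKey, // TOKEN_PROGRAM_ID or TOKEN_2022_PROGRAM_ID
+) {
+  // Passing the wrong program ID fails with TokenInvalidAccountOwnerError
+  return transferChecked(
+    connection,
+    payer, // fee payer
+    source,
+    mint,
+    destination,
+    payer, // owner of the source token account
+    amount,
+    decimals,
+    [], // no multisig signers
+    undefined, // default confirm options
+    tokenProgramId,
+  );
+}
+```
+
+The same pattern applies to the other `spl-token` helpers; how to determine
+which program owns a given mint is covered below.
+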
+
+### Associated Token Accounts (ATA)
+
+An Associated Token Account (ATA) is a Token Account whose address is derived
+using the wallet's public key, the token's mint, and the token program. This
+mechanism provides a deterministic Token Account address for each mint per user.
+The ATA is usually the default token account for most holders. Fortunately,
+ATAs are handled the same way with both token programs.
+
+We can use the ATA helper functions for each token program by providing the
+desired program ID.
+
+If we want to call `getOrCreateAssociatedTokenAccount` for a
+`Token Extensions Program` mint, we can pass in `TOKEN_2022_PROGRAM_ID` for the
+`tokenProgramId` parameter:
+
+```ts
+const tokenProgramId = TOKEN_2022_PROGRAM_ID;
+
+const tokenAccount = await getOrCreateAssociatedTokenAccount(
+  connection,
+  payer,
+  mintAddress,
+  payer.publicKey,
+  true,
+  "finalized",
+  { commitment: "finalized" },
+  tokenProgramId, // TOKEN_PROGRAM_ID for Token Program tokens and TOKEN_2022_PROGRAM_ID for Token Extensions Program tokens
+);
+```
+
+To re-create the ATA's address from scratch, we can use the
+`findProgramAddressSync` function by providing the correct seeds:
+
+```ts
+function findAssociatedTokenAddress(
+  walletAddress: PublicKey,
+  tokenMintAddress: PublicKey,
+): PublicKey {
+  return PublicKey.findProgramAddressSync(
+    [
+      walletAddress.toBuffer(),
+      TOKEN_PROGRAM_ID.toBuffer(), // replace TOKEN_PROGRAM_ID with TOKEN_2022_PROGRAM_ID for Token22 tokens
+      tokenMintAddress.toBuffer(),
+    ],
+    ASSOCIATED_TOKEN_PROGRAM_ID,
+  )[0];
+}
+```
+
+### How to fetch tokens
+
+Fetching tokens works the same way for both the Token Program and the Token
+Extensions Program. All we have to do is provide the correct token program ID:
+
+```ts
+const tokenAccounts = await connection.getTokenAccountsByOwner(
+  walletPublicKey,
+  { programId: TOKEN_PROGRAM_ID }, // or TOKEN_2022_PROGRAM_ID
+);
+```
+
+If we want to fetch all of the tokens for a particular owner, we can call
+`getTokenAccountsByOwner` twice: once with `TOKEN_PROGRAM_ID` and once with
+`TOKEN_2022_PROGRAM_ID`. Note that the accounts live under the response's
+`value` field:
+
+```ts
+const allOwnedTokens = [];
+const tokenAccounts = await connection.getTokenAccountsByOwner(
+  wallet.publicKey,
+  { programId: TOKEN_PROGRAM_ID },
+);
+const tokenExtensionAccounts = await connection.getTokenAccountsByOwner(
+  wallet.publicKey,
+  { programId: TOKEN_2022_PROGRAM_ID },
+);
+
+allOwnedTokens.push(...tokenAccounts.value, ...tokenExtensionAccounts.value);
+```
+
+
+
+It's likely you'll want to store and associate the token
+program with the token upon fetching.
+
+
+
+#### Check owning program
+
+You may run into a scenario where you don't know the token program for a given
+account. 
Fortunately, `getParsedAccountInfo` will allow us to determine the +owning program: + +```typescript +const accountInfo = await connection.getParsedAccountInfo(mintAddress); +if (accountInfo.value === null) { + throw new Error("Account not found"); +} + +const programId = accountInfo.value.owner; // will return TOKEN_PROGRAM_ID for Token Program mint address and TOKEN_2022_PROGRAM_ID for Token Extensions Program mint address + +//we now use the programId to fetch the tokens +const tokenAccounts = await connection.getTokenAccountsByOwner( + wallet.publicKey, + { programId }, +); +``` + + + +After you fetch the owning program, it may be a good idea +to save that owner and associate it with the mints/tokens you are +handling. + + + +## Lab - Add Extension Token support to a script + +Let's work through a holistic example where we add Token Extensions support to +an existing script. This lab will lead us through the necessary adjustments and +expansions to embrace the capabilities and nuances of both the original Token +Program and its extension counterpart. + +By the end of this lab, we'll have navigated the complexities of supporting +these two distinct but related token systems, ensuring our script can interact +smoothly with both. + +#### 1. Clone the starter code + +To get started, clone +[this lab's repository](https://github.com/Unboxed-Software/solana-lab-token22-in-the-client/) +and checkout the `starter` branch. This branch contains a couple of helper files +and some boilerplate code to get you started. + +```bash +git clone https://github.com/Unboxed-Software/solana-lab-token22-in-the-client.git +cd solana-lab-token22-in-the-client +git checkout starter +``` + +Run `npm install` to install the dependencies. + +#### 2. Get familiar with the starter code + +The starter code comes with the following files: + +- `print-helpers.ts` +- `index.ts` + +The **`print-helpers.ts`** file contains a function called **`printTableData`**, +which is designed to output data to the console in a structured format. This +function is capable of accepting any object as its argument, and it utilizes the +**`console.table`** method, a feature provided by NodeJS, to display the data in +an easily readable table format. + +Lastly, `index.ts` contains our main script. It currently only creates a +connection and calls `initializeKeypair` to generate the keypair for `payer`. + +#### 3. Run validator node (Optional) + +Optionally, you may want to run your own local validator instead of using +devnet. This a good way around any issues with airdropping. + +In a separate terminal, run the following command: `solana-test-validator`. This +will run the node and also log out some keys and values. The value we need to +retrieve and use in our connection is the JSON RPC URL, which in this case is +`http://127.0.0.1:8899`. We then use that in the connection to specify to use +the local RPC URL. + +```typescript +const connection = new Connection("http://127.0.0.1:8899", "confirmed"); +``` + +If you'd like to use Devnet and provide you're own devnet wallet, you still +can - just reconfigure the `Connection` and the keypair path input to +`initializeKeypair`. + +```typescript +const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); +``` + +Let's test that it all works so far by running `npm run start`. You should see +the `payer` public key logged out in your terminal. + +#### 4. 
Create Token Program and Token Extensions Program mints + +Let's start by creating new token mints using both the `Token Program` and the +`Token Extensions Program`. + +Create a new file called `create-and-mint-token.ts`. In this file we will create +a function called `createAndMintToken`. As the name suggests it will create a +mint, token account (ATA) and then mint some amount of tokens to that account. + +Within this `createAndMintToken` function we will be call `createMint`, +`getOrCreateAssociatedTokenAccount` and `mintTo`. This function is designed to +be indifferent to the specific token program being used, allowing for the +creation of tokens from either the `Token Program` or the +`Token Extensions Program`. This capability is achieved by accepting a program +ID as a parameter, enabling the function to adapt its behavior based on the +provided ID. + +Here are the arguments we'll be passing into this function: + +- `connection` - The connection object to use +- `tokenProgramId` - The token program to point to +- `payer` - The keypair paying for the transaction +- `decimals` - The number of decimals to include for the mint +- `mintAmount` - The amount of tokens to mint to the payer + +And this is what the function will do: + +- Create a new mint using **`createMint`** +- Fetch mint information using **`getMint`** +- Log mint information using **`printTableData`** +- Create an associated token account with + **`getOrCreateAssociatedTokenAccount`** +- Log the address of the associated token account +- Mint tokens to the associated token account with **`mintTo`** + +All put together this is what the final `createAndMintToken` function looks +like: + +```ts +import { + createMint, + getMint, + getOrCreateAssociatedTokenAccount, + mintTo, +} from "@solana/spl-token"; +import { Connection, Keypair, PublicKey } from "@solana/web3.js"; +import printTableData from "./print-helpers"; + +export async function createAndMintToken( + connection: Connection, + tokenProgramId: PublicKey, + payer: Keypair, + decimals: number, + mintAmount: number, +): Promise { + console.log("\\nCreating a new mint..."); + const mint = await createMint( + connection, + payer, + payer.publicKey, + payer.publicKey, + decimals, + undefined, + { + commitment: "finalized", // confirmOptions argument + }, + tokenProgramId, + ); + + console.log("\\nFetching mint info..."); + + const mintInfo = await getMint(connection, mint, "finalized", tokenProgramId); + + printTableData(mintInfo); + + console.log("\\nCreating associated token account..."); + const tokenAccount = await getOrCreateAssociatedTokenAccount( + connection, + payer, + mint, + payer.publicKey, + true, + "finalized", + { commitment: "finalized" }, + tokenProgramId, + ); + + console.log(`Associated token account: ${tokenAccount.address.toBase58()}`); + + console.log("\\nMinting to associated token account..."); + await mintTo( + connection, + payer, + mint, + tokenAccount.address, + payer, + mintAmount, + [payer], + { commitment: "finalized" }, + tokenProgramId, + ); + + return mint; +} + +export default createAndMintToken; +``` + +#### 5. Creating and minting tokens + +Let's now take our new function and invoke it twice within our main script in +`index.ts`. We'll want a `Token Program` and `Token Extensions Program` token to +test against. 
So we'll use our two different program IDs: + +```typescript +import { initializeKeypair } from "@solana-developers/helpers"; +import { Cluster, Connection } from "@solana/web3.js"; +import createAndMintToken from "./create-and-mint-token"; +import printTableData from "./print-helpers"; +import { TOKEN_2022_PROGRAM_ID, TOKEN_PROGRAM_ID } from "@solana/spl-token"; +import dotenv from "dotenv"; +dotenv.config(); + +const connection = new Connection("http://127.0.0.1:8899", "confirmed"); +const payer = await initializeKeypair(connection); + +console.log(`Payer: ${payer.publicKey.toBase58()}`); + +const tokenProgramMint = await createAndMintToken( + connection, + TOKEN_PROGRAM_ID, + payer, + 0, + 1000, +); +const tokenExtensionProgramMint = await createAndMintToken( + connection, + TOKEN_2022_PROGRAM_ID, + payer, + 0, + 1000, +); +``` + +At this point you can run `npm run start` and see that both mints get created +and their info logged to the console. + +#### 6. Fetch Token Program and Token Extensions Program tokens + +We can now fetch tokens using the wallet's public key and the program ID. + +Let's create a new file `fetch-token-info.ts`. + +Within that new file, let's create the `fetchTokenInfo` function. This function +will fetch the token account provided and return a new interface we'll create +named `TokenInfoForDisplay`. This will allow us to format the returning data +nicely in our console. Again, this function will be agnostic about which token +program the account it from. + +```ts +import { AccountLayout, getMint } from "@solana/spl-token"; +import { Connection, LAMPORTS_PER_SOL, PublicKey } from "@solana/web3.js"; + +export type TokenTypeForDisplay = "Token Program" | "Token Extensions Program"; + +export interface TokenInfoForDisplay { + mint: PublicKey; + amount: number; + decimals: number; + displayAmount: number; + type: TokenTypeForDisplay; +} +``` + +To actually fetch all of this information we will be calling +`getTokenAccountsByOwner` and mapping the results into our new +`TokenInfoForDisplay` interface. + +To accomplish this the `fetchTokenInfo` function will need the following +parameters: + +- `connection` - The connection object to use +- `owner` - The wallet which owns the associated token accounts +- `programId` - The token program to point to +- `type` - Either `Token Program` or `Token Extensions Program`; used for + console logging purpose + +```ts +export type TokenTypeForDisplay = "Token Program" | "Token Extensions Program"; + +export interface TokenInfoForDisplay { + mint: PublicKey; + amount: number; + decimals: number; + displayAmount: number; + type: TokenTypeForDisplay; +} + +export async function fetchTokenInfo( + connection: Connection, + owner: PublicKey, + programId: PublicKey, + type: TokenTypeForDisplay, +): Promise { + const tokenAccounts = await connection.getTokenAccountsByOwner(owner, { + programId, + }); + + const ownedTokens: TokenInfoForDisplay[] = []; + + for (const tokenAccount of tokenAccounts.value) { + const accountData = AccountLayout.decode(tokenAccount.account.data); + + const mintInfo = await getMint( + connection, + accountData.mint, + "finalized", + programId, + ); + + ownedTokens.push({ + mint: accountData.mint, + amount: Number(accountData.amount), + decimals: mintInfo.decimals, + displayAmount: Number(accountData.amount) / 10 ** mintInfo.decimals, + type, + }); + } + + return ownedTokens; +} +``` + +Let's see this function in action. Inside of `index.ts`, let's add two separate +calls to this function, once for each program. 
+ +```ts +// previous imports +import { TokenInfoForDisplay, fetchTokenInfo } from "./fetch-token-info"; + +// previous code +const myTokens: TokenInfoForDisplay[] = []; + +myTokens.push( + ...(await fetchTokenInfo( + connection, + payer.publicKey, + TOKEN_PROGRAM_ID, + "Token Program", + )), + ...(await fetchTokenInfo( + connection, + payer.publicKey, + TOKEN_2022_PROGRAM_ID, + "Token Extensions Program", + )), +); + +printTableData(myTokens); +``` + +Run `npm run start`. You should now see all of the tokens the payer wallet owns. + +#### 7. Fetch Token Program and Token Extensions Program tokens without the program ID + +Now let's take a look at how we can retrieve the owning program from a given +mint account. + +To do this we will create a new function `fetchTokenProgramFromAccount` to +`fetch-token-info.ts`. This function will simply return us the `programId` of +the given mint. + +To accomplish this we will call the `getParsedAccountInfo` function and return +the owning program from `.value.owner`. + +The `fetchTokenProgramFromAccount` function will need the following parameters: + +- `connection` - The connection object to use +- `mint` - Public key of the mint account + +The final function will look like this: + +```ts +// previous imports and code + +export async function fetchTokenProgramFromAccount( + connection: Connection, + mint: PublicKey, +) { + // Find the program ID from the mint + const accountInfo = await connection.getParsedAccountInfo(mint); + if (accountInfo.value === null) { + throw new Error("Account not found"); + } + const programId = accountInfo.value.owner; + return programId; +} +``` + +Finally let's add see this in action in our `index.ts`: + +```ts +// previous imports +import { + TokenInfoForDisplay, + fetchTokenInfo, + fetchTokenProgramFromAccount, +} from "./fetch-token-info"; + +// previous code +const tokenProgramTokenProgram = await fetchTokenProgramFromAccount( + connection, + tokenProgramMint, +); +const tokenExtensionProgramTokenProgram = await fetchTokenProgramFromAccount( + connection, + tokenExtensionProgramMint, +); + +if (!tokenProgramTokenProgram.equals(TOKEN_PROGRAM_ID)) + throw new Error("Token Program mint token program is not correct"); +if (!tokenExtensionProgramTokenProgram.equals(TOKEN_2022_PROGRAM_ID)) + throw new Error("Token Extensions Program mint token program is not correct"); +``` + +Run `npm run start` again. You should see the same output as before - meaning +the expected token programs were correct. + +That's it! If you get stuck at any step, you can find the complete code in +[this lab's repository's](https://github.com/Unboxed-Software/solana-lab-token22-in-the-client/) +`solution` branch. + +## Challenge + +For the challenge, try and implement the burn token functionality for the Token +Program tokens and the Token Extensions tokens. diff --git a/content/courses/token-extensions/token-extensions-metadata.mdx b/content/courses/token-extensions/token-extensions-metadata.mdx new file mode 100644 index 000000000..1143f0101 --- /dev/null +++ b/content/courses/token-extensions/token-extensions-metadata.mdx @@ -0,0 +1,1409 @@ +--- +title: Metadata and Metadata Pointer Extension +objectives: + - Explain how the metadata pointers and metadata extensions work on Token + Extensions Program Mints + - Create an NFT with metadata embedded in the mint account itself + - Create an NFT with the metadata pointer extension +description: "Include token metadata directly inside the token mint account." 
+---
+
+## Summary
+
+- The `metadata pointer` extension associates a token mint directly with a
+  metadata account. This happens by storing the metadata account's address in
+  the mint. This metadata account address can be an external metadata account,
+  like Metaplex, or can be the mint itself if using the `metadata` extension.
+- The `metadata` mint extension allows embedding of metadata directly into mint
+  accounts through the Token Extensions Program. This is always accompanied by
+  a self-referencing `metadata pointer`. This facilitates embedding
+  comprehensive token information at the minting stage.
+- These extensions enhance the interoperability of tokens across different
+  applications and platforms by standardizing how metadata is associated and
+  accessed.
+- Directly embedding or pointing to metadata can streamline transactions and
+  interactions by reducing the need for additional lookups or external calls.
+
+## Overview
+
+The Token Extensions Program streamlines metadata on Solana. Without the Token
+Extensions Program, developers store metadata in metadata accounts owned by an
+onchain metadata program, most commonly `Metaplex`. However, this has some
+drawbacks. For example, the mint account to which the metadata is "attached" has
+no awareness of the metadata account. To determine if an account has metadata,
+we have to derive a PDA from the mint and the `Metaplex` program and query the
+network to see if that metadata account exists. Additionally, to create and
+update this metadata you have to use a secondary program (i.e. `Metaplex`).
+These processes introduce vendor lock-in and added complexity. The Token
+Extensions Program's metadata extensions fix this by introducing two extensions:
+
+- `metadata-pointer` extension: Adds two simple fields in the mint account
+  itself: a publicKey pointer to the account that holds the metadata for the
+  token following the
+  [Token-Metadata Interface](https://github.com/solana-labs/solana-program-library/tree/master/token-metadata/interface),
+  and the authority to update this pointer.
+- `metadata` extension: Adds the fields described in the
+  [Token-Metadata Interface](https://github.com/solana-labs/solana-program-library/tree/master/token-metadata/)
+  which allows us to store the metadata in the mint itself.
+
+
+
+The `metadata` extension must be used in conjunction with
+the `metadata-pointer` extension which points back to the mint itself.
+
+
+
+### Metadata-Pointer extension:
+
+Since multiple metadata programs exist, a mint can have numerous accounts
+claiming to describe the mint, making it complicated to know which one is the
+mint's "official" metadata. To resolve this, the `metadata-pointer` extension
+adds a `publicKey` field to the mint account called `metadataAddress`, which
+points to the account that holds the metadata for this token. To avoid imitation
+mints claiming to be, say, a stablecoin, a client can now check whether the mint
+and the metadata point to each other.
+
+The extension adds two new fields to the mint account to accomplish this:
+
+- `metadataAddress`: Holds the metadata account address for this token; it can
+  point to itself if you use the `metadata` extension.
+- `authority`: The authority that can set the metadata address.
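+
+As a concrete example, here is a minimal sketch of that mutual check, assuming
+the metadata lives in a Token Extensions Program account (for example, the mint
+itself via the `metadata` extension). It uses `getMetadataPointerState` and
+`getTokenMetadata`, both covered later in this lesson, and
+`metadataMatchesMint` is just an illustrative helper name.
+
+```ts
+import { Connection, PublicKey } from "@solana/web3.js";
+import {
+  getMint,
+  getMetadataPointerState,
+  getTokenMetadata,
+  TOKEN_2022_PROGRAM_ID,
+} from "@solana/spl-token";
+
+// Sketch: verify that a mint and its metadata reference each other
+async function metadataMatchesMint(
+  connection: Connection,
+  mintAddress: PublicKey,
+): Promise<boolean> {
+  const mint = await getMint(
+    connection,
+    mintAddress,
+    "confirmed",
+    TOKEN_2022_PROGRAM_ID,
+  );
+
+  // Read the metadata pointer stored on the mint account
+  const pointer = getMetadataPointerState(mint);
+  if (!pointer?.metadataAddress) return false;
+
+  // Fetch the metadata the pointer targets (the mint itself when using the
+  // `metadata` extension) and confirm it points back at the same mint
+  const metadata = await getTokenMetadata(connection, pointer.metadataAddress);
+  if (!metadata) return false;
+
+  return metadata.mint.equals(mintAddress);
+}
+```
+
+If either direction of the check fails, the metadata should be treated as
+untrusted.
+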
+ +The extension also introduces three new helper functions: + +- `createInitializeMetadataPointerInstruction` +- `createUpdateMetadataPointerInstruction` +- `getMetadataPointerState` + +The function `createInitializeMetadataPointerInstruction` will return the +instruction that will set the metadata address in the mint account. + +This function takes four parameters: + +- `mint`: the mint account that will be created +- `authority`: the authority that can set the metadata address +- `metadataAddress`: the account address that holds the metadata +- `programId`: the SPL Token program ID (in this case, it will be the Token + Extension program ID) + +```ts +function createInitializeMetadataPointerInstruction( + mint: PublicKey, + authority: PublicKey | null, + metadataAddress: PublicKey | null, + programId: PublicKey, +); +``` + +The `createUpdateMetadataPointerInstruction` function returns an instruction +that will update the mint account's metadata address. You can update the +metadata pointer at any point if you hold the authority. + +This function takes five parameters: + +- `mint`: the mint account that will be created. +- `authority`: the authority that can set the metadata address +- `metadataAddress`: the account address that holds the metadata +- `multiSigners`: the multi-signers that will sign the transaction +- `programId`: the SPL Token program ID (in this case, it will be the Token + Extension program ID) + +```ts +function createUpdateMetadataPointerInstruction( + mint: PublicKey, + authority: PublicKey, + metadataAddress: PublicKey | null, + multiSigners: (Signer | PublicKey)[] = [], + programId: PublicKey = TOKEN_2022_PROGRAM_ID, +); +``` + +The `getMetadataPointerState` function will return the `MetadataPointer` state +for the given `Mint` object. We can get this using the `getMint` function. + +```ts +function getMetadataPointerState(mint: Mint): Partial | null; +``` + +```ts +export interface MetadataPointer { + /** Optional authority that can set the metadata address */ + authority: PublicKey | null; + /** Optional Account Address that holds the metadata */ + metadataAddress: PublicKey | null; +} +``` + +#### Create NFT with metadata-pointer + +To create an NFT with the `metadata-pointer` extension, we need two new +accounts: the `mint` and the `metadataAccount`. + +The `mint` is usually a new `Keypair` created by `Keypair.generate()`. The +`metadataAccount` can be the `mint`'s `publicKey` if using the metadata mint +extension or another metadata account like from Metaplex. + +At this point, the `mint` is only a `Keypair`, but we need to save space for it +on the blockchain. All accounts on the Solana blockchain owe rent proportional +to the size of the account, and we need to know how big the mint account is in +bytes. We can use the `getMintLen` method from the `@solana/spl-token` library. +The metadata-pointer extension increases the size of the mint account by adding +two new fields: `metadataAddress` and `authority`. + +```ts +const mintLen = getMintLen([ExtensionType.MetadataPointer]); +const lamports = await connection.getMinimumBalanceForRentExemption(mintLen); +``` + +To create and initialize the `mint` with the metadata pointer, we need several +instructions in a particular order: + +1. Create the `mint` account, which reserves space on the blockchain with + `SystemProgram.createAccount` +2. Initialize the metadata pointer extension with + `createInitializeMetadataPointerInstruction` +3. 
Initialize the mint itself with `createInitializeMintInstruction` + +```ts +const createMintAccountInstructions = SystemProgram.createAccount({ + fromPubkey: payer.publicKey, + lamports, + newAccountPubkey: mint.publicKey, + programId: TOKEN_2022_PROGRAM_ID, + space: mintLen, +}); + +const initMetadataPointerInstructions = + createInitializeMetadataPointerInstruction( + mint.publicKey, + payer.publicKey, + metadataAccount, + TOKEN_2022_PROGRAM_ID, + ); + +const initMintInstructions = createInitializeMintInstruction( + mint.publicKey, + decimals, + payer.publicKey, + payer.publicKey, + TOKEN_2022_PROGRAM_ID, +); +``` + +To create the NFT, add the instructions to a transaction and send it to the +Solana network: + +```ts +const transaction = new Transaction().add( + createMintAccountInstructions, + initMetadataPointerInstructions, + initMintInstructions, +); +const sig = await sendAndConfirmTransaction(connection, transaction, [ + payer, + mint, +]); +``` + +### Metadata extension: + +The `metadata` extension is an exciting addition to the Token Extensions +Program. This extension allows us to store the metadata directly _in_ the mint +itself! This eliminates the need for a separate account, greatly simplifying the +handling of metadata. + + + +The `metadata` extension works directly with the +`metadata-pointer` extension. During the mint creation, you should also add the +`metadata-pointer` extension, pointed at the mint itself. Check out the +[Solana Token Extensions Program docs](https://spl.solana.com/token-2022/extensions#metadata) + + + +The added fields and functions in the metadata extension follow the +[Token-Metadata Interface](https://github.com/solana-labs/solana-program-library/tree/master/token-metadata/interface) + +When a mint is initialized with the metadata extension, it will store these +extra fields: + +```rust +type Pubkey = [u8; 32]; +type OptionalNonZeroPubkey = Pubkey; // if all zeroes, interpreted as `None` + +pub struct TokenMetadata { + /// The authority that can sign to update the metadata + pub update_authority: OptionalNonZeroPubkey, + /// The associated mint, used to counter spoofing to be sure that metadata + /// belongs to a particular mint + pub mint: Pubkey, + /// The longer name of the token + pub name: String, + /// The shortened symbol for the token + pub symbol: String, + /// The URI pointing to richer metadata + pub uri: String, + /// Any additional metadata about the token as key-value pairs. The program + /// must avoid storing the same key twice. + pub additional_metadata: Vec<(String, String)>, +} +``` + +With these added fields, the `@solana/spl-token-metadata` library has been +updated with the following functions to help out: + +- `createInitializeInstruction` +- `createUpdateFieldInstruction` +- `createRemoveKeyInstruction` +- `createUpdateAuthorityInstruction` +- `createEmitInstruction` +- `pack` +- `unpack` + +Additionally, the @solana/spl-token library introduces a new function and two +constants: + +- `getTokenMetadata` +- `LENGTH_SIZE`: a constant number of bytes of the length of the data +- `TYPE_SIZE`: a constant number of bytes of the type of the data + +The function `createInitializeInstruction` initializes the metadata in the +account and sets the primary metadata fields (name, symbol, URI). The function +then returns an instruction that will set the metadata fields in the mint +account. 
+ +This function takes eight parameters: + +- `mint`: the mint account that will be initialize +- `metadata`: the metadata account that will be created +- `mintAuthority`: the authority that can mint tokens +- `updateAuthority`: the authority that can sign to update the metadata +- `name`: the longer name of the token +- `symbol`: the shortened symbol for the token, also known as the ticker +- `uri`: the token URI pointing to richer metadata +- `programId`: the SPL Token program ID (in this case it will be the Token + Extension program ID) + +```ts +export interface InitializeInstructionArgs { + programId: PublicKey; + metadata: PublicKey; + updateAuthority: PublicKey; + mint: PublicKey; + mintAuthority: PublicKey; + name: string; + symbol: string; + uri: string; +} + +export function createInitializeInstruction( + args: InitializeInstructionArgs, +): TransactionInstruction; +``` + +The function `createUpdateFieldInstruction` returns the instruction that creates +or updates a field in a token-metadata account. + +This function takes five parameters: + +- `metadata`: the metadata account address. +- `updateAuthority`: the authority that can sign to update the metadata +- `field`: the field that we want to update, this is either one of the built in + `Field`s or a custom field stored in the `additional_metadata` field +- `value`: the updated value of the field +- `programId`: the SPL Token program Id (in this case it will be the Token + Extension program Id) + +```ts +export enum Field { + Name, + Symbol, + Uri, +} + +export interface UpdateFieldInstruction { + programId: PublicKey; + metadata: PublicKey; + updateAuthority: PublicKey; + field: Field | string; + value: string; +} + +export function createUpdateFieldInstruction( + args: UpdateFieldInstruction, +): TransactionInstruction; +``` + + + +If the metadata you are updating requires more space than +the initial allocated space, you'll have to pair it with a `system.transfer` to +have enough rent for the `createUpdateFieldInstruction` to realloc with. You can +get the extra space needed with `getAdditionalRentForUpdatedMetadata`. Or if +you're calling this update as a standalone, you can use the +`tokenMetadataUpdateFieldWithRentTransfer` helper to do all of this at +once. + + + +The function `createRemoveKeyInstruction` returns the instruction that removes +the `additional_metadata` field from a token-metadata account. + +This function takes five parameters: + +- `metadata`: the metadata account address +- `updateAuthority`: the authority that can sign to update the metadata +- `field`: the field that we want to remove +- `programId`: the SPL Token program ID (in this case it will be the Token + Extension program ID) +- `idempotent`: When true, instruction will not error if the key does not exist + +```ts +export interface RemoveKeyInstructionArgs { + programId: PublicKey; + metadata: PublicKey; + updateAuthority: PublicKey; + key: string; + idempotent: boolean; +} + +export function createRemoveKeyInstruction( + args: RemoveKeyInstructionArgs, +): TransactionInstruction; +``` + +The function `createUpdateAuthorityInstruction` returns the instruction that +updates the authority of a token-metadata account. 
+ +This function takes four parameters: + +- `metadata`: the metadata account address +- `oldAuthority`: the current authority that can sign to update the metadata +- `newAuthority`: the new authority that can sign to update the metadata +- `programId`: the SPL Token program ID (in this case it will be the Token + Extension program ID) + +```ts +export interface UpdateAuthorityInstructionArgs { + programId: PublicKey; + metadata: PublicKey; + oldAuthority: PublicKey; + newAuthority: PublicKey | null; +} + +export function createUpdateAuthorityInstruction( + args: UpdateAuthorityInstructionArgs, +): TransactionInstruction; +``` + +The function `createEmitInstruction` "emits" or logs out token-metadata in the +expected TokenMetadata state format. This is a required function for metadata +programs that want to follow the TokenMetadata interface. The emit instruction +allows indexers and other offchain users to call to get metadata. This also +allows custom metadata programs to store +[metadata in a different format while maintaining compatibility with the Interface standards](/developers/guides/token-extensions/metadata-pointer#metadata-interface-instructions). + +This function takes four parameters: + +- `metadata`: the metadata account address +- `programId`: the SPL Token program ID (in this case it will be the Token + Extension program ID) +- `start`: _Optional_ the start the metadata +- `end`: _Optional_ the end the metadata + +```ts +export interface EmitInstructionArgs { + programId: PublicKey; + metadata: PublicKey; + start?: bigint; + end?: bigint; +} + +export function createEmitInstruction( + args: EmitInstructionArgs, +): TransactionInstruction; +``` + +The `pack` function encodes metadata into a byte array, while its counterpart, +`unpack`, decodes metadata from a byte array. These operations are essential for +determining the metadata's byte size, crucial for allocating adequate storage +space. + +```ts +export interface TokenMetadata { + // The authority that can sign to update the metadata + updateAuthority?: PublicKey; + // The associated mint, used to counter spoofing to be sure that metadata belongs to a particular mint + mint: PublicKey; + // The longer name of the token + name: string; + // The shortened symbol for the token + symbol: string; + // The URI pointing to richer metadata + uri: string; + // Any additional metadata about the token as key-value pairs + additionalMetadata: [string, string][]; +} + +export const pack = (meta: TokenMetadata): Uint8Array + +export function unpack(buffer: Buffer | Uint8Array): TokenMetadata +``` + +The function `getTokenMetadata` returns the metadata for the given mint. + +It takes four parameters: + +- `connection`: Connection to use +- `address`: mint account +- `commitment`: desired level of commitment for querying the state +- `programId`: SPL Token program account (in this case it will be the Token + Extension program ID) + +```ts +export async function getTokenMetadata( + connection: Connection, + address: PublicKey, + commitment?: Commitment, + programId = TOKEN_2022_PROGRAM_ID, +): Promise; +``` + +#### Create NFT with metadata extension + +Creating an NFT with the metadata extension is just like creating one with the +metadata-pointer with a few extra steps: + +1. Gather our needed accounts +2. Find/decide on the needed size of our metadata +3. Create the `mint` account +4. Initialize the pointer +5. Initialize the mint +6. Initialize the metadata in the mint account +7. 
Add any additional custom fields if needed
+
+First, the `mint` will be a Keypair, usually generated using
+`Keypair.generate()`. Then, we must decide what metadata to include and
+calculate the total size and cost.
+
+A mint account's size with the metadata and metadata-pointer extensions
+includes the following:
+
+1. the basic metadata fields: name, symbol, and URI
+2. the additional custom fields we want to store as additional metadata
+3. the update authority that can change the metadata in the future
+4. the `LENGTH_SIZE` and `TYPE_SIZE` constants from the `@solana/spl-token`
+   library - these are sizes associated with mint extensions that are usually
+   added with the call `getMintLen`, but since the metadata extension is
+   variable length, they need to be added manually
+5. the metadata pointer data (this will be the mint's address and is done for
+   consistency)
+
+
+
+There is no need to allocate more space than what is
+necessary, even if you're anticipating more metadata. The
+`createUpdateFieldInstruction` will automatically reallocate space! However,
+you'll have to add another `system.transfer` transaction to make sure the mint
+account has enough rent.
+
+To determine all of this programmatically, we use the `getMintLen` and `pack`
+functions from the `@solana/spl-token` library:
+
+```ts
+const metadata: TokenMetadata = {
+  mint: mint.publicKey,
+  name: tokenName,
+  symbol: tokenSymbol,
+  uri: tokenUri,
+  additionalMetadata: [["customField", "customValue"]],
+};
+
+// The metadata extension is variable length, so its size is calculated
+// separately and added to the fixed mint + metadata-pointer size
+const mintAndPointerLen = getMintLen([ExtensionType.MetadataPointer]);
+const metadataLen = TYPE_SIZE + LENGTH_SIZE + pack(metadata).length;
+const totalLen = mintAndPointerLen + metadataLen;
+const lamports = await connection.getMinimumBalanceForRentExemption(totalLen);
+```
+
+
+
+
+
+To actually create and initialize the `mint` with the metadata and metadata
+pointer, we need several instructions in a particular order:
+
+1. Create the `mint` account which reserves space on the blockchain with
+   `SystemProgram.createAccount`
+2. Initialize the metadata pointer extension with
+   `createInitializeMetadataPointerInstruction`
+3. Initialize the mint itself with `createInitializeMintInstruction`
+4. Initialize the metadata with `createInitializeInstruction` (this ONLY sets
+   the basic metadata fields)
+5. 
Optional: Set the custom fields with `createUpdateFieldInstruction` (one + field per call) + +```ts +const createMintAccountInstructions = SystemProgram.createAccount({ + fromPubkey: payer.publicKey, + lamports, + newAccountPubkey: mint.publicKey, + programId: TOKEN_2022_PROGRAM_ID, + space: totalLen, +}); + +const initMetadataPointerInstructions = + createInitializeMetadataPointerInstruction( + mint.publicKey, + payer.publicKey, + mint.publicKey, // we will point to the mint it self as the metadata account + TOKEN_2022_PROGRAM_ID, + ); + +const initMintInstructions = createInitializeMintInstruction( + mint.publicKey, + decimals, + payer.publicKey, + payer.publicKey, + TOKEN_2022_PROGRAM_ID, +); + +const initMetadataInstruction = createInitializeInstruction({ + programId: TOKEN_2022_PROGRAM_ID, + mint: mint.publicKey, + metadata: mint.publicKey, + name: metadata.name, + symbol: metadata.symbol, + uri: metadata.uri, + mintAuthority: payer.publicKey, + updateAuthority: payer.publicKey, +}); + +const updateMetadataFieldInstructions = createUpdateFieldInstruction({ + metadata: mint.publicKey, + updateAuthority: payer.publicKey, + programId: TOKEN_2022_PROGRAM_ID, + field: metadata.additionalMetadata[0][0], + value: metadata.additionalMetadata[0][1], +}); +``` + +Wrap all of these instructions in a transaction to create the embedded NFT: + +```ts +const transaction = new Transaction().add( + createMintAccountInstructions, + initMetadataPointerInstructions, + initMintInstructions, + initMetadataInstruction, + updateMetadataFieldInstructions, // if you want to add any custom field +); +const signature = await sendAndConfirmTransaction(connection, transaction, [ + payer, + mint, +]); +``` + +Again, the order here matters. + + + + The `createUpdateFieldInstruction` updates only one field +at a time. If you want to have more than one custom field, you will have to call +this method multiple times. Additionally, you can use the same method to update +the basic metadata fields as well: + +```ts +const updateMetadataFieldInstructions = createUpdateFieldInstruction({ + metadata: mint.publicKey, + updateAuthority: payer.publicKey, + programId: TOKEN_2022_PROGRAM_ID, + field: "name", // Field | string + value: "new name", +}); +``` + + + + + +## Lab + +Now it is time to practice what we have learned so far. In this lab, we'll +create a script that will illustrate how to create an NFT with the `metadata` +and `metadata pointer` extensions. + +### 0. Getting started + +Let's go ahead and clone our starter code: + +```bash +git clone https://github.com/Unboxed-Software/solana-lab-token22-metadata.git +cd solana-lab-token22-metadata +git checkout starter +npm install +``` + +Let's take a look at what's been provided in the `starter` branch. + +Along with the NodeJS project being initialized with all of the needed +dependencies, two other files have been provided in the `src/` directory. + +- `cat.png` +- `helpers.ts` +- `index.ts` + +**`cat.png`** is the image we'll use for the NFT. Feel free to replace it with +your own image. + + + +we are using Irys on devnet to upload files, this is capped +at 100 KiB. + + + +**`helpers.ts`** file provides us with a useful helper function +`uploadOffChainMetadata`. + +`uploadOffChainMetadata` is a helper to store the offchain metadata on Arweave +using Irys (formerly Bundlr). In this lab we will be more focused on the Token +Extensions Program interaction, so this uploader function is provided. 
It is +important to note that an NFT or any offchain metadata can be stored anywhere +with any storage provider like [NFT.storage](https://nft.storage/), Solana's +native [ShadowDrive](https://www.shdwdrive.com/), or +[Irys (formerly Bundlr)](https://irys.xyz/). At the end of the day, all you need +is a url to the hosted metadata json file. + +This helper has some exported interfaces. These will clean up our functions as +we make them. + +```ts +export interface CreateNFTInputs { + payer: Keypair; + connection: Connection; + tokenName: string; + tokenSymbol: string; + tokenUri: string; + tokenAdditionalMetadata?: Record; +} + +export interface UploadOffChainMetadataInputs { + tokenName: string; + tokenSymbol: string; + tokenDescription: string; + tokenExternalUrl: string; + tokenAdditionalMetadata?: Record; + imagePath: string; + metadataFileName: string; +} +``` + +**`index.ts`** is where we'll add our code. Right now, the code sets up a +`connection` and initializes a keypair for us to use. + +The keypair `payer` will be responsible for every payment we need throughout the +whole process. `payer` will also hold all the authorities, like the mint +authority, mint freeze authority, etc. While it's possible to use a distinct +keypair for the authorities, for simplicity's sake, we'll continue using +`payer`. + +Lastly, this lab will all be done on devnet. This is because we are using Irys +to upload metadata to Arweave - the requires a devnet or mainnet connection. If +you are running into airdropping problems: + +- Add the `keypairPath` parameter to `initializeKeypair` - path can be gotten by + running `solana config get` in your terminal +- Get the address of your keypair by running `solana address` in your terminal +- Copy the address and airdrop some devnet sol from + [faucet.solana](https://faucet.solana.com/). + +### 1. Uploading the offchain metadata + +In this section we will decide on our NFT metadata and upload our files to +NFT.Storage using the helper functions provided in the starting code. + +To upload our offchain metadata, we need to first prepare an image that will +represent our NFT. We've provided `cat.png`, but feel free to replace it with +your own. Most image types are supported by most wallets. (Again devnet Irys +allows up to 100KiB per file) + +Next, let's decide on what metadata our NFT will have. The fields we are +deciding on are `name`, `description`, `symbol`, `externalUrl`, and some +`attributes` (additional metadata). We'll provide some cat adjacent metadata, +but feel free to make up your own. + +- `name`: Cat NFT +- `description` = This is a cat +- `symbol` = EMB +- `externalUrl` = https://solana.com/ +- `attributes` = `{ species: 'Cat' breed: 'Cool' }` + +Lastly we just need to format all of this data and send it to our helper +function `uploadOffChainMetadata` to get the uploaded metadata uri. 
+ +When we put all of this together, the `index.ts` file will look as follows: + +```ts +import { Connection } from "@solana/web3.js"; +import { initializeKeypair } from "@solana-developers/helpers"; +import { uploadOffChainMetadata } from "./helpers"; +import dotenv from "dotenv"; +dotenv.config(); + +const connection = new Connection(clusterApiUrl("devnet"), "finalized"); +const payer = await initializeKeypair(connection, { + keypairPath: "your/path/to/keypair.json", +}); + +const imagePath = "src/cat.png"; +const metadataPath = "src/temp.json"; +const tokenName = "Cat NFT"; +const tokenDescription = "This is a cat"; +const tokenSymbol = "EMB"; +const tokenExternalUrl = "https://solana.com/"; +const tokenAdditionalMetadata = { + species: "Cat", + breed: "Cool", +}; + +const tokenUri = await uploadOffChainMetadata( + { + tokenName, + tokenDescription, + tokenSymbol, + imagePath, + metadataPath, + tokenExternalUrl, + tokenAdditionalMetadata, + }, + payer, +); + +// You can log the URI here and run the code to test it +console.log("Token URI:", tokenUri); +``` + +Now run `npm run start` in your terminal and test your code. You should see the +URI logged once the uploading is done. If you visit the link you should see a +JSON object that holds all of our offchain metadata. + +### 2. Create NFT function + +Creating an NFT involves multiple instructions. As a best practice when writing +scripts that engage with the Solana network, it is best to consolidate all of +these instructions in one transaction due to the atomic nature of transactions. +This ensures either the successful execution of all instructions or a complete +rollback in case of errors. That being said, we're going to make a new function +`createNFTWithEmbeddedMetadata` in a new file called +`src/nft-with-embedded-metadata.ts`. + +This function will create an NFT by doing the following: + +1. Create the metadata object +2. Allocate the mint +3. Initialize the metadata-pointer making sure that it points to the mint itself +4. Initialize the mint +5. Initialize the metadata inside the mint (that will set name, symbol, and uri + for the mint) +6. Set the additional metadata in the mint +7. Create the associated token account and mint the NFT to it and remove the + mint authority +8. Put all of that in one transaction and send it to the network +9. Fetch and print the token account, the mint account, an the metadata to make + sure that it is working correctly + +This new function will take `CreateNFTInputs` defined in the `helpers.ts` file. 
+ +As a first step, let's create a new file `src/nft-with-embedded-metadata.ts` and +paste the following: + +```typescript +import { + Keypair, + sendAndConfirmTransaction, + SystemProgram, + Transaction, +} from "@solana/web3.js"; +import { CreateNFTInputs } from "./helpers"; +import { + createInitializeInstruction, + createUpdateFieldInstruction, + pack, + TokenMetadata, +} from "@solana/spl-token-metadata"; +import { + AuthorityType, + createAssociatedTokenAccountInstruction, + createInitializeMetadataPointerInstruction, + createInitializeMintInstruction, + createMintToCheckedInstruction, + createSetAuthorityInstruction, + ExtensionType, + getAccount, + getAssociatedTokenAddress, + getMint, + getMintLen, + getTokenMetadata, + LENGTH_SIZE, + TOKEN_2022_PROGRAM_ID, + TYPE_SIZE, +} from "@solana/spl-token"; + +export default async function createNFTWithEmbeddedMetadata( + inputs: CreateNFTInputs, +) { + const { + payer, + connection, + tokenName, + tokenSymbol, + tokenUri, + tokenAdditionalMetadata, + } = inputs; + + // 0. Setup Mint + // 1. Create the metadata object + // 2. Allocate the mint + // 3. Initialize the metadata-pointer making sure that it points to the mint itself + // 4. Initialize the mint + // 5. Initialize the metadata inside the mint (that will set name, symbol, and uri for the mint) + // 6. Set the additional metadata in the mint + // 7. Create the associated token account and mint the NFT to it and remove the mint authority + // 8. Put all of that in one transaction and send it to the network + // 9. fetch and print the token account, the mint account, an the metadata to make sure that it is working correctly +} +``` + +Now let's fill in the gaps one by one. + +For step 0 we create the mint's keypair, make sure our decimals for our NFT is 0 +and the supply is 1. + +```typescript +// 0. Setup Mint +const mint = Keypair.generate(); +const decimals = 0; // NFT should have 0 decimals +const supply = 1; // NFTs should have a supply of 1 +``` + +Now let's construct the `TokenMetadata` object interfaced from +`@solana/spl-token-metadata`, and pass it all of our inputs. + +Note we have to do some conversion of our `tokenAdditionalMetadata`: + +```typescript +// 1. Create the metadata object +const metadata: TokenMetadata = { + mint: mint.publicKey, + name: tokenName, + symbol: tokenSymbol, + uri: tokenUri, + // additionalMetadata: [['customField', 'customValue']], + additionalMetadata: Object.entries(tokenAdditionalMetadata || []).map( + ([key, value]) => [key, value], + ), +}; +``` + +Now we can create our first onchain instruction using +`SystemProgram.createAccount`. To do this we need to know the size of our NFT's +mint account. Remember we're using two extensions for our NFT, +`metadata pointer` and the `metadata` extensions. Additionally, since the +metadata is 'embedded' using the metadata extension, it's variable length. So we +use a combination of `getMintLen`, `pack` and some hardcoded amounts to get our +final length. + +Then we call `getMinimumBalanceForRentExemption` to see how many lamports it +costs to spin up the account. + +Finally, we put everything into the `SystemProgram.createAccount` function to +get our first instruction: + +```typescript +// 2. 
Allocate the mint +const mintLen = getMintLen([ExtensionType.MetadataPointer]); +const metadataLen = TYPE_SIZE + LENGTH_SIZE + pack(metadata).length; +const lamports = await connection.getMinimumBalanceForRentExemption( + mintLen + metadataLen, +); + +const createMintAccountInstruction = SystemProgram.createAccount({ + fromPubkey: payer.publicKey, + lamports, + newAccountPubkey: mint.publicKey, + programId: TOKEN_2022_PROGRAM_ID, + space: mintLen, +}); +``` + + + +The more information in the metadata, the more it +costs. + + + +Step 3 has us initializing the `metadata pointer` extension. Let's do that by +calling the `createInitializeMetadataPointerInstruction` function with the +metadata account point to our mint. + +```typescript +// 3. Initialize the metadata-pointer making sure that it points to the mint itself +const initMetadataPointerInstruction = + createInitializeMetadataPointerInstruction( + mint.publicKey, + payer.publicKey, + mint.publicKey, // Metadata account - points to itself + TOKEN_2022_PROGRAM_ID, + ); +``` + +Next is the `createInitializeMintInstruction`. Note that we do this before we +initialize the metadata. + +```typescript +// 4. Initialize the mint +const initMintInstruction = createInitializeMintInstruction( + mint.publicKey, + decimals, + payer.publicKey, + payer.publicKey, + TOKEN_2022_PROGRAM_ID, +); +``` + +Now we can initialize our metadata with the `createInitializeInstruction`. We +pass in all of our NFT metadata except for our `tokenAdditionalMetadata`, which +is covered in our next step. + +```typescript +// 5. Initialize the metadata inside the mint +const initMetadataInstruction = createInitializeInstruction({ + programId: TOKEN_2022_PROGRAM_ID, + mint: mint.publicKey, + metadata: mint.publicKey, + name: metadata.name, + symbol: metadata.symbol, + uri: metadata.uri, + mintAuthority: payer.publicKey, + updateAuthority: payer.publicKey, +}); +``` + +In our NFT, we have `tokenAdditionalMetadata`, and as we saw in the previous +step this cannot be set using the `createInitializeInstruction`. So we have to +make an instruction to set each new additional field. We do this by calling +`createUpdateFieldInstruction` for each of our entries in +`tokenAdditionalMetadata`. + +```typescript +// 6. Set the additional metadata in the mint +const setExtraMetadataInstructions = []; +for (const attributes of Object.entries(tokenAdditionalMetadata || [])) { + setExtraMetadataInstructions.push( + createUpdateFieldInstruction({ + updateAuthority: payer.publicKey, + metadata: mint.publicKey, + field: attributes[0], + value: attributes[1], + programId: TOKEN_2022_PROGRAM_ID, + }), + ); +} +``` + +Now let's mint this NFT to ourselves, and then revoke the mint authority. This +will make it a true NFT where there will ever only be one. We accomplish this +with the following functions: + +- `createAssociatedTokenAccountInstruction` +- `createMintToCheckedInstruction` +- `createSetAuthorityInstruction` + +```typescript +// 7. 
Create the associated token account and mint the NFT to it and remove the mint authority +const ata = await getAssociatedTokenAddress( + mint.publicKey, + payer.publicKey, + false, + TOKEN_2022_PROGRAM_ID, +); +const createATAInstruction = createAssociatedTokenAccountInstruction( + payer.publicKey, + ata, + payer.publicKey, + mint.publicKey, + TOKEN_2022_PROGRAM_ID, +); + +const mintInstruction = createMintToCheckedInstruction( + mint.publicKey, + ata, + payer.publicKey, + supply, // NFTs should have a supply of one + decimals, + undefined, + TOKEN_2022_PROGRAM_ID, +); + +// NFTs should have no mint authority so no one can mint any more of the same NFT +const setMintTokenAuthorityInstruction = createSetAuthorityInstruction( + mint.publicKey, + payer.publicKey, + AuthorityType.MintTokens, + null, + undefined, + TOKEN_2022_PROGRAM_ID, +); +``` + +Now, let's bundle all of our transactions together and send it out to Solana. It +is very important to note that order matters here. + +```typescript +// 8. Put all of that in one transaction and send it to the network. +const transaction = new Transaction().add( + createMintAccountInstruction, + initMetadataPointerInstruction, + initMintInstruction, + initMetadataInstruction, + ...setExtraMetadataInstructions, + createATAInstruction, + mintInstruction, + setMintTokenAuthorityInstruction, +); +const transactionSignature = await sendAndConfirmTransaction( + connection, + transaction, + [payer, mint], +); +``` + +Lastly, let's fetch and print out all of the information about our NFT so we +know everything worked. + +```typescript +// 9. fetch and print the token account, the mint account, an the metadata to make sure that it is working correctly. +// Fetching the account +const accountDetails = await getAccount( + connection, + ata, + "finalized", + TOKEN_2022_PROGRAM_ID, +); +console.log("Associate Token Account =====>", accountDetails); + +// Fetching the mint +const mintDetails = await getMint( + connection, + mint.publicKey, + undefined, + TOKEN_2022_PROGRAM_ID, +); +console.log("Mint =====>", mintDetails); + +// Since the mint stores the metadata in itself, we can just get it like this +const onChainMetadata = await getTokenMetadata(connection, mint.publicKey); +// Now we can see the metadata coming with the mint +console.log("onchain metadata =====>", onChainMetadata); + +// And we can even get the offchain json now +if (onChainMetadata?.uri) { + try { + const response = await fetch(onChainMetadata.uri); + const offChainMetadata = await response.json(); + console.log("Mint offchain metadata =====>", offChainMetadata); + } catch (error) { + console.error("Error fetching or parsing offchain metadata:", error); + } +} +``` + +Putting it all together you get the following in +`src/nft-with-embedded-metadata.ts`: + +```ts +import { + Keypair, + sendAndConfirmTransaction, + SystemProgram, + Transaction, +} from "@solana/web3.js"; +import { CreateNFTInputs } from "./helpers"; +import { + createInitializeInstruction, + createUpdateFieldInstruction, + pack, + TokenMetadata, +} from "@solana/spl-token-metadata"; +import { + AuthorityType, + createAssociatedTokenAccountInstruction, + createInitializeMetadataPointerInstruction, + createInitializeMintInstruction, + createMintToCheckedInstruction, + createSetAuthorityInstruction, + ExtensionType, + getAccount, + getAssociatedTokenAddress, + getMint, + getMintLen, + getTokenMetadata, + LENGTH_SIZE, + TOKEN_2022_PROGRAM_ID, + TYPE_SIZE, +} from "@solana/spl-token"; + +export default async function 
createNFTWithEmbeddedMetadata( + inputs: CreateNFTInputs, +) { + const { + payer, + connection, + tokenName, + tokenSymbol, + tokenUri, + tokenAdditionalMetadata, + } = inputs; + + // 0. Setup Mint + const mint = Keypair.generate(); + const decimals = 0; // NFT should have 0 decimals + const supply = 1; // NFTs should have a supply of one + + // 1. Create the metadata object + const metadata: TokenMetadata = { + mint: mint.publicKey, + name: tokenName, + symbol: tokenSymbol, + uri: tokenUri, + // additionalMetadata: [['customField', 'customValue']], + additionalMetadata: Object.entries(tokenAdditionalMetadata || []).map( + ([key, value]) => [key, value], + ), + }; + + // 2. Allocate the mint + const mintLen = getMintLen([ExtensionType.MetadataPointer]); + const metadataLen = TYPE_SIZE + LENGTH_SIZE + pack(metadata).length; + const lamports = await connection.getMinimumBalanceForRentExemption( + mintLen + metadataLen, + ); + + const createMintAccountInstruction = SystemProgram.createAccount({ + fromPubkey: payer.publicKey, + lamports, + newAccountPubkey: mint.publicKey, + programId: TOKEN_2022_PROGRAM_ID, + space: mintLen, + }); + + // 3. Initialize the metadata-pointer making sure that it points to the mint itself + const initMetadataPointerInstruction = + createInitializeMetadataPointerInstruction( + mint.publicKey, + payer.publicKey, + mint.publicKey, // Metadata account - points to itself + TOKEN_2022_PROGRAM_ID, + ); + + // 4. Initialize the mint + const initMintInstruction = createInitializeMintInstruction( + mint.publicKey, + decimals, + payer.publicKey, + payer.publicKey, + TOKEN_2022_PROGRAM_ID, + ); + + // 5. Initialize the metadata inside the mint + const initMetadataInstruction = createInitializeInstruction({ + programId: TOKEN_2022_PROGRAM_ID, + mint: mint.publicKey, + metadata: mint.publicKey, + name: metadata.name, + symbol: metadata.symbol, + uri: metadata.uri, + mintAuthority: payer.publicKey, + updateAuthority: payer.publicKey, + }); + + // 6. Set the additional metadata in the mint + const setExtraMetadataInstructions = []; + for (const attributes of Object.entries(tokenAdditionalMetadata || [])) { + setExtraMetadataInstructions.push( + createUpdateFieldInstruction({ + updateAuthority: payer.publicKey, + metadata: mint.publicKey, + field: attributes[0], + value: attributes[1], + programId: TOKEN_2022_PROGRAM_ID, + }), + ); + } + + // 7. Create the associated token account and mint the NFT to it and remove the mint authority + const ata = await getAssociatedTokenAddress( + mint.publicKey, + payer.publicKey, + false, + TOKEN_2022_PROGRAM_ID, + ); + const createATAInstruction = createAssociatedTokenAccountInstruction( + payer.publicKey, + ata, + payer.publicKey, + mint.publicKey, + TOKEN_2022_PROGRAM_ID, + ); + + const mintInstruction = createMintToCheckedInstruction( + mint.publicKey, + ata, + payer.publicKey, + supply, // NFTs should have a supply of one + decimals, + undefined, + TOKEN_2022_PROGRAM_ID, + ); + + // NFTs should have no mint authority so no one can mint any more of the same NFT + const setMintTokenAuthorityInstruction = createSetAuthorityInstruction( + mint.publicKey, + payer.publicKey, + AuthorityType.MintTokens, + null, + undefined, + TOKEN_2022_PROGRAM_ID, + ); + + // 8. Put all of that in one transaction and send it to the network. 
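+  // Note: instruction order matters here - the metadata pointer extension must be
+  // initialized before the mint, the mint before the metadata it points to, and
+  // the NFT must be minted before the mint authority is revoked.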
+ const transaction = new Transaction().add( + createMintAccountInstruction, + initMetadataPointerInstruction, + initMintInstruction, + initMetadataInstruction, + ...setExtraMetadataInstructions, // Destructuring extra metadata fields + createATAInstruction, + mintInstruction, + setMintTokenAuthorityInstruction, + ); + const transactionSignature = await sendAndConfirmTransaction( + connection, + transaction, + [payer, mint], + ); + + // 9. fetch and print the token account, the mint account, an the metadata to make sure that it is working correctly. + // Fetching the account + const accountDetails = await getAccount( + connection, + ata, + "finalized", + TOKEN_2022_PROGRAM_ID, + ); + console.log("Associate Token Account =====>", accountDetails); + + // Fetching the mint + const mintDetails = await getMint( + connection, + mint.publicKey, + undefined, + TOKEN_2022_PROGRAM_ID, + ); + console.log("Mint =====>", mintDetails); + + // Since the mint stores the metadata in itself, we can just get it like this + const onChainMetadata = await getTokenMetadata(connection, mint.publicKey); + // Now we can see the metadata coming with the mint + console.log("onchain metadata =====>", onChainMetadata); + + // And we can even get the offchain JSON now + if (onChainMetadata?.uri) { + try { + const response = await fetch(onChainMetadata.uri); + const offChainMetadata = await response.json(); + console.log("Mint offchain metadata =====>", offChainMetadata); + } catch (error) { + console.error("Error fetching or parsing offchain metadata:", error); + } + } +} +``` + +### 3. Call Create NFT Function + +Let's put everything together in `src/index.ts`. + +Go back to `src/index.ts`, and import the function +`createNFTWithEmbeddedMetadata` from the file we just created. + +```ts +import createNFTWithEmbeddedMetadata from "./nft-with-embedded-metadata"; +``` + +Then call it at the end of the main function and pass the required parameters. + +```ts +await createNFTWithEmbeddedMetadata({ + payer, + connection, + tokenName, + tokenSymbol, + tokenUri, +}); +``` + +`src/index.ts` file should look like this: + +```ts +import { Connection } from "@solana/web3.js"; +import { initializeKeypair, uploadOffChainMetadata } from "./helpers"; +import createNFTWithEmbeddedMetadata from "./nft-with-embedded-metadata"; +import dotenv from "dotenv"; +dotenv.config(); + +const connection = new Connection("http://127.0.0.1:8899", "finalized"); +const payer = await initializeKeypair(connection); + +const imagePath = "NFT.png"; +const tokenName = "NFT Name"; +const tokenDescription = "This is a cool Token Extension NFT"; +const tokenSymbol = "TTT"; + +const tokenUri = await uploadOffChainMetadata({ + connection, + payer, + tokenName, + tokenDescription, + tokenSymbol, + imagePath, +}); + +// You can log the URI here and run the code to test it +console.log("Token URI:", tokenUri); + +await createNFTWithEmbeddedMetadata({ + payer, + connection, + tokenName, + tokenSymbol, + tokenUri, +}); +``` + +Run the program one more time to see your NFT and metadata. + +```bash +npm run start +``` + +You did it! You've made an NFT using the `metadata` and `metadata pointer` +extensions. + +If you run into any problems, check out the +[solution](https://github.com/Unboxed-Software/solana-lab-token22-metadata/tree/solution). + +## Challenge + +Taking what you've learned here, go and create your own NFT or SFT. 
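+
+If you want a concrete starting point, one approach is to reuse the
+`createNFTWithEmbeddedMetadata` function you just wrote and pass it your own
+`tokenAdditionalMetadata`. The sketch below assumes the same helpers and local
+validator setup as the lab, and every name, path, and attribute value is a
+placeholder to replace with your own:
+
+```typescript
+import { Connection } from "@solana/web3.js";
+import { initializeKeypair, uploadOffChainMetadata } from "./helpers";
+import createNFTWithEmbeddedMetadata from "./nft-with-embedded-metadata";
+
+const connection = new Connection("http://127.0.0.1:8899", "finalized");
+const payer = await initializeKeypair(connection);
+
+// Placeholder values - swap in your own NFT details
+const tokenName = "My Challenge NFT";
+const tokenSymbol = "CHAL";
+const tokenUri = await uploadOffChainMetadata({
+  connection,
+  payer,
+  tokenName,
+  tokenDescription: "An NFT built for the challenge",
+  tokenSymbol,
+  imagePath: "NFT.png",
+});
+
+await createNFTWithEmbeddedMetadata({
+  payer,
+  connection,
+  tokenName,
+  tokenSymbol,
+  tokenUri,
+  // These extra fields end up onchain via createUpdateFieldInstruction
+  tokenAdditionalMetadata: {
+    rarity: "legendary",
+    edition: "1",
+  },
+});
+```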
diff --git a/content/courses/token-extensions/token-extensions-onchain.mdx b/content/courses/token-extensions/token-extensions-onchain.mdx
new file mode 100644
index 000000000..179fa6961
--- /dev/null
+++ b/content/courses/token-extensions/token-extensions-onchain.mdx
@@ -0,0 +1,1932 @@
+---
+title: Use Token Extensions in onchain programs
+objectives:
+  - Accept both token programs' accounts and mints in your program
+  - Explain the differences between the Token Program and Token Extension
+    programs
+  - Explain how to use Anchor Interfaces
+description: "Use token extensions in onchain programs."
+---
+
+## Summary
+
+- The `Token Extensions Program` is a superset of the `Token Program` with a
+  different program id
+- `token_program` is an Anchor account constraint allowing you to verify an
+  account belongs to a specific token program
+- Anchor introduced the concept of Interfaces to allow programs to easily
+  support interaction with both the `Token Program` and the
+  `Token Extensions Program`
+
+## Overview
+
+The `Token Extensions Program` is a program on Solana mainnet that provides
+additional functionality to Solana tokens and mints. The
+`Token Extensions Program` is a superset of the `Token Program`. Essentially,
+it is a byte-for-byte recreation with additional functionality tagged on at
+the end. However, they are still separate programs. With two token programs,
+we must design our instructions to handle whichever one is passed in.
+
+In this lesson, you'll learn how to design your program to accept
+`Token Program` and `Token Extensions Program` accounts using Anchor. You will
+also learn how to interact with `Token Extensions Program` accounts, how to
+identify which token program an account belongs to, and some of the
+differences between the `Token Program` and the `Token Extensions Program`
+onchain.
+
+### Difference between legacy Token Program and Token Extensions Program
+
+We must clarify that the `Token Extensions Program` is separate from the
+original `Token Program`. The `Token Extensions Program` is a superset of the
+original `Token Program`, meaning all the instructions and functionality in the
+original `Token Program` come with the `Token Extensions Program`.
+
+Previously, one primary program (the `Token Program`) was in charge of creating
+accounts. As more and more use cases came to Solana, there was a need for new
+token functionality. Historically, the only way to add new token functionality
+was to create a new type of token. A new token required its own program, and
+any wallet or client that wanted to use this new token had to add specific
+logic to support it. Fortunately, the headache of supporting different token
+types made this option unpopular. However, new functionality was still very
+much needed, and the `Token Extensions Program` was built to address this.
+
+As mentioned before, the `Token Extensions Program` is a strict superset of the
+original token program and comes with all the previous functionality. The
+`Token Extensions Program` development team chose this approach to ensure
+minimal disruption to users, wallets, and dApps while adding new functionality.
+The `Token Extensions Program` supports the same instruction set as the Token
+Program and is the same byte-for-byte through the very last instruction,
+allowing existing programs to support `Token Extensions` out of the box.
+However, this does not mean that `Token Extensions Program` tokens and
+`Token Program` tokens are interoperable - they are not.
+We'll have to handle each separately.
+
+### How to determine which program owns a particular token
+
+With Anchor, managing the two different token programs is pretty
+straightforward. When we work with tokens within our programs, we'll check the
+`token_program` constraint.
+
+The two token program `ID`s are as follows:
+
+```rust
+use spl_token::ID; // Token Program
+use anchor_spl::token_2022::ID; // Token Extensions Program
+```
+
+To check for the regular `Token Program`, you'd use the following:
+
+```rust
+use spl_token::ID;
+
+// verify given token/mint accounts belong to the spl-token program
+#[account(
+    mint::token_program = ID,
+)]
+pub token_a_mint: Box<InterfaceAccount<'info, Mint>>,
+#[account(
+    token::token_program = ID,
+)]
+pub token_a_account: Box<InterfaceAccount<'info, TokenAccount>>,
+```
+
+You can do the same thing for the `Token Extensions Program`, just with a
+different ID.
+
+```rust
+use anchor_spl::token_2022::ID;
+
+// verify given token/mint accounts belong to the Token Extension program
+#[account(
+    mint::token_program = ID,
+)]
+pub token_a_mint: Box<InterfaceAccount<'info, Mint>>,
+#[account(
+    token::token_program = ID,
+)]
+pub token_a_account: Box<InterfaceAccount<'info, TokenAccount>>,
+```
+
+If a client passed in the wrong token program account, the instruction would
+fail. However, this raises a problem: what if we want to support both
+`Token Program` and `Token Extensions Program`? If we hardcode the check for
+the program `ID`, we'd need twice as many instructions. Fortunately, you can
+verify that the token accounts passed into your program belong to a particular
+token program. You would do this similarly to the previous examples. Instead
+of passing in the static `ID` of the token program, you check the given
+`token_program`.
+
+```rust
+// verify the given token and mint accounts match the given token_program
+#[account(
+    mint::token_program = token_program,
+)]
+pub token_a_mint: Box<InterfaceAccount<'info, Mint>>,
+#[account(
+    token::token_program = token_program,
+)]
+pub token_a_account: Box<InterfaceAccount<'info, TokenAccount>>,
+pub token_program: Interface<'info, token_interface::TokenInterface>,
+```
+
+You can do the same thing with an associated token account by supplying a
+specific token program.
+
+```rust
+#[account(
+    associated_token::token_program = token_program
+)]
+pub associated_token: Box<InterfaceAccount<'info, TokenAccount>>,
+pub token_program: Interface<'info, token_interface::TokenInterface>,
+```
+
+If you'd like to check which token program a token account and mint belong to
+in your program logic, you can refer to the owner field on the `AccountInfo`
+struct. The following code will log the owning program's ID. You could use this
+field in a conditional to execute different logic for `spl-token` and
+`Token Extensions Program` accounts.
+
+```rust
+msg!("Token Program Owner: {}", ctx.accounts.token_account.to_account_info().owner);
+```
+
+### Anchor Interfaces
+
+Interfaces are an Anchor feature that simplifies working with
+`Token Extensions` in a program.
+There are two relevant interface wrapper types from the `anchor_lang` crate:
+
+- [`Interface`](https://docs.rs/anchor-lang/latest/anchor_lang/accounts/interface/index.html)
+- [`InterfaceAccount`](https://docs.rs/anchor-lang/latest/anchor_lang/accounts/interface_account/index.html)
+
+And three corresponding account types from the `anchor_spl` crate:
+
+- [`Mint`](https://docs.rs/anchor-spl/latest/anchor_spl/token_interface/struct.Mint.html)
+- [`TokenAccount`](https://docs.rs/anchor-spl/latest/anchor_spl/token_interface/struct.TokenAccount.html)
+- [`TokenInterface`](https://docs.rs/anchor-spl/latest/anchor_spl/token_interface/struct.TokenInterface.html)
+
+In the previous section, we defined the `token_program` in our example as:
+
+```rust
+pub token_program: Interface<'info, token_interface::TokenInterface>,
+```
+
+This code makes use of `Interface` and `token_interface::TokenInterface`.
+
+`Interface` is a wrapper over the original `Program` type, allowing multiple
+possible program IDs. It's a type that validates the account is one of a set
+of given programs. The `Interface` type checks the following:
+
+- If the given account is executable
+- If the given account is one of a set of expected accounts from the given
+  interface type
+
+You must use the `Interface` wrapper with a specific interface type. The
+`anchor_lang` and `anchor_spl` crates provide the following `Interface` type
+out of the box:
+
+- [TokenInterface](https://docs.rs/anchor-spl/latest/anchor_spl/token_interface/struct.TokenInterface.html)
+
+`TokenInterface` provides an interface type that expects the pubkey of the
+account passed in to match either `spl_token::ID` or `spl_token_2022::ID`.
+These program IDs are hardcoded on the `TokenInterface` type in Anchor.
+
+```rust
+static IDS: [Pubkey; 2] = [spl_token::ID, spl_token_2022::ID];
+
+#[derive(Clone)]
+pub struct TokenInterface;
+
+impl anchor_lang::Ids for TokenInterface {
+    fn ids() -> &'static [Pubkey] {
+        &IDS
+    }
+}
+```
+
+Anchor checks that the ID of the account passed in matches one of the two IDs
+above. If the given account does not match either of these two, Anchor will
+throw an `InvalidProgramId` error and prevent the transaction from executing.
+
+```rust
+impl<T: Ids> CheckId for T {
+    fn check_id(id: &Pubkey) -> Result<()> {
+        if !Self::ids().contains(id) {
+            Err(error::Error::from(error::ErrorCode::InvalidProgramId).with_account_name(*id))
+        } else {
+            Ok(())
+        }
+    }
+}
+
+.
+.
+.
+
+impl<'a, T: CheckId> TryFrom<&'a AccountInfo<'a>> for Interface<'a, T> {
+    type Error = Error;
+    /// Deserializes the given `info` into a `Program`.
+    fn try_from(info: &'a AccountInfo<'a>) -> Result<Self> {
+        T::check_id(info.key)?;
+        if !info.executable {
+            return Err(ErrorCode::InvalidProgramExecutable.into());
+        }
+        Ok(Self::new(info))
+    }
+}
+```
+
+The `InterfaceAccount` type is similar to the `Interface` type in that it is
+also a wrapper, this time around `AccountInfo`. `InterfaceAccount` is used on
+accounts; it verifies program ownership and deserializes the underlying data
+into a Rust type. This lesson will focus on using `InterfaceAccount` with
+token and mint accounts. We can use the `InterfaceAccount` wrapper with the
+`Mint` or `TokenAccount` types from the `anchor_spl::token_interface` crate we
+mentioned.
Here is an example: + +```rust +use { + anchor_lang::prelude::*, + anchor_spl::{token_interface}, +}; + +#[derive(Accounts)] +pub struct Example<'info>{ + // Token account + #[account( + token::token_program = token_program + )] + pub token_account: InterfaceAccount<'info, token_interface::TokenAccount>, + // Mint account + #[account( + mut, + mint::token_program = token_program + )] + pub mint_account: InterfaceAccount<'info, token_interface::Mint>, + pub token_program: Interface<'info, token_interface::TokenInterface>, +} +``` + +If you're familiar with Anchor, then you may notice the `TokenAccount` and +`Mint` account types are not new. Although what is new is how they work with the +`InterfaceAccount` wrapper. The `InterfaceAccount` wrapper allows for either +`Token Program` or `Token Extensions Program` accounts to be passed in and +deserialized, just like the `Interface` and the `TokenInterface` types. These +wrappers and account types work together to provide a smooth and +straight-forward experience for developers, giving you the flexibility to +interact with both `Token Program` and the `Token Extensions Program` in your +program. + +However, you cannot use any of these types from the `token_interface` module +with the regular Anchor `Program` and `Account` wrappers. These new types are +used with either the `Interface` or `InterfaceAccount` wrappers. For example, +the following would not be valid, and any transactions sent to an instruction +using this account deserialization would return an error. + +```rust +// This is invalid, using as an example. +// Cannot wrap Account over a token_interface::* type. +pub token_account: Account<'info, token_interface::TokenAccount> +``` + +## Lab + +Now let's get some hands-on experience with the `Token Extensions Program` +onchain by implementing a generalized token staking program that will accept +both `Token Program` and `Token Extensions Program` accounts. As far as staking +programs go, this will be a simple implementation with the following design: + +- We'll create a stake pool account to hold all the staked tokens. There will + only be one staking pool for a given token. The program will own the account. +- Every stake pool will have a state account that will hold information + regarding the amount of tokens staked in the pool, etc. +- Users can stake as many tokens as they like, transferring them from their + token account to the stake pool. +- Each user will have a state account created for each pool they stake in. This + state account will keep track of how many tokens they have staked in this + pool, when they last staked, etc. +- Users will be minted staking reward tokens upon unstaking. There is no + separate claim process required. +- We'll determine a user's staking rewards using a simple algorithm. +- The program will accept both `Token Program` and `Token Extensions Program` + accounts. + +The program will have four instructions: `init_pool`, `init_stake_entry`, +`stake`, `unstake`. + +This lab will utilize a lot of Anchor and Solana APIs that have been covered +previously in this course. We will not spend time explaining some of the +concepts we expect you to know. With that said, let's get started. + +#### 1. Verify Solana/Anchor/Rust Versions + +We will be interacting with the `Token Extension` program in this lab and that +requires you have solana cli version ≥ `1.18.0`. 
+ +To check your version run: + +```bash +solana --version +``` + +If the version printed out after running `solana --version` is less than +`1.18.0` then you can update the +[cli version manually](/docs/intro/installation). Note, at the time of +writing this, you cannot simply run the `solana-install update` command. This +command will not update the CLI to the correct version for us, so we have to +explicitly download version `1.18.0`. You can do so with the following command: + +```bash +solana-install init 1.18.0 +``` + +If you run into the following error at any point attempting to build the +program, that likely means you do not have the correct version of the Solana CLI +installed. + +```bash +anchor build +error: package `solana-program v1.18.0` cannot be built because it requires rustc 1.72.0 or newer, while the currently active rustc version is 1.68.0-dev +Either upgrade to rustc 1.72.0 or newer, or use +cargo update -p solana-program@1.18.0 --precise ver +where `ver` is the latest version of `solana-program` supporting rustc 1.68.0-dev +``` + +You will also want the latest version of the Anchor CLI installed. You can +follow along the steps listed here to update via avm +https://www.anchor-lang.com/docs/avm or simply run: + +```bash +avm install latest +avm use latest +``` + +At the time of writing, the latest version of the Anchor CLI is `0.29.0` + +Now, we can check our Rust version. + +```bash +rustc --version +``` + +At the time of writing, version `1.26.0` was used for the Rust compiler. If you +would like to update, you can do so via `rustup` +https://doc.rust-lang.org/book/ch01-01-installation.html + +```bash +rustup update +``` + +Now, we should have all the correct versions installed. + +#### 2. Get starter code and add dependencies + +Let's grab the starter branch. + +```bash +git clone https://github.com/Unboxed-Software/token22-staking +cd token22-staking +git checkout starter +``` + +#### 3. Update Program ID and Anchor Keypair + +Once in the starter branch, run `anchor keys list` to get your program ID. + +Copy and paste this program ID in the `Anchor.toml` file: + +```rust +// in Anchor.toml +[programs.localnet] +token_22_staking = "" +``` + +And in the `programs/token-22-staking/src/lib.rs` file: + +```rust +declare_id!(""); +``` + +Lastly set your developer keypair path in `Anchor.toml`. + +```toml +[provider] +cluster = "Localnet" +wallet = "/YOUR/PATH/HERE/id.json" +``` + +If you don't know what your current keypair path is you can always run the +Solana cli to find out. + +```bash +solana config get +``` + +#### 4. Confirm the program builds + +Let's build the starter code to confirm we have everything configured correctly. +If it does not build, please revisit the steps above. + +```bash +anchor build +``` + +You can safely ignore the warnings of the build script, these will go away as we +add in the necessary code. + +Feel free to run the provided tests to make sure the rest of the development +environment is set up correctly. You'll have to install the node dependencies +using `npm` or `yarn`. The tests should run, but they'll all fail until we have +completed our program. + +```bash +yarn install +anchor test +``` + +#### 5. Explore program design + +Now that we have confirmed the program builds, let's take a look at the layout +of the program. You'll notice inside `/programs/token22-staking/src` there are a +few different files: + +- `lib.rs` +- `error.rs` +- `state.rs` +- `utils.rs` + +The `errors.rs` and `utils.rs` files are already filled out for you. 
`errors.rs` +is where we have defined our custom errors for our program. To do this, you just +have to create a public `enum` and define each error. + +`utils.rs` is a file that only contains one function called +`check_token_program`. This is just a file where you can write helper functions +if you have the need. This function was written ahead of time and will be used +in our program to simply log the specific token program that was passed in the +instruction. We will be using both `Token Extensions Program` and `spl-token` in +this program, so this function will help clarify that distinction. + +`lib.rs` is the entrypoint to our program, as is the common practice in all +Solana programs. Here we define our program ID using the `declare_id` Anchor +macro and the public `token_22_staking` module. This module is where we define +our publicly callable instructions, these can be thought of as our program's +API. + +We have four separate instructions defined here: + +- `init_pool` +- `init_stake_entry` +- `stake` +- `unstake` + +Each of these instructions makes a call to a `handler` method that is defined +elsewhere. We do this to modularize the program, which helps keep the program +organized. This is generally a good idea when working with larger programs. + +Each of these specific `handler` methods are defined in their own file in the +`instructions` directory. You'll notice there is a file corresponding to each +instruction, as well as an additional `mod.rs` file. Each of these instruction +files is where we will write the logic for each individual instruction. The +`mod.rs` file is what makes these `handler` methods callable from the `lib.rs` +file. + +#### 6. Implement `state.rs` + +Open up the `/src/state.rs` file. Here, we will define some state data +structures and a few constants that we will need throughout our program. Let's +start by bringing in the packages we'll need here. + +```rust +use { + anchor_lang::prelude::*, + solana_program::{pubkey::Pubkey}, +}; +``` + +Next, we we will need a handful of seeds defined that will be referenced +throughout the program. These seeds will be used to derive different PDAs our +program will expect to receive. + +```rust +pub const STAKE_POOL_STATE_SEED: &str = "state"; +pub const VAULT_SEED: &str = "vault"; +pub const VAULT_AUTH_SEED: &str = "vault_authority"; +pub const STAKE_ENTRY_SEED: &str = "stake_entry"; +``` + +Now, we'll define two data structs that will define the data of two different +accounts our program will use to hold state. The `PoolState` and `StakeEntry` +accounts. + +The `PoolState` account is meant to hold information about a specific staking +pool. + +```rust +#[account] +pub struct PoolState { + pub bump: u8, + pub amount: u64, + pub token_mint: Pubkey, + pub staking_token_mint: Pubkey, + pub staking_token_mint_bump: u8, + pub vault_bump: u8, + pub vault_auth_bump: u8, + pub vault_authority: Pubkey, +} +``` + +The `StakeEntry` account will hold information about a specific user's stake in +that pool. + +```rust +#[account] +pub struct StakeEntry { + pub user: Pubkey, + pub user_stake_token_account: Pubkey, + pub bump: u8, + pub balance: u64, + pub last_staked: i64, +} +``` + +#### 7. `init_pool` Instruction + +Now that we understand our program's architecture, let's get started with the +first instruction `init_pool`. 
+ +Open `init_pool.rs` and you should see the following: + +```rust +use { + anchor_lang::prelude::*, + crate::{state::*, utils::*}, + anchor_spl::{token_interface}, + std::mem::size_of +}; + +pub fn handler(ctx: Context) -> Result <()> { + check_token_program(ctx.accounts.token_program.key()); + + Ok(()) +} + +#[derive(Accounts)] +pub struct InitializePool<'info> { + pub token_program: Interface<'info, token_interface::TokenInterface>, +} +``` + +The `handler` method is defined and so is the `InitializePool` accounts struct. +The accounts struct simply expects to receive a `token_program` account and +that's it. The `handler` method calls the `check_token_program` method that is +defined in the `utils.rs` file. As it stands, this instruction does not really +do a whole lot. + +To get started implementing the logic of this instruction, let's first think +about the accounts that will be required. We will need the following to +initialize a staking pool: + +- `pool_authority` - PDA that is the authority over all staking pools. This will + be a PDA derived with a specific seed. +- `pool_state` - State account created in this instruction at a PDA. This + account will hold state regarding this specific staking pool like the amount + of tokens staked, how many users have staked, etc. +- `token_mint` - The mint of tokens expected to be staked in this staking pool. + There will be a unique staking pool for each token. +- `token_vault` - Token account of the same mint as `token_mint` at a PDA. This + is a token account with the `pool_authority` PDA as the authority. This gives + the program control over the token account. All tokens staked in this pool + will be held in this token account. +- `staking_token_mint` - The reward token mint for staking in this pool. +- `payer` - Account responsible for paying for the creation of the staking pool. +- `token_program` - The token program associated with the given token and mint + accounts. Should work for either the Token Extension or the Token program. +- `system_program` - System program. +- `rent` - Rent program. + +Let's implement this accounts struct starting with the `pool_authority` account +and its constraints. + +The `pool_authority` account is a PDA derived with the `VAULT_AUTH_SEED` that we +defined in the `state.rs` file. This account does not hold any state, so we do +not need to deserialize it into any specific account structure. For this reason, +we use the `UncheckedAccount` Anchor account type. + +```rust +#[derive(Accounts)] +pub struct InitializePool<'info> { + /// CHECK: PDA, auth over all token vaults + #[account( + seeds = [VAULT_AUTH_SEED.as_bytes()], + bump + )] + pub pool_authority: UncheckedAccount<'info>, + pub token_program: Interface<'info, token_interface::TokenInterface>, +} +``` + +Note that the `UncheckedAccount` is considered unsafe by Anchor because Anchor +does not do any additional verification under the hood. However, this is okay +here because we do verify that the account is the expected PDA and we do not +read or write from the account. However, the `/// CHECK:` comment is required +above an account utilizing the `UncheckedAccount` or `AccountInfo` structs. +Without that annotation, your program will throw the following error while +building: + +```bash +Struct field "pool_authority" is unsafe, but is not documented. +Please add a `/// CHECK:` doc comment explaining why no checks through types are necessary. +See https://www.anchor-lang.com/docs/the-accounts-struct#safety-checks for more information. 
+```
+
+Next, we'll define the `pool_state` account.
+
+This account utilizes the `init` constraint, which indicates to Anchor that we
+need to create the account. The account is expected to be a PDA derived with
+the `token_mint` account key and `STAKE_POOL_STATE_SEED` as seeds. `payer`
+pays the rent required to create this account. We allocate enough space for
+the account to store the `PoolState` data struct that we defined in the
+`state.rs` file. Lastly, we use the `Account` wrapper to deserialize the given
+account into the `PoolState` struct.
+
+```rust
+// pool state account
+#[account(
+    init,
+    seeds = [token_mint.key().as_ref(), STAKE_POOL_STATE_SEED.as_bytes()],
+    bump,
+    payer = payer,
+    space = 8 + size_of::<PoolState>()
+)]
+pub pool_state: Account<'info, PoolState>,
+```
+
+Moving on to the `token_mint` account.
+
+We make use of two account constraints on this `token_mint` account.
+`mint::token_program = <token_program>` verifies that the given account is a
+mint created from the given `<token_program>`. Before the Token Extensions
+Program, this was not really a concern as there was only one token program.
+Now, there are two! The reason we verify the `token_mint` account belongs to
+the given `token_program` is because token accounts and mints of one program
+are not compatible with token accounts and mints from the other program. So,
+for every instruction in our program, we will be verifying that all the given
+token accounts and mints belong to the same `token_program`.
+
+The second constraint `mint::authority = payer` verifies that the authority
+over the mint passed in is the `payer` account, which will also be required to
+be a signer. This may seem counterintuitive, but we do this because at the
+moment we are inherently restricting the program to one staking pool per token
+due to the PDA seeds we use for the `pool_state` account. We also allow the
+creator of the pool to define what the reward token mint is for staking in
+that pool. Because the program currently limits one pool per token, we
+wouldn't want to allow just anybody to create a staking pool for a token. This
+gives the creator of the pool control over what the reward is for staking
+here. If we did not require `mint::authority`, anyone could create the staking
+pool for `Token X` and define the reward for everyone that stakes `Token X`
+with this staking program. If they decided to make the reward token the meme
+coin `FooBar`, everyone would be stuck with that staking pool in this program.
+For this reason, we will only allow the `token_mint` authority to create a
+staking pool for said `token_mint`. This program design would probably not be
+a good choice for the real world, as it does not scale very well. But it
+serves as a great example to help get the points across in this lesson while
+keeping things relatively simple. This can also serve as a good exercise in
+program design. How would you design this program to make it more scalable for
+mainnet?
+
+Lastly, we utilize the `InterfaceAccount` struct to deserialize the given
+account into `token_interface::Mint`. The `InterfaceAccount` type is a wrapper
+around `AccountInfo` that verifies program ownership and deserializes
+underlying data into a given Rust type. Used with the `token_interface::Mint`
+struct, Anchor knows to deserialize this into a Mint account. The
+`token_interface::Mint` struct provides support for both `Token Program` and
+`Token Extensions Program` mints out of the box!
This interface concept was +created specifically for this use case. You can read more about the +`InterfaceAccount` in the +[`anchor_lang` docs](https://docs.rs/anchor-lang/latest/anchor_lang/accounts/interface_account/struct.InterfaceAccount.html). + +```rust +// Mint of token +#[account( + mint::token_program = token_program, + mint::authority = payer +)] +pub token_mint: InterfaceAccount<'info, token_interface::Mint>, +``` + +Looking at the `pool_token_vault` where the tokens staked in this pool will be +held. + +We initialize the token account with the `init` constraint, create the token +account with mint = `token_mint`, authority = `pool_authority`, and +`token_program`. This token account is created at a PDA using the `token_mint`, +`pool_authority`, and `VAULT_SEED` as seeds. `pool_authority` is assigned as +authority over this token account so that the program has control over it. + +```rust +// pool token account for Token Mint + #[account( + init, + token::mint = token_mint, + token::authority = pool_authority, + token::token_program = token_program, + // use token_mint, pool auth, and constant as seeds for token a vault + seeds = [token_mint.key().as_ref(), pool_authority.key().as_ref(), VAULT_SEED.as_bytes()], + bump, + payer = payer, + )] + pub token_vault: InterfaceAccount<'info, token_interface::TokenAccount>, +``` + +Moving on to `staking_token_mint` + +We just verify the mint belongs to the given `token_program`. Again, we are +using `InterfaceAccount` and `token_interface::Mint` here. + +```rust +// Mint of staking token +#[account( + mut, + mint::token_program = token_program +)] +pub staking_token_mint: InterfaceAccount<'info, token_interface::Mint>, +``` + +Lastly, we have a few familiar accounts. + +```rust +// payer, will pay for creation of pool vault +#[account(mut)] +pub payer: Signer<'info>, +pub token_program: Interface<'info, token_interface::TokenInterface>, +pub system_program: Program<'info, System>, +pub rent: Sysvar<'info, Rent> +``` + +Take a look at the `token_program`. This account uses the `Interface` and +`token_interface::TokenInterface` structs similar to the `TokenInterface` and +mint/token structs we used earlier. This follows the same idea as those, the +`Interface` and `token_interface::TokenInterface` structs allow for either token +program to be passed in here. This is why we must verify that all of the token +and mint accounts passed in belong to the given `token_program`. 
+ +Our accounts struct should look like this now: + +```rust +#[derive(Accounts)] +pub struct InitializePool<'info> { + /// CHECK: PDA, auth over all token vaults + #[account( + seeds = [VAULT_AUTH_SEED.as_bytes()], + bump + )] + pub pool_authority: UncheckedAccount<'info>, + // pool state account + #[account( + init, + seeds = [token_mint.key().as_ref(), STAKE_POOL_STATE_SEED.as_bytes()], + bump, + payer = payer, + space = 8 + size_of::() + )] + pub pool_state: Account<'info, PoolState>, + // Mint of token + #[account( + mint::token_program = token_program, + mint::authority = payer + )] + pub token_mint: InterfaceAccount<'info, token_interface::Mint>, + // pool token account for Token Mint + #[account( + init, + token::mint = token_mint, + token::authority = pool_authority, + token::token_program = token_program, + // use token_mint, pool auth, and constant as seeds for token a vault + seeds = [token_mint.key().as_ref(), pool_authority.key().as_ref(), VAULT_SEED.as_bytes()], + bump, + payer = payer, + )] + pub token_vault: InterfaceAccount<'info, token_interface::TokenAccount>, + // Mint of staking token + #[account( + mut, + mint::token_program = token_program + )] + pub staking_token_mint: InterfaceAccount<'info, token_interface::Mint>, + // payer, will pay for creation of pool vault + #[account(mut)] + pub payer: Signer<'info>, + pub token_program: Interface<'info, token_interface::TokenInterface>, + pub system_program: Program<'info, System>, + pub rent: Sysvar<'info, Rent> +} +``` + +Setting up the account struct is the bulk of the logic for this instruction. All +we have to do inside the `handler` function, is to initialize all of the +`pool_state` fields. + +The `handler` function should be: + +```rust +pub fn handler(ctx: Context) -> Result <()> { + check_token_program(ctx.accounts.token_program.key()); + + // initialize pool state + let pool_state = &mut ctx.accounts.pool_state; + pool_state.bump = ctx.bumps.pool_state; + pool_state.amount = 0; + pool_state.vault_bump = ctx.bumps.token_vault; + pool_state.vault_auth_bump = ctx.bumps.pool_authority; + pool_state.token_mint = ctx.accounts.token_mint.key(); + pool_state.staking_token_mint = ctx.accounts.staking_token_mint.key(); + pool_state.vault_authority = ctx.accounts.pool_authority.key(); + + msg!("Staking pool created!"); + + Ok(()) +} +``` + +After that, save your work and build to make sure there are no issues with your +program at this point. + +```bash +anchor build +``` + +#### 8. `init_stake_entry` Instruction + +Now we can move on to the `init_stake_entry.rs` file. This instruction creates a +staking account for a user to keep track of some state while they stake their +tokens. The `StakeEntry` account is required to exist before a user can stake +tokens. The `StakeEntry` account struct was defined in the `state.rs` file +earlier. + +Let's get started with the accounts required for this instruction. We will need +the following: + +- `user` - The user that is creating the `stake_entry` account. This account + must sign the transaction and will need to pay for the rent required to create + the `stake_entry` account. +- `user_stake_entry` - State account that will be created at a PDA derived from + the user, mint the staking pool was created for, and the `STAKE_ENTRY_SEED` as + seeds. +- `user_stake_token_account` - User's associated token account for the staking + reward token. +- `staking_token_mint` - Mint of the staking reward token of this pool. +- `pool_state` - `PoolState` account for this staking pool. 
+- `token_program` - Token Program.
+- `associated_token_program` - Associated Token Program.
+- `system_program` - System Program.
+
+Let's start by adding the `user` account to the `InitializeStakeEntry`
+account struct.
+
+The `user` account must sign the transaction, proving they own the stake entry
+being created, and must be mutable, since they pay for the creation of the
+account (which will change their balance).
+
+```rust
+#[derive(Accounts)]
+pub struct InitializeStakeEntry<'info> {
+    #[account(mut)]
+    pub user: Signer<'info>,
+    pub token_program: Interface<'info, token_interface::TokenInterface>,
+}
+```
+
+The `user_stake_entry` account requires a few more constraints. We need to
+initialize the account, derive the address using the expected seeds, define
+who is paying for the creation of the account, and allocate enough space for
+the `StakeEntry` data struct. We deserialize the given account into the
+`StakeEntry` account.
+
+```rust
+#[account(
+    init,
+    seeds = [user.key().as_ref(), pool_state.token_mint.key().as_ref(), STAKE_ENTRY_SEED.as_bytes()],
+    bump,
+    payer = user,
+    space = 8 + size_of::<StakeEntry>()
+)]
+pub user_stake_entry: Account<'info, StakeEntry>,
+```
+
+The `user_stake_token_account` is, again, the account where the user's staking
+rewards will eventually be sent. We create the account in this instruction so
+we don't have to worry about it later on when it's time to dole out the
+staking rewards. Because we initialize this account in this instruction, it
+puts a limit on the number of pools a user can stake in with the same reward
+token. This current design would prevent a user from creating another
+`user_stake_entry` account for another pool with the same
+`staking_token_mint`. This is another design choice that probably would not
+scale in production. Think about how else this could be designed.
+
+We use similar Anchor SPL constraints to those in the previous instruction,
+this time targeting the associated token program. With the `init` constraint,
+these tell Anchor what mint, authority, and token program to use while
+initializing this associated token account.
+
+```rust
+#[account(
+    init,
+    associated_token::mint = staking_token_mint,
+    associated_token::authority = user,
+    associated_token::token_program = token_program,
+    payer = user,
+)]
+pub user_stake_token_account: InterfaceAccount<'info, token_interface::TokenAccount>,
+```
+
+
+
+We are using the `InterfaceAccount` and
+`token_interface::TokenAccount` types here. The `token_interface::TokenAccount`
+type can only be used in conjunction with `InterfaceAccount`.
+
+
+
+Next, we add the `staking_token_mint` account. Notice we are using our first
+custom error here. This constraint verifies that the pubkey on the
+`staking_token_mint` account is equal to the pubkey stored in the
+`staking_token_mint` field of the given `PoolState` account. This field was
+initialized in the `handler` method of the `init_pool` instruction in the
+previous step.
+
+```rust
+#[account(
+    constraint = staking_token_mint.key() == pool_state.staking_token_mint
+    @ StakeError::InvalidStakingTokenMint,
+    mint::token_program = token_program
+)]
+pub staking_token_mint: InterfaceAccount<'info, token_interface::Mint>,
+```
+
+The `pool_state` account is pretty much the same here as in the `init_pool`
+instruction. However, in the `init_pool` instruction we saved the bump used to
+derive this account so we don't actually have to re-calculate it every time we
+want to verify the PDA.
We can conveniently call `bump = pool_state.bump` and +this will use the bump stored in this account. + +```rust +#[account( + seeds = [pool_state.token_mint.key().as_ref(), STAKE_POOL_STATE_SEED.as_bytes()], + bump = pool_state.bump + )] + pub pool_state: Account<'info, PoolState>, +``` + +The remaining accounts are ones that we are familiar with already and there are +not any special constraints on them. + +```rust + pub token_program: Interface<'info, token_interface::TokenInterface>, + pub associated_token_program: Program<'info, AssociatedToken>, + pub system_program: Program<'info, System> +``` + +The final `InitializeStakeEntry` account struct should be: + +```rust +#[derive(Accounts)] +pub struct InitializeStakeEntry<'info> { + #[account(mut)] + pub user: Signer<'info>, + #[account( + init, + seeds = [user.key().as_ref(), pool_state.token_mint.key().as_ref(), STAKE_ENTRY_SEED.as_bytes()], + bump, + payer = user, + space = 8 + size_of::() + )] + pub user_stake_entry: Account<'info, StakeEntry>, + #[account( + init, + associated_token::mint = staking_token_mint, + associated_token::authority = user, + associated_token::token_program = token_program, + payer = user, + )] + pub user_stake_token_account: InterfaceAccount<'info, token_interface::TokenAccount>, + #[account( + constraint = staking_token_mint.key() == pool_state.staking_token_mint + @ StakeError::InvalidStakingTokenMint, + mint::token_program = token_program + )] + pub staking_token_mint: InterfaceAccount<'info, token_interface::Mint>, + #[account( + seeds = [pool_state.token_mint.key().as_ref(), STAKE_POOL_STATE_SEED.as_bytes()], + bump = pool_state.bump + )] + pub pool_state: Account<'info, PoolState>, + pub token_program: Interface<'info, token_interface::TokenInterface>, + pub associated_token_program: Program<'info, AssociatedToken>, + pub system_program: Program<'info, System> +} +``` + +The `handler` method is also very straight-forward in this instruction. All we +need to is initialize the state of the newly created `user_stake_entry` account. + +```rust +pub fn handler(ctx: Context) -> Result<()> { + check_token_program(ctx.accounts.token_program.key()); + + // initialize user stake entry state + let user_entry = &mut ctx.accounts.user_stake_entry; + user_entry.user = ctx.accounts.user.key(); + user_entry.user_stake_token_account = ctx.accounts.user_stake_token_account.key(); + user_entry.bump = ctx.bumps.user_stake_entry; + user_entry.balance = 0; + + Ok(()) +} +``` + +Save your work and build to verify there are no compilation errors. + +```bash +anchor build +``` + +#### 9. `stake` Instruction + +The `stake` instruction is what is called when users actually want to stake +their tokens. This instruction should transfer the amount of tokens the user +wants to stake from their token account to the pool vault account that is owned +by the program. There's a lot of validation in this instruction to prevent any +potentially malicious transactions from succeeding. + +The accounts required are: + +- `pool_state` - State account of the staking pool. +- `token_mint` - Mint of the token being staked. This is required for the + transfer. +- `pool_authority` - PDA given authority over all staking pools. +- `token_vault` - Token vault account where the tokens staked in this pool are + held. +- `user` - User attempting to stake tokens. +- `user_token_account` - User owned token account where the tokens they would + like to stake will be transferred from. 
+- `user_stake_entry` - User `StakeEntry` account created in the previous + instruction +- `token_program` +- `system_program` + +Again, let's build the `Stake` account struct first. + +First taking a look at the `pool_state` account. This is the same account we +have used in previous instructions, derived with the same seeds and bump. + +```rust +#[derive(Accounts)] +pub struct Stake<'info> { + // pool state account + #[account( + mut, + seeds = [token_mint.key().as_ref(), STAKE_POOL_STATE_SEED.as_bytes()], + bump = pool_state.bump, + )] + pub pool_state: Account<'info, PoolState>, + pub token_program: Interface<'info, token_interface::TokenInterface>, +} +``` + +Next, is the `token_mint` which is required for the transfer CPI in this +instruction. This is the mint of the token that is being staked. We verify that +the given mint is of the given `token_program` to make sure we are not mixing +any `spl-token` and `Token Extensions Program` accounts. + +```rust +// Mint of token to stake +#[account( + mut, + mint::token_program = token_program +)] +pub token_mint: InterfaceAccount<'info, token_interface::Mint>, +``` + +The `pool_authority` account is again the PDA that is the authority over all of +the staking pools. + +```rust +/// CHECK: PDA, auth over all token vaults +#[account( + seeds = [VAULT_AUTH_SEED.as_bytes()], + bump +)] +pub pool_authority: UncheckedAccount<'info>, +``` + +Now we have the `token_vault` which is where the tokens will be held while they +are staked. This account MUST be verified since this is where the tokens are +transferred to. Here, we verify the given account is the expected PDA derived +from the `token_mint`, `pool_authority`, and `VAULT_SEED` seeds. We also verify +the token account belongs to the given `token_program`. We use +`InterfaceAccount` and `token_interface::TokenAccount` here again to support +either `spl-token` or `Token Extensions Program` accounts. + +```rust +// pool token account for Token Mint +#[account( + mut, + // use token_mint, pool auth, and constant as seeds for token a vault + seeds = [token_mint.key().as_ref(), pool_authority.key().as_ref(), VAULT_SEED.as_bytes()], + bump = pool_state.vault_bump, + token::token_program = token_program +)] +pub token_vault: InterfaceAccount<'info, token_interface::TokenAccount>, +``` + +The `user` account is marked as mutable and must sign the transaction. They are +the ones initiating the transfer and they are the owner of the tokens being +transferred, so their signature is a requirement for the transfer to take place. + +```rust +#[account( + mut, + constraint = user.key() == user_stake_entry.user + @ StakeError::InvalidUser +)] +pub user: Signer<'info>, +``` + + + +We also verify that the given user is the same pubkey +stored in the given `user_stake_entry` account. If it is not, our program will +throw the `InvalidUser` custom error. + + + +The `user_token_account` is the token account where the tokens being transferred +to be staked should be currently held. The mint of this token account must match +the mint of the staking pool. If it does not, a custom `InvalidMint` error will +be thrown. We also verify the given token account matches the given +`token_program`. + +```rust +#[account( + mut, + constraint = user_token_account.mint == pool_state.token_mint + @ StakeError::InvalidMint, + token::token_program = token_program +)] +pub user_token_account: InterfaceAccount<'info, token_interface::TokenAccount>, +``` + +The last three accounts are ones we are familiar with by now. 
+ +```rust +#[account( + mut, + seeds = [user.key().as_ref(), pool_state.token_mint.key().as_ref(), STAKE_ENTRY_SEED.as_bytes()], + bump = user_stake_entry.bump, + +)] +pub user_stake_entry: Account<'info, StakeEntry>, +pub token_program: Interface<'info, token_interface::TokenInterface>, +pub system_program: Program<'info, System> +``` + +The full `Stake` accounts struct should look like: + +```rust +#[derive(Accounts)] +pub struct Stake<'info> { + // pool state account + #[account( + mut, + seeds = [token_mint.key().as_ref(), STAKE_POOL_STATE_SEED.as_bytes()], + bump = pool_state.bump, + )] + pub pool_state: Account<'info, PoolState>, + // Mint of token to stake + #[account( + mut, + mint::token_program = token_program + )] + pub token_mint: InterfaceAccount<'info, token_interface::Mint>, + /// CHECK: PDA, auth over all token vaults + #[account( + seeds = [VAULT_AUTH_SEED.as_bytes()], + bump + )] + pub pool_authority: UncheckedAccount<'info>, + // pool token account for Token Mint + #[account( + mut, + // use token_mint, pool auth, and constant as seeds for token a vault + seeds = [token_mint.key().as_ref(), pool_authority.key().as_ref(), VAULT_SEED.as_bytes()], + bump = pool_state.vault_bump, + token::token_program = token_program + )] + pub token_vault: InterfaceAccount<'info, token_interface::TokenAccount>, + #[account( + mut, + constraint = user.key() == user_stake_entry.user + @ StakeError::InvalidUser + )] + pub user: Signer<'info>, + #[account( + mut, + constraint = user_token_account.mint == pool_state.token_mint + @ StakeError::InvalidMint, + token::token_program = token_program + )] + pub user_token_account: InterfaceAccount<'info, token_interface::TokenAccount>, + #[account( + mut, + seeds = [user.key().as_ref(), pool_state.token_mint.key().as_ref(), STAKE_ENTRY_SEED.as_bytes()], + bump = user_stake_entry.bump, + + )] + pub user_stake_entry: Account<'info, StakeEntry>, + pub token_program: Interface<'info, token_interface::TokenInterface>, + pub system_program: Program<'info, System> +} +``` + +That is it for the accounts struct. Save your work and verify your program still +compiles. + +```bash +anchor build +``` + +Next, we are going to implement a helper function to assist with the transfer +CPI that we will have to make. We'll add the skeleton for the implementation of +a `transfer_checked_ctx` method on our `Stake` data struct. Below the `Stake` +accounts struct we just built, add the following: + +```rust +impl<'info> Stake <'info> { + // transfer_checked for Token2022 + pub fn transfer_checked_ctx(&self) -> CpiContext<'_, '_, '_, 'info, TransferChecked<'info>> { + + } +} +``` + +This method takes `&self` as an argument, which gives us access to members of +the `Stake` struct inside of the method by calling `self`. This method is +expected to return a `CpiContext`, +[which is an Anchor primitive](https://docs.rs/anchor-lang/latest/anchor_lang/context/struct.CpiContext.html). + +A `CpiContext` is defined as: + +```rust +pub struct CpiContext<'a, 'b, 'c, 'info, T> +where + T: ToAccountMetas + ToAccountInfos<'info>, +{ + pub accounts: T, + pub remaining_accounts: Vec>, + pub program: AccountInfo<'info>, + pub signer_seeds: &'a [&'b [&'c [u8]]], +} +``` + +Where `T` is the accounts struct for the instruction you are invoking. + +This is very similar to the `Context` object that traditional Anchor +instructions expect as input (i.e. `ctx: Context`). This is the same +concept here, except we are defining one for a Cross-Program Invocation instead! 
+ +In our case, we will be invoking the `transfer_checked` instruction in either +token programs, hence the `transfer_checked_ctx` method name and the +`TransferChecked` type in the returned `CpiContext`. The regular `transfer` +instruction has been deprecated in the `Token Extensions Program` and it is +suggested you use `transfer_checked` going forward. + +Now that we know what the goal of this method is, we can implement it! First, we +will need to define the program we will be invoking. This should be the +`token_program` that was passed into our accounts struct. + +```rust +impl<'info> Stake <'info> { + // transfer_checked for spl-token or Token2022 + pub fn transfer_checked_ctx(&self) -> CpiContext<'_, '_, '_, 'info, TransferChecked<'info>> { + let cpi_program = self.token_program.to_account_info(); + } +} +``` + +Notice how we are simply able to reference the accounts in the `Stake` data +struct by calling `self`. + +Then, we need to define the accounts we'll be passing in the CPI. We can do this +via the `TransferChecked` data type, which we are importing from the +[`anchor_spl::token_2022` crate](https://docs.rs/anchor-spl/latest/anchor_spl/token_2022/struct.TransferChecked.html) +at the top of our file. This data type is defined as: + +```rust +pub struct TransferChecked<'info> { + pub from: AccountInfo<'info>, + pub mint: AccountInfo<'info>, + pub to: AccountInfo<'info>, + pub authority: AccountInfo<'info>, +} +``` + +This data type expects four different `AccountInfo` objects, all of which should +have been passed into our program. Just like with the `cpi_program`, we can +build this `TransferChecked` data struct by referencing `self` which gives us +access to all of the accounts defined in the `Stake` data structure. Note, this +is only possible because `transfer_checked_ctx` is being implemented on the +`Stake` data type with this line `impl<'info> Stake <'info>`. Without it, there +is no self to reference. + +```rust +impl<'info> Stake <'info> { + // transfer_checked for spl-token or Token2022 + pub fn transfer_checked_ctx(&self) -> CpiContext<'_, '_, '_, 'info, TransferChecked<'info>> { + let cpi_program = self.token_program.to_account_info(); + let cpi_accounts = TransferChecked { + from: self.user_token_account.to_account_info(), + to: self.token_vault.to_account_info(), + authority: self.user.to_account_info(), + mint: self.token_mint.to_account_info() + }; + } +} +``` + +So we have our `cpi_program` and `cpi_accounts` defined, but this method is +supposed to return a `CpiContext` object. To do that, we simply need to pass +these two into the `CpiContext` constructor `CpiContext::new`. + +```rust +impl<'info> Stake <'info> { + // transfer_checked for Token2022 + pub fn transfer_checked_ctx(&self) -> CpiContext<'_, '_, '_, 'info, TransferChecked<'info>> { + let cpi_program = self.token_program.to_account_info(); + let cpi_accounts = TransferChecked { + from: self.user_token_account.to_account_info(), + to: self.token_vault.to_account_info(), + authority: self.user.to_account_info(), + mint: self.token_mint.to_account_info() + }; + + CpiContext::new(cpi_program, cpi_accounts) + } +} +``` + +With this defined, we can call `transfer_checked_ctx` at any point in our +`handler` method and it will return a `CpiContext` object that we can use to +execute a CPI. + +Moving on to the `handler` function, we'll need to do a couple of things here. +First, we need to use our `transfer_checked_ctx` method to create the correct +`CpiContext` and make the CPI. 
Then, we have some critical updates to make to our two state accounts.
+As a reminder, we have two state accounts: `PoolState` and `StakeEntry`. The
+former holds information regarding the current state of the overall staking
+pool, while the latter keeps an accurate record of a specific user's stake in
+a pool. With that in mind, any time there is an update to the staking pool we
+should be updating both the `PoolState` and a given user's `StakeEntry`
+accounts in some way.
+
+For starters, let's implement the actual CPI. Since we defined the program and
+accounts required for the CPI ahead of time in the `transfer_checked_ctx()`
+method, the actual CPI is very straightforward. We'll make use of another
+helper function from the `anchor_spl::token_2022` crate, specifically the
+`transfer_checked` function. This is
+[defined as the following](https://docs.rs/anchor-spl/latest/anchor_spl/token_2022/fn.transfer_checked.html):
+
+```rust
+pub fn transfer_checked<'info>(
+    ctx: CpiContext<'_, '_, '_, 'info, TransferChecked<'info>>,
+    amount: u64,
+    decimals: u8
+) -> Result<()>
+```
+
+It takes three input parameters:
+
+- `CpiContext`
+- amount
+- decimals
+
+The `CpiContext` is exactly what is returned in our `transfer_checked_ctx()`
+method, so for this first argument we can simply call the method with
+`ctx.accounts.transfer_checked_ctx()`.
+
+The amount is simply the amount of tokens to transfer, which our `handler`
+method expects as an input parameter.
+
+Lastly, the `decimals` argument is the number of decimals on the token mint
+being transferred. This is a requirement of the transfer checked instruction.
+Since the `token_mint` account is passed into the instruction, you can read
+the decimals directly from it and pass that value in as the third argument.
+
+All in all, it should look something like this:
+
+```rust
+pub fn handler(ctx: Context<Stake>, stake_amount: u64) -> Result<()> {
+    check_token_program(ctx.accounts.token_program.key());
+
+    msg!("Pool initial total: {}", ctx.accounts.pool_state.amount);
+    msg!("User entry initial balance: {}", ctx.accounts.user_stake_entry.balance);
+
+    let decimals = ctx.accounts.token_mint.decimals;
+    // transfer_checked for either spl-token or the Token Extensions Program
+    transfer_checked(ctx.accounts.transfer_checked_ctx(), stake_amount, decimals)?;
+
+    Ok(())
+}
+```
+
+The `transfer_checked` function builds a `transfer_checked` instruction object
+and actually invokes the program in the `CpiContext` under the hood. We are
+just utilizing Anchor's wrapper over the top of this process. If you're
+curious,
+[here is the source code](https://docs.rs/anchor-spl/latest/src/anchor_spl/token_2022.rs.html#35-61).
+
+```rust
+pub fn transfer_checked<'info>(
+    ctx: CpiContext<'_, '_, '_, 'info, TransferChecked<'info>>,
+    amount: u64,
+    decimals: u8,
+) -> Result<()> {
+    let ix = spl_token_2022::instruction::transfer_checked(
+        ctx.program.key,
+        ctx.accounts.from.key,
+        ctx.accounts.mint.key,
+        ctx.accounts.to.key,
+        ctx.accounts.authority.key,
+        &[],
+        amount,
+        decimals,
+    )?;
+    solana_program::program::invoke_signed(
+        &ix,
+        &[
+            ctx.accounts.from,
+            ctx.accounts.mint,
+            ctx.accounts.to,
+            ctx.accounts.authority,
+        ],
+        ctx.signer_seeds,
+    )
+    .map_err(Into::into)
+}
+```
+
+Using Anchor's `CpiContext` wrapper is much cleaner and abstracts a lot away,
+but it's important to understand what's going on under the hood.
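+
+If it helps to see what that helper is buying us, here is roughly the same CPI
+with the `CpiContext` built inline in the handler instead of through
+`transfer_checked_ctx()`. This is just a sketch for comparison; it assumes the
+same accounts and imports we are already using.
+
+```rust
+// Same CPI as above, without the helper method
+let cpi_ctx = CpiContext::new(
+    ctx.accounts.token_program.to_account_info(),
+    TransferChecked {
+        from: ctx.accounts.user_token_account.to_account_info(),
+        mint: ctx.accounts.token_mint.to_account_info(),
+        to: ctx.accounts.token_vault.to_account_info(),
+        authority: ctx.accounts.user.to_account_info(),
+    },
+);
+transfer_checked(cpi_ctx, stake_amount, decimals)?;
+```
+
+Tucking that boilerplate into `transfer_checked_ctx()` keeps the handler
+focused on the state changes, which is exactly what we'll work on next.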
+
+Once the `transfer_checked` function has completed, we can start updating our
+state accounts because that means the transfer has taken place. The two
+accounts we'll want to update are the `pool_state` and `user_entry` accounts,
+which represent the overall staking pool data and this specific user's data
+regarding their stake in this pool.
+
+Since this is the `stake` instruction and the user is transferring tokens into
+the pool, both values representing the amount the user has staked and the total
+amount staked in the pool should increase by the `stake_amount`.
+
+To do this, we will deserialize the `pool_state` and `user_entry` accounts as
+mutable and increase the `pool_state.amount` and `user_entry.balance` fields by
+the `stake_amount` using `checked_add()`. `checked_add()` is a standard Rust
+method that performs the addition safely, guarding against integer overflow:
+it adds two numbers and returns `None` if the result would overflow.
+
+Lastly, we'll also update the `user_entry.last_staked` field with the current
+unix timestamp from the `Clock`. This is just meant to keep track of the most
+recent time a specific user staked tokens.
+
+Add this after `transfer_checked` and before `Ok(())` in the `handler` function.
+
+```rust
+let pool_state = &mut ctx.accounts.pool_state;
+let user_entry = &mut ctx.accounts.user_stake_entry;
+
+// update pool state amount
+pool_state.amount = pool_state.amount.checked_add(stake_amount).unwrap();
+msg!("Current pool stake total: {}", pool_state.amount);
+
+// update user stake entry
+user_entry.balance = user_entry.balance.checked_add(stake_amount).unwrap();
+msg!("User stake balance: {}", user_entry.balance);
+user_entry.last_staked = Clock::get().unwrap().unix_timestamp;
+```
+
+That was a lot, and we covered some new concepts, so feel free to go back
+through and make sure it all makes sense. Check out all of the external
+resources that are linked for any of the new topics. Once you're ready to move
+on, save your work and verify the program still builds!
+
+```bash
+anchor build
+```
+
+#### 10. `unstake` Instruction
+
+Lastly, the `unstake` instruction will be pretty similar to the `stake`
+instruction. We'll need to transfer tokens out of the stake pool to the user;
+this is also when the user receives their staking rewards, which are minted to
+them in the same transaction.
+
+Something to note here: we are not going to let the user choose how many tokens
+to unstake; we will simply unstake all of the tokens they currently have
+staked. Additionally, we are not going to implement a very realistic algorithm
+to determine how many reward tokens they have accrued. We'll simply take their
+stake balance and multiply it by 10 to get the amount of reward tokens to mint
+them. Again, we do this to keep the program simple and stay focused on the goal
+of the lesson: the `Token Extensions Program`.
+
+The account structure will be very similar to the `stake` instruction, but
+there are a few differences. We'll need:
+
+- `pool_state`
+- `token_mint`
+- `pool_authority`
+- `token_vault`
+- `user`
+- `user_token_account`
+- `user_stake_entry`
+- `staking_token_mint`
+- `user_stake_token_account`
+- `token_program`
+- `system_program`
+
+The main difference between the required accounts in `stake` and `unstake` is
+that we need the `staking_token_mint` and `user_stake_token_account` for this
+instruction to mint the user their staking rewards.
We won't cover each account +individually because the struct is the exact same as the previous instruction, +just with the addition of these two new accounts. + +First, the `staking_token_mint` account is the mint of the staking reward token. +The mint authority must be the `pool_authority` PDA so that the program has the +ability to mint tokens to users. The given `staking_token_mint` account also +must match the given `token_program`. We'll add a custom constraint verifying +that this account matches the pubkey stored in the `staking_token_mint` field of +the `pool_state` account, if not we will return the custom +`InvalidStakingTokenMint` error. + +```rust +// Mint of staking token + #[account( + mut, + mint::authority = pool_authority, + mint::token_program = token_program, + constraint = staking_token_mint.key() == pool_state.staking_token_mint + @ StakeError::InvalidStakingTokenMint + )] + pub staking_token_mint: InterfaceAccount<'info, token_interface::Mint>, +``` + +The `user_stake_token_account` follows a similar vein. It must match the mint +`staking_token_mint`, the `user` must be the authority since these are their +staking rewards, and this account must match what we have stored on the +`user_stake_entry` account as their stake token account. + +```rust +#[account( + mut, + token::mint = staking_token_mint, + token::authority = user, + token::token_program = token_program, + constraint = user_stake_token_account.key() == user_stake_entry.user_stake_token_account + @ StakeError::InvalidUserStakeTokenAccount + )] + pub user_stake_token_account: InterfaceAccount<'info, token_interface::TokenAccount>, +``` + +Here is what the final `Unstake` struct should look like: + +```rust +#[derive(Accounts)] +pub struct Unstake<'info> { + // pool state account + #[account( + mut, + seeds = [token_mint.key().as_ref(), STAKE_POOL_STATE_SEED.as_bytes()], + bump = pool_state.bump, + )] + pub pool_state: Account<'info, PoolState>, + // Mint of token + #[account( + mut, + mint::token_program = token_program + )] + pub token_mint: InterfaceAccount<'info, token_interface::Mint>, + /// CHECK: PDA, auth over all token vaults + #[account( + seeds = [VAULT_AUTH_SEED.as_bytes()], + bump + )] + pub pool_authority: UncheckedAccount<'info>, + // pool token account for Token Mint + #[account( + mut, + // use token_mint, pool auth, and constant as seeds for token a vault + seeds = [token_mint.key().as_ref(), pool_authority.key().as_ref(), VAULT_SEED.as_bytes()], + bump = pool_state.vault_bump, + token::token_program = token_program + )] + pub token_vault: InterfaceAccount<'info, token_interface::TokenAccount>, + // require a signature because only the user should be able to unstake their tokens + #[account( + mut, + constraint = user.key() == user_stake_entry.user + @ StakeError::InvalidUser + )] + pub user: Signer<'info>, + #[account( + mut, + constraint = user_token_account.mint == pool_state.token_mint + @ StakeError::InvalidMint, + token::token_program = token_program + )] + pub user_token_account: InterfaceAccount<'info, token_interface::TokenAccount>, + #[account( + mut, + seeds = [user.key().as_ref(), pool_state.token_mint.key().as_ref(), STAKE_ENTRY_SEED.as_bytes()], + bump = user_stake_entry.bump, + + )] + pub user_stake_entry: Account<'info, StakeEntry>, + // Mint of staking token + #[account( + mut, + mint::authority = pool_authority, + mint::token_program = token_program, + constraint = staking_token_mint.key() == pool_state.staking_token_mint + @ StakeError::InvalidStakingTokenMint + )] + pub 
staking_token_mint: InterfaceAccount<'info, token_interface::Mint>, + #[account( + mut, + token::mint = staking_token_mint, + token::authority = user, + token::token_program = token_program, + constraint = user_stake_token_account.key() == user_stake_entry.user_stake_token_account + @ StakeError::InvalidUserStakeTokenAccount + )] + pub user_stake_token_account: InterfaceAccount<'info, token_interface::TokenAccount>, + pub token_program: Interface<'info, token_interface::TokenInterface>, + pub system_program: Program<'info, System> +} +``` + +Now, we have two different CPIs to make in this instruction - a transfer and a +mint. We are going to be using a `CpiContext` for both in this instruction as +well. There is a catch however, in the `stake` instruction we did not require a +"signature" from a PDA but in this instruction we do. So, we cannot follow the +exact same pattern as before but we can do something very similar. + +Again, let's create two skeleton helper functions implemented on the `Unstake` +data struct: `transfer_checked_ctx` and `mint_to_ctx`. + +```rust +impl<'info> Unstake <'info> { + // transfer_checked for Token2022 + pub fn transfer_checked_ctx<'a>(&'a self, seeds: &'a [&[&[u8]]]) -> CpiContext<'_, '_, '_, 'info, TransferChecked<'info>> { + + } + + // mint_to + pub fn mint_to_ctx<'a>(&'a self, seeds: &'a [&[&[u8]]]) -> CpiContext<'_, '_, '_, 'info, MintTo<'info>> { + + } +} +``` + +We'll work on `transfer_checked_ctx` first, the implementation of this method is +almost exactly the same as in the `stake` instruction. The main difference is +here we have two arguments: `self` and `seeds`. The second argument will be the +vector of PDA signature seeds that we would normally pass into `invoke_signed` +ourselves. Since we need to sign with a PDA, instead of calling the +`CpiContext::new` constructor, we'll call `CpiContext::new_with_signer` instead. + +`new_with_signer` is defined as: + +```rust +pub fn new_with_signer( + program: AccountInfo<'info>, + accounts: T, + signer_seeds: &'a [&'b [&'c [u8]]] +) -> Self +``` + +Additionally, the `from` and `to` accounts in our `TransferChecked` struct will +be reversed from before. + +```rust +// transfer_checked for spl-token or Token2022 +pub fn transfer_checked_ctx<'a>(&'a self, seeds: &'a [&[&[u8]]]) -> CpiContext<'_, '_, '_, 'info, TransferChecked<'info>> { + + let cpi_program = self.token_program.to_account_info(); + let cpi_accounts = TransferChecked { + from: self.token_vault.to_account_info(), + to: self.user_token_account.to_account_info(), + authority: self.pool_authority.to_account_info(), + mint: self.token_mint.to_account_info() + }; + + CpiContext::new_with_signer(cpi_program, cpi_accounts, seeds) +} +``` + +Check out the +[`anchor_lang` crate docs to learn more about `CpiContext`](https://docs.rs/anchor-lang/latest/anchor_lang/context/struct.CpiContext.html#method.new_with_signer). + +Moving on to the `mint_to_ctx` function, we need to do the exact same thing we +just did with `transfer_checked_ctx` but target the `mint_to` instruction +instead! To do this, we'll need to use the `MintTo` struct instead of +`TransferChecked`. `MintTo` is defined as: + +```rust +pub struct MintTo<'info> { + pub mint: AccountInfo<'info>, + pub to: AccountInfo<'info>, + pub authority: AccountInfo<'info>, +} +``` + +[`anchor_spl::token_2022::MintTo` rust crate docs](https://docs.rs/anchor-spl/latest/anchor_spl/token_2022/struct.MintTo.html). 
+ +With this in mind, we can implement `mint_to_ctx` the same exact way we did +`transfer_checked_ctx`. We'll be targeting the exact same `token_program` with +this CPI, so `cpi_program` should be the same as before. We construct the +`MinTo` struct the same as we did the `TransferChecked` struct, just passing the +appropriate accounts here. The `mint` is the `staking_token_mint` because that +is the mint we will be minting to the user, `to` is the user's +`user_stake_token_account`, and `authority` is the `pool_authority` because this +PDA should have sole authority over this mint. + +Lastly, the function returns a `CpiContext` object constructed using the signer +seeds passed into it. + +```rust +// mint_to +pub fn mint_to_ctx<'a>(&'a self, seeds: &'a [&[&[u8]]]) -> CpiContext<'_, '_, '_, 'info, MintTo<'info>> { + let cpi_program = self.token_program.to_account_info(); + let cpi_accounts = MintTo { + mint: self.staking_token_mint.to_account_info(), + to: self.user_stake_token_account.to_account_info(), + authority: self.pool_authority.to_account_info() + }; + + CpiContext::new_with_signer(cpi_program, cpi_accounts, seeds) +} +``` + +Now we can move on to the logic of our `handler` function. This instruction will +need to update both the pool and user state accounts, transfer all of the user's +staked tokens, and mint the user their reward tokens. To get started, we are +going to log some info and determine how many tokens to transfer to the user. + +We have kept track of the user's stake amount in the `user_stake_entry` account, +so we know exactly how many tokens this user has staked at this point in time. +We can fetch this amount from the `user_entry.balance` field. Then, we'll log +some information so that we can inspect this later. We'll also verify that the +amount to transfer out is _not_ greater than the amount that is stored in the +pool as an extra safety measure. If so, we will return a custom `OverdrawError` +and prevent the user from draining the pool. + +```rust +pub fn handler(ctx: Context) -> Result <()> { + check_token_program(ctx.accounts.token_program.key()); + + let user_entry = &ctx.accounts.user_stake_entry; + let amount = user_entry.balance; + let decimals = ctx.accounts.token_mint.decimals; + + msg!("User stake balance: {}", user_entry.balance); + msg!("Withdrawing all of users stake balance. Tokens to withdraw: {}", amount); + msg!("Total staked before withdrawal: {}", ctx.accounts.pool_state.amount); + + // verify user and pool have >= requested amount of tokens staked + if amount > ctx.accounts.pool_state.amount { + return Err(StakeError::OverdrawError.into()) + } + + // More code to come + + Ok(()) +} +``` + +Next, we will fetch the signer seeds needed for the PDA signature. The +`pool_authority` is what will be required to sign in these CPIs, so we use that +account's seeds. + +```rust +// program signer seeds +let auth_bump = ctx.accounts.pool_state.vault_auth_bump; +let auth_seeds = &[VAULT_AUTH_SEED.as_bytes(), &[auth_bump]]; +let signer = &[&auth_seeds[..]]; +``` + +Once we have those seeds stored in the `signer` variable, we can easily pass it +into the `transfer_checked_ctx()` method. At the same time, we'll call the +`transfer_checked` helper function from the Anchor crate to actually invoke the +CPI behind the scenes. 
+ +```rust +// transfer staked tokens +transfer_checked(ctx.accounts.transfer_checked_ctx(signer), amount, decimals)?; +``` + +Next, we'll calculate how many reward tokens to mint the user and invoke the +`mint_to` instruction using our `mint_to_ctx` function. Remember, we are just +taking the amount of tokens the user has staked and multiplying it by 10 to get +their reward amount. This is a very simple algorithm that would not make sense +to use in production, but it works here as an example. + +Notice we use `checked_mul()` here, similar to how we used `checked_add` in the +`stake` instruction. Again, this is to prevent buffer overflow. + +```rust +// mint users staking rewards, 10x amount of staked tokens +let stake_rewards = amount.checked_mul(10).unwrap(); + +// mint rewards to user +mint_to(ctx.accounts.mint_to_ctx(signer), stake_rewards)?; +``` + +Lastly, we will need to update our state accounts by subtracting the amount that +was unstaked from both the pool and user's balances. We'll be using +`checked_sub()` for this. + +```rust +// borrow mutable references +let pool_state = &mut ctx.accounts.pool_state; +let user_entry = &mut ctx.accounts.user_stake_entry; + +// subtract transferred amount from pool total +pool_state.amount = pool_state.amount.checked_sub(amount).unwrap(); +msg!("Total staked after withdrawal: {}", pool_state.amount); + +// update user stake entry +user_entry.balance = user_entry.balance.checked_sub(amount).unwrap(); +user_entry.last_staked = Clock::get().unwrap().unix_timestamp; +``` + +Putting that all together gives us our final `handler` function: + +```rust +pub fn handler(ctx: Context) -> Result <()> { + check_token_program(ctx.accounts.token_program.key()); + + let user_entry = &ctx.accounts.user_stake_entry; + let amount = user_entry.balance; + let decimals = ctx.accounts.token_mint.decimals; + + msg!("User stake balance: {}", user_entry.balance); + msg!("Withdrawing all of users stake balance. Tokens to withdraw: {}", amount); + msg!("Total staked before withdrawal: {}", ctx.accounts.pool_state.amount); + + // verify user and pool have >= requested amount of tokens staked + if amount > ctx.accounts.pool_state.amount { + return Err(StakeError::OverdrawError.into()) + } + + // program signer seeds + let auth_bump = ctx.accounts.pool_state.vault_auth_bump; + let auth_seeds = &[VAULT_AUTH_SEED.as_bytes(), &[auth_bump]]; + let signer = &[&auth_seeds[..]]; + + // transfer staked tokens + transfer_checked(ctx.accounts.transfer_checked_ctx(signer), amount, decimals)?; + + // mint users staking rewards, 10x amount of staked tokens + let stake_rewards = amount.checked_mul(10).unwrap(); + + // mint rewards to user + mint_to(ctx.accounts.mint_to_ctx(signer), stake_rewards)?; + + // borrow mutable references + let pool_state = &mut ctx.accounts.pool_state; + let user_entry = &mut ctx.accounts.user_stake_entry; + + // subtract transferred amount from pool total + pool_state.amount = pool_state.amount.checked_sub(amount).unwrap(); + msg!("Total staked after withdrawal: {}", pool_state.amount); + + // update user stake entry + user_entry.balance = user_entry.balance.checked_sub(amount).unwrap(); + user_entry.last_staked = Clock::get().unwrap().unix_timestamp; + + Ok(()) +} +``` + +That is it for our staking program! There has been an entire test suite written +ahead of time for you to run against this program. 
Go ahead and install the +needed packages for testing and run the tests: + +```bash +npm install +anchor test +``` + +If you run into problems feel free to checkout the +[solution branch](https://github.com/Unboxed-Software/token22-staking/tree/solution). + +## Challenge + +Create your own program that is Token Program and Token Extensions Program +agnostic. diff --git a/content/courses/token-extensions/transfer-fee.mdx b/content/courses/token-extensions/transfer-fee.mdx new file mode 100644 index 000000000..072150b84 --- /dev/null +++ b/content/courses/token-extensions/transfer-fee.mdx @@ -0,0 +1,1225 @@ +--- +title: Transfer Fee Extension +objectives: + - Create transfer fee configured mint + - Transfer tokens of that mint + - Collect fees for the transfer +description: + "Create a token that allows a fee to be charged each time the token is traded." +--- + +## Summary + +- The Token Extension Program's `transfer fee` extension allows fees to be + withheld on every transfer. These fees are held on the recipient's account, + and can only be redeemed from the `withdrawWithheldAuthority` authority +- Withheld tokens can be withdrawn directly from the recipient accounts or can + be harvested back to the mint and then withdrawn +- Transfers with mints using the `transfer fee` extension need to use the + `transferCheckedWithFee` instruction + +## Overview + +Suppose you're a Solana game developer and you're making a large open world +multiplayer role playing game. You'll have a currency in this game that all the +players will earn and trade with. To make the economy in the game circular, you +may want to charge a small transfer fee every time this currency changes hands, +you'd call this the developer tax. This can be accomplished with the +`transfer fee` extension. The neat part is this will work on every transfer, +in-game and out! + +The Token Extension Program's `transfer fee` extension enables you to configure +a transfer fee on a mint such that fees are assessed at the protocol level. On +every transfer, some amount of that mint is withheld on the recipient account +which cannot be used by the recipient. At any point after the transfer, the +`withdraw` authority can claim these withheld tokens. + +The `transfer fee` extension is customizable and updatable. Here are the inputs +that we'll delve into a bit later: + +- Fee basis points: This is the fee assessed on every transfer. For example, if + 1000 tokens with 50 basis points are transferred, it will yield 5 tokens. +- Maximum fee: The cap on transfer fees. With a maximum fee of 5000 tokens, a + transfer of 10,000,000,000,000 tokens will only yield 5000 tokens. +- Transfer fee authority: The entity that can modify the fees. +- Withdraw withheld authority: The entity that can move tokens withheld on the + mint or token accounts. + +### Calculating fee basis points + +Before we go into the extension, here's a quick intro to "fee basis points". + +A basis point is a unit of measurement used in finance to describe the +percentage change in the value or rate of a financial instrument. One basis +point is equivalent to 0.01% or 0.0001 in decimal form. + +To get the fee we must calculate it as follows: + +``` +Fee = (token_amount * fee_basis_points) / 10000 +``` + +The constant 10,000 is used to convert the fee basis point percentage to the +equivalent amount. 
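+
+As a quick sanity check of the formula, here is the summary's example (1000
+tokens transferred at 50 basis points) worked out in code. The snippet is
+purely illustrative:
+
+```ts
+// 50 basis points = 0.5%
+const tokenAmount = 1000n;
+const feeBasisPoints = 50n;
+const fee = (tokenAmount * feeBasisPoints) / 10000n; // 5n
+console.log(`Fee on ${tokenAmount} tokens: ${fee}`); // Fee on 1000 tokens: 5
+```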
+ +### Configuring a mint with a transfer fee + +Initializing a mint with the `transfer fee` extension involves three +instructions: + +- `SystemProgram.createAccount` +- `createInitializeTransferFeeConfigInstruction` +- `createInitializeMintInstruction` + +The first instruction `SystemProgram.createAccount` allocates space on the +blockchain for the mint account. This instruction accomplishes three things: + +- Allocates `space` +- Transfers `lamports` for rent +- Assigns to it's owning program + +As with all Token Extension Program's mints, we need to calculate the space and +lamports needed for the mint. We can get these by calling `getMintLen` and +`getMinimumBalanceForRentExemption` + +```ts +const extensions = [ExtensionType.TransferFeeConfig]; +const mintLength = getMintLen(extensions); + +const mintLamports = + await connection.getMinimumBalanceForRentExemption(mintLength); + +const createAccountInstruction = SystemProgram.createAccount({ + fromPubkey: payer.publicKey, + newAccountPubkey: mintKeypair.publicKey, + space: mintLength, + lamports: mintLamports, + programId: TOKEN_2022_PROGRAM_ID, +}); +``` + +The second instruction `createInitializeTransferFeeConfigInstruction` +initializes the transfer fee extension. + +It takes the following parameters: + +- `mint`: Token mint account +- `transferFeeConfigAuthority`: Optional authority that can update the fees +- `withdrawWithheldAuthority`: Optional authority that can withdraw fees +- `transferFeeBasisPoints`: Amount of transfer collected as fees, expressed as + basis points of the transfer amount +- `maximumFee`: Maximum fee assessed on transfers +- `programId`: SPL Token program account + +```ts +const initializeTransferFeeConfigInstruction = + createInitializeTransferFeeConfigInstruction( + mintKeypair.publicKey, + payer.publicKey, + payer.publicKey, + feeBasisPoints, + maxFee, + TOKEN_2022_PROGRAM_ID, + ); +``` + +The third instruction `createInitializeMintInstruction` initializes the mint. + +```ts +const initializeMintInstruction = createInitializeMintInstruction( + mintKeypair.publicKey, + decimals, + payer.publicKey, + null, + TOKEN_2022_PROGRAM_ID, +); +``` + +Lastly, you need to add all of these instructions to a transaction and send it +off the the blockchain. + +```ts +const mintTransaction = new Transaction().add( + createAccountInstruction, + initializeTransferFeeConfigInstruction, + initializeMintInstruction, +); + +const signature = await sendAndConfirmTransaction( + connection, + mintTransaction, + [payer, mintKeypair], + { commitment: "finalized" }, +); +``` + +### Transferring mint with transfer fees + +There are a couple of notes when transferring tokens with the `transfer fee` +extension. + +First, the recipient is the one who "pays" for the fee. If I send 100 tokens +with basis points of 500 (5%), the recipient will receive 95 tokens (five +withheld) + +Second, the fee is calculated not by the tokens sent, but the smallest unit of +said token. In Solana programming, we always specify amounts to be transferred, +minted or burned in their smallest unit. To send one SOL to someone, we actually +send `1 * 10 ^ 9` lamports. Another way to look at it is if you wanted to send +one US dollar, you're actually sending 100 pennies. Let's make this dollar a +token with a 500 basis points (5%) transfer fee. Sending one dollar, would +result in a five cent fee. Now let's say we have a max fee of 10 cents, this +will always be the highest fee, even if we send $10,000. 
+ +The calculation can be summed up like this: + +```ts +const transferAmount = BigInt(tokensToSend * 10 ** decimals); +const basisPointFee = + (transferAmount * BigInt(feeBasisPoints)) / BigInt(10_000); +const fee = basisPointFee > maxFee ? maxFee : basisPointFee; +``` + +Third and final, there are two ways to transfer tokens with the `transfer fee` +extension: `transfer_checked` or `transfer_checked_with_fee`. The regular +`transfer` function lacks the necessary logic to handle fees. + +You have the choice of which function to use for transferring: + +- `transfer_checked_with_fee`: You have to calculate and provide the correct + fees +- `transfer_checked`: This will calculate the fees for you + +```ts +/** + * Transfer tokens from one account to another, asserting the token mint and decimals + * + * @param connection Connection to use + * @param payer Payer of the transaction fees + * @param source Source account + * @param mint Mint for the account + * @param destination Destination account + * @param owner Owner of the source account + * @param amount Number of tokens to transfer + * @param decimals Number of decimals in transfer amount + * @param multiSigners Signing accounts if `owner` is a multisig + * @param confirmOptions Options for confirming the transaction + * @param programId SPL Token program account + * + * @return Signature of the confirmed transaction + */ + +const secondTransferAmount = BigInt(1 * 10 ** decimals); +const secondTransferSignature = await transferChecked( + connection, + payer, + sourceAccount, + mint, + destinationAccount, + sourceKeypair, + secondTransferAmount, + decimals, // Can also be gotten by getting the mint account details with `getMint(...)` + [], + undefined, + TOKEN_2022_PROGRAM_ID, +); +``` + +### Collecting fees + +There are two ways to "collect fees" from the withheld portion of the token +accounts. + +1. The `withdrawWithheldAuthority` can withdraw directly from the withheld + portion of a user's token account into any "token vault" +2. We can "harvest" the withheld tokens and store them within the mint account + itself, which can be withdrawn at any point from the + `withdrawWithheldAuthority` + +But first, why have these two options? + +Simply put, directly withdrawing is a permissioned function, meaning only the +`withdrawWithheldAuthority` can call it. Whereas harvesting is permissionless, +where anyone can call the harvest function consolidating all of the fees into +the mint itself. + +But why not just directly transfer the tokens to the fee collector on each +transfer? + +Two reasons: one, where the mint creator wants the fees to end up may change. +Two, this would create a bottleneck. + +Say you have a very popular token with `transfer fee` enabled and your fee vault +is the recipient of the fees. If thousands of people are trying to transact the +token simultaneously, they'll all have to update your fee vault's balance - your +fee vault has to be "writable". While it's true Solana can execute in parallel, +it cannot execute in parallel if the same accounts are being written to at the +same time. So, these thousands of people would have to wait in line, slowing +down the transfer drastically. This is solved by setting aside the `withheld` +transfer fees within the recipient's account - this way, only the sender and +receiver's accounts are writable. Then the `withdrawWithheldAuthority` can +withdraw to the fee vault anytime after. 
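+
+Before looking at each approach, it can be useful to check how much a single
+token account currently has withheld. Here is a small sketch, assuming a
+`connection`, a `tokenAccountAddress`, and the `getAccount` /
+`getTransferFeeAmount` imports from `@solana/spl-token`:
+
+```ts
+// Read the token account and inspect the withheld amount recorded by the
+// transfer fee extension (getTransferFeeAmount returns null if missing)
+const tokenAccount = await getAccount(
+  connection,
+  tokenAccountAddress,
+  "confirmed",
+  TOKEN_2022_PROGRAM_ID,
+);
+const withheld = getTransferFeeAmount(tokenAccount)?.withheldAmount ?? BigInt(0);
+console.log(`Withheld on this account: ${withheld}`);
+```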
+ +#### Directly withdrawing fees + +In the first case, If we want to withdraw all withheld transfer fees from all +token accounts directly we can do the following: + +1. Grab all token accounts associated with the mint using `getProgramAccounts` +2. Add all token accounts with some withheld tokens to a list +3. Call the `withdrawWithheldTokensFromAccounts` function (the `authority` needs + to be a signer) + +```ts +// grabs all of the token accounts for a given mint +const accounts = await connection.getProgramAccounts(TOKEN_2022_PROGRAM_ID, { + commitment: "finalized", + filters: [ + { + memcmp: { + offset: 0, + bytes: mint.toString(), + }, + }, + ], +}); + +const accountsToWithdrawFrom = []; +for (const accountInfo of accounts) { + const unpackedAccount = unpackAccount( + accountInfo.pubkey, + accountInfo.account, + TOKEN_2022_PROGRAM_ID, + ); + + // If there is withheld tokens add it to our list + const transferFeeAmount = getTransferFeeAmount(unpackedAccount); + if ( + transferFeeAmount != null && + transferFeeAmount.withheldAmount > BigInt(0) + ) { + accountsToWithdrawFrom.push(accountInfo.pubkey); + } +} + +/** + * Withdraw withheld tokens from accounts + * + * @param connection Connection to use + * @param payer Payer of the transaction fees + * @param mint The token mint + * @param destination The destination account + * @param authority The mint's withdraw withheld tokens authority + * @param multiSigners Signing accounts if `owner` is a multisig + * @param sources Source accounts from which to withdraw withheld fees + * @param confirmOptions Options for confirming the transaction + * @param programId SPL Token program account + * + * @return Signature of the confirmed transaction + */ +await withdrawWithheldTokensFromAccounts( + connection, + payer, + mint, + feeVaultAccount, + authority, + [], + accountsToWithdrawFrom, + { commitment: "finalized" }, + TOKEN_2022_PROGRAM_ID, +); +``` + +#### Harvesting fees + +The second approach we call "harvesting" - this is a permissionless function +meaning anyone can call it. This approach is great for "cranking" the harvest +instruction with tools like [clockwork](https://www.clockwork.xyz/). The +difference is when we harvest, the withheld tokens get stored in the mint +itself. Then the `withdrawWithheldAuthority` can withdraw the tokens from the +mint at any point. + +To harvest: + +1. gather all of the accounts you want to harvest from (same flow as above) +2. call `harvestWithheldTokensToMint` +3. 
To withdraw from the mint, call `withdrawWithheldTokensFromMint` + +```ts +/** + * Harvest withheld tokens from accounts to the mint + * + * @param connection Connection to use + * @param payer Payer of the transaction fees + * @param mint The token mint + * @param sources Source accounts from which to withdraw withheld fees + * @param confirmOptions Options for confirming the transaction + * @param programId SPL Token program account + * + * @return Signature of the confirmed transaction + */ +await harvestWithheldTokensToMint( + connection, + payer, + mint, + accountsToHarvestFrom, + { commitment: "finalized" }, + TOKEN_2022_PROGRAM_ID, +); + +/** + * Withdraw withheld tokens from mint + * + * @param connection Connection to use + * @param payer Payer of the transaction fees + * @param mint The token mint + * @param destination The destination account + * @param authority The mint's withdraw withheld tokens authority + * @param multiSigners Signing accounts if `owner` is a multisig + * @param confirmOptions Options for confirming the transaction + * @param programId SPL Token program account + * + * @return Signature of the confirmed transaction + */ +await withdrawWithheldTokensFromMint( + connection, + payer, + mint, + feeVaultAccount, + authority, + [], + { commitment: "finalized" }, + TOKEN_2022_PROGRAM_ID, +); +``` + +### Updating fees + +As of right now there is no way to set the transfer fee post +[creation with the JS library](https://solana.stackexchange.com/questions/7775/spl-token-2022-how-to-modify-transfer-fee-configuration-for-an-existing-mint). +However you can from the CLI assuming the result of `solana config` wallet is +the `transferFeeConfigAuthority`: + +```bash +solana address +## The result of ^ needs to be the `transferFeeConfigAuthority` +spl-token set-transfer-fee +``` + +### Updating authorities + +If you'd like to change the `transferFeeConfigAuthority` or the +`withdrawWithheldAuthority` you can with the `setAuthority` function. Just pass +in the correct accounts and the `authorityType`, which in these cases are: +`TransferFeeConfig` and `WithheldWithdraw`, respectively. + +```ts +/** + * Assign a new authority to the account + * + * @param connection Connection to use + * @param payer Payer of the transaction fees + * @param account Address of the account + * @param currentAuthority Current authority of the specified type + * @param authorityType Type of authority to set + * @param newAuthority New authority of the account + * @param multiSigners Signing accounts if `currentAuthority` is a multisig + * @param confirmOptions Options for confirming the transaction + * @param programId SPL Token program account + * + * @return Signature of the confirmed transaction + */ + +await setAuthority( + connection, + payer, + mint, + currentAuthority, + AuthorityType.TransferFeeConfig, // or AuthorityType.WithheldWithdraw + newAuthority, + [], + undefined, + TOKEN_2022_PROGRAM_ID, +); +``` + +## Lab + +In this lab, we are going to create a transfer fee configured mint. We'll use a +fee vault to hold the transfer fees, and we'll collect the fees using both the +direct and the harvesting methods. + +#### 1. Getting started + +To get started, create an empty directory named `transfer-fee` and navigate to +it. We'll be initializing a brand new project. Run `npm init` and follow through +the prompts. + +Next, we'll need to add our dependencies. 
Run the following to install the +required packages: + +```bash +npm i @solana-developers/helpers@2 @solana/spl-token @solana/web3.js@1 esrun dotenv typescript +``` + +Create a directory named `src`. In this directory, create a file named +`index.ts`. This is where we will run checks against the rules of this +extension. Paste the following code in `index.ts`: + +```ts +import { Connection, Keypair } from "@solana/web3.js"; +import { initializeKeypair } from "@solana-developers/helpers"; +import { transferCheckedWithFee } from "@solana/spl-token"; + +/** + * Create a connection and initialize a keypair if one doesn't already exists. + * If a keypair exists, airdrop a SOL token if needed. + */ +const connection = new Connection("http://127.0.0.1:8899"); +const payer = await initializeKeypair(connection); + +console.log(`public key: ${payer.publicKey.toBase58()}`); + +const mintKeypair = Keypair.generate(); +const mint = mintKeypair.publicKey; +console.log("\nmint public key: " + mintKeypair.publicKey.toBase58() + "\n\n"); + +// CREATE MINT WITH TRANSFER FEE + +// CREATE FEE VAULT ACCOUNT + +// CREATE A SOURCE ACCOUNT AND MINT TOKEN + +// CREATE DESTINATION ACCOUNT + +// TRANSFER TOKENS + +// FETCH ACCOUNTS WITH WITHHELD TOKENS + +// WITHDRAW WITHHELD TOKENS + +// VERIFY UPDATED FEE VAULT BALANCE + +// HARVEST WITHHELD TOKENS TO MINT + +// WITHDRAW HARVESTED TOKENS + +// VERIFY UPDATED FEE VAULT BALANCE +``` + +`index.ts` has a main function that creates a connection to the specified +validator node and calls `initializeKeypair`. This `main` function is where +we'll be writing our script. + +Go ahead and run the script. You should see the `mint` public key logged to your +terminal. + +```bash +esrun src/index.ts +``` + +If you run into an error in `initializeKeypair` with airdropping, follow the +next step. + +#### 2. Run validator node + +For the sake of this guide, we'll be running our own validator node. + +In a separate terminal, run the following command: `solana-test-validator`. This +will run the node and also log out some keys and values. The value we need to +retrieve and use in our connection is the JSON RPC URL, which in this case is +`http://127.0.0.1:8899`. We then use that in the connection to specify to use +the local RPC URL. + +```typescript +const connection = new Connection("http://127.0.0.1:8899", "confirmed"); +``` + +Alternatively, if you'd like to use testnet or devnet, import the +`clusterApiUrl` from `@solana/web3.js` and pass it to the connection as such: + +```typescript +const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); +``` + +If you decide to use devnet, and have issues with airdropping SOL. Feel free to +add the `keypairPath` parameter to `initializeKeypair`. You can get this from +running `solana config get` in your terminal. And then go to +[faucet.solana.com](https://faucet.solana.com/) and airdrop some sol to your +address. You can get your address from running `solana address` in your +terminal. + +#### 3. Create a mint with transfer fee + +Let's create a function `createMintWithTransferFee` in a new file +`src/create-mint.ts`. + +To create a mint with the `transfer fee` extension, we need three instructions: +`SystemProgram.createAccount`, `createInitializeTransferFeeConfigInstruction` +and `createInitializeMintInstruction`. 
+ +We'll also want the our new `createMintWithTransferFee` function to have +following arguments: + +- `connection` : The connection object +- `payer` : Payer for the transaction +- `mintKeypair` : Keypair for the new mint +- `decimals` : Mint decimals +- `feeBasisPoints` : Fee basis points for the transfer fee +- `maxFee` : Maximum fee points for the transfer fee + +```ts +import { + sendAndConfirmTransaction, + Connection, + Keypair, + SystemProgram, + Transaction, + TransactionSignature, +} from "@solana/web3.js"; + +import { + ExtensionType, + createInitializeMintInstruction, + getMintLen, + TOKEN_2022_PROGRAM_ID, + createInitializeTransferFeeConfigInstruction, +} from "@solana/spl-token"; + +export async function createMintWithTransferFee( + connection: Connection, + payer: Keypair, + mintKeypair: Keypair, + decimals: number, + feeBasisPoints: number, + maxFee: bigint, +): Promise { + const extensions = [ExtensionType.TransferFeeConfig]; + const mintLength = getMintLen(extensions); + + const mintLamports = + await connection.getMinimumBalanceForRentExemption(mintLength); + + console.log("Creating a transaction with transfer fee instruction..."); + const mintTransaction = new Transaction().add( + SystemProgram.createAccount({ + fromPubkey: payer.publicKey, + newAccountPubkey: mintKeypair.publicKey, + space: mintLength, + lamports: mintLamports, + programId: TOKEN_2022_PROGRAM_ID, + }), + createInitializeTransferFeeConfigInstruction( + mintKeypair.publicKey, + payer.publicKey, + payer.publicKey, + feeBasisPoints, + maxFee, + TOKEN_2022_PROGRAM_ID, + ), + createInitializeMintInstruction( + mintKeypair.publicKey, + decimals, + payer.publicKey, + null, + TOKEN_2022_PROGRAM_ID, + ), + ); + + console.log("Sending transaction..."); + const signature = await sendAndConfirmTransaction( + connection, + mintTransaction, + [payer, mintKeypair], + { commitment: "finalized" }, + ); + console.log("Transaction sent"); + + return signature; +} +``` + +Now let's import and call our new function in `src/index.ts`. We'll create a +mint that has nine decimal points, 1000 fee basis points (10%), and a max fee +of 5000. + +```ts +// CREATE MINT WITH TRANSFER FEE +const decimals = 9; +const feeBasisPoints = 1000; +const maxFee = BigInt(5000); + +await createMintWithTransferFee( + connection, + payer, + mintKeypair, + decimals, + feeBasisPoints, + maxFee, +); +``` + +Run the script to make sure it's working so far. + +```bash +esrun src/index.ts +``` + +#### 4. Create a fee vault account + +Before we transfer any tokens and accrue transfer fees, let's create a "fee +vault" that will be the final recipient of all transfer fees. + +For simplicity, let's make the fee vault the associated token account (ATA) of +our payer. + +```ts +// CREATE FEE VAULT ACCOUNT +console.log("\nCreating a fee vault account..."); + +const feeVaultAccount = await createAssociatedTokenAccount( + connection, + payer, + mintKeypair.publicKey, + payer.publicKey, + { commitment: "finalized" }, + TOKEN_2022_PROGRAM_ID, +); + +const initialBalance = ( + await connection.getTokenAccountBalance(feeVaultAccount, "finalized") +).value.amount; + +console.log("Current fee vault balance: " + initialBalance + "\n\n"); +``` + +Let's run the script again, we should have a zero balance. + +```bash +esrun src/index.ts +``` + +#### 5. Create two token accounts and mint to one + +Let's now create two test token accounts we'll call the `source` and +`destination` accounts. Then let's mint some tokens to the `source`. 
+ +We can do this by calling `createAccount` and `mintTo`. + +We'll mint 10 full tokens. + +```ts +// CREATE TEST ACCOUNTS AND MINT TOKENS +console.log("Creating source account..."); + +const sourceKeypair = Keypair.generate(); +const sourceAccount = await createAccount( + connection, + payer, + mint, + sourceKeypair.publicKey, + undefined, + { commitment: "finalized" }, + TOKEN_2022_PROGRAM_ID, +); + +console.log("Creating destination account..."); + +const destinationKeypair = Keypair.generate(); +const destinationAccount = await createAccount( + connection, + payer, + mint, + destinationKeypair.publicKey, + undefined, + { commitment: "finalized" }, + TOKEN_2022_PROGRAM_ID, +); + +console.log("Minting 10 tokens to source...\n\n"); + +const amountToMint = 10 * 10 ** decimals; + +await mintTo( + connection, + payer, + mint, + sourceAccount, + payer, + amountToMint, + [payer], + { commitment: "finalized" }, + TOKEN_2022_PROGRAM_ID, +); +``` + +If you'd like, run the script to check that everything is working: + +```bash +esrun src/index.ts +``` + +#### 6. Transfer one token + +Now, let's transfer 1 token from our `sourceAccount` to our `destinationAccount` +and see what happens. + +To transfer a token with the `transfer fee` extension enabled, we have to call +`transferCheckedWithFee`. This requires us to decide how much we want to send, +and to calculate the correct fee associated. + +To do this, we can do a little math: + +First, to send one full token is actually sending `1 * (10 ^ decimals)` tokens. +In Solana programming, we always specify amounts to be transferred, minted or +burned in their smallest unit. To send one SOL to someone, we actually send +`1 * 10 ^ 9` lamports. Another way to look at it is if you wanted to send one US +dollar, you're actually sending 100 pennies. + +Now, we can take the resulting amount: `1 * (10 ^ decimals)` and calculate the +fee using the basis points. We can do this by taking the `transferAmount` +multiplying it by the `feeBasisPoints` and dividing by `10_000` (the definition +of a fee basis point). + +Lastly, we need to check if the fee is more than the max fee, if it is, then we +call `transferCheckedWithFee` with our max fee. + +```ts +const transferAmount = BigInt(1 * 10 ** decimals); +const basisPointFee = + (transferAmount * BigInt(feeBasisPoints)) / BigInt(10_000); +const fee = basisPointFee > maxFee ? maxFee : basisPointFee; +``` + +With all of this information, take a second, what do you think the final +balances and withheld amounts for this transaction will be? 
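+
+If you'd like to check your math first, here is how the numbers work out with
+this lab's values (9 decimals, 1000 fee basis points, max fee of 5000). Treat
+this as a sketch; running the script below will confirm the result:
+
+```ts
+const expectedTransfer = BigInt(1 * 10 ** decimals); // 1_000_000_000
+const uncappedFee = (expectedTransfer * BigInt(feeBasisPoints)) / BigInt(10_000); // 100_000_000
+const expectedFee = uncappedFee > maxFee ? maxFee : uncappedFee; // capped at 5_000
+// Source:      10_000_000_000 - 1_000_000_000 = 9_000_000_000
+// Destination:  1_000_000_000 - 5_000         =   999_995_000 (5_000 withheld)
+```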
+ +Now, let's transfer one of our tokens and print out the resulting balances: + +```ts +// TRANSFER TOKENS +console.log("Transferring with fee transaction..."); + +const transferAmount = BigInt(1 * 10 ** decimals); +const fee = (transferAmount * BigInt(feeBasisPoints)) / BigInt(10_000); + +const transferSignature = await transferCheckedWithFee( + connection, + payer, + sourceAccount, + mint, + destinationAccount, + sourceKeypair.publicKey, + transferAmount, + decimals, + fee, + [sourceKeypair], + { commitment: "finalized" }, + TOKEN_2022_PROGRAM_ID, +); + +const sourceAccountAfterTransfer = await getAccount( + connection, + sourceAccount, + undefined, + TOKEN_2022_PROGRAM_ID, +); + +const destinationAccountAfterTransfer = await getAccount( + connection, + destinationAccount, + undefined, + TOKEN_2022_PROGRAM_ID, +); + +const withheldAmountAfterTransfer = getTransferFeeAmount( + destinationAccountAfterTransfer, +); + +console.log(`Source Token Balance: ${sourceAccountAfterTransfer.amount}`); +console.log( + `Destination Token Balance: ${destinationAccountAfterTransfer.amount}`, +); +console.log( + `Withheld Transfer Fees: ${withheldAmountAfterTransfer?.withheldAmount}\n`, +); +``` + +Go ahead and run the script: + +```bash +esrun src/index.ts +``` + +You should get the following: + +```bash +Transferring with fee transaction... +Source Token Balance: 9000000000 +Destination Token Balance: 999995000 +Withheld Transfer Fees: 5000 +``` + +A little breakdown: + +Our fee basis points are 1000, meaning 10% of the amount transferred should be +used as a fee. In this case 10% of 1,000,000,000 is 100,000,000, which is way +bigger than our 5000 max fee. So that's why we see 5000 withheld. Additionally, +note that the receiver is the one who "pays" for the transfer fee. + + + +From now on, to calculate fees, you may want to use the +`calculateFee` helper function. We did it manually for demonstration purposes. +The following is one way to accomplish this: + +```ts +const transferAmount = BigInt(1 * 10 ** decimals); +const mintAccount = await getMint( + connection, + mint, + undefined, + TOKEN_2022_PROGRAM_ID, +); +const transferFeeAmount = getTransferFeeConfig(mintAccount); +const fee = calculateFee( + transferFeeAmount?.newerTransferFee!, + secondTransferAmount, +); +``` + + + + + +#### 7. Withdrawing fees + +There are two ways in which we can collect fees from the recipient's account +into the fee vault. The first one is withdrawing the withheld fees directly from +the recipient's account itself to the fee vault account using +`withdrawWithheldTokensFromAccounts`. The second approach is "harvesting" the +fees from the recipient's account to the mint with `harvestWithheldTokensToMint` +and then withdrawing it from the mint to the fee vault account with +`withdrawWithheldTokensFromMint`. + +#### 7.1 Withdraw fees directly from the recipient accounts + +First, let's withdraw the fees directly. We can accomplish this by calling +`withdrawWithheldTokensFromAccounts`. This is a permissioned function, meaning +only the `withdrawWithheldAuthority` can sign for it. 
+ +The `withdrawWithheldTokensFromAccounts` function takes the following +parameters: + +- `connection`: The connection to use +- `payer`: The payer keypair of the transaction fees +- `mint`: The token mint +- `destination`: The destination account - in our case, the fee vault +- `authority`: The mint's withdraw withheld tokens authority - in our case, the + payer +- `multiSigners`: Signing accounts if `owner` is a multisig +- `sources`: Source accounts from which to withdraw withheld fees +- `confirmOptions`: Options for confirming the transaction +- `programId`: SPL Token program account - in our case `TOKEN_2022_PROGRAM_ID` + +Now, let's directly withdraw the fees from the destination account and check the +resulting balances: + +```ts +// DIRECTLY WITHDRAW +await withdrawWithheldTokensFromAccounts( + connection, + payer, + mint, + feeVaultAccount, + payer.publicKey, + [], + [destinationAccount], + undefined, + TOKEN_2022_PROGRAM_ID, +); + +const withheldAccountAfterWithdraw = await getAccount( + connection, + destinationAccount, + undefined, + TOKEN_2022_PROGRAM_ID, +); + +const withheldAmountAfterWithdraw = getTransferFeeAmount( + withheldAccountAfterWithdraw, +); + +const feeVaultAfterWithdraw = await getAccount( + connection, + feeVaultAccount, + undefined, + TOKEN_2022_PROGRAM_ID, +); + +console.log( + `Withheld amount after withdraw: ${withheldAmountAfterWithdraw?.withheldAmount}`, +); +console.log( + `Fee vault balance after withdraw: ${feeVaultAfterWithdraw.amount}\n`, +); +``` + +Go ahead and run the script: + +```bash +esrun src/index.ts +``` + +You should get the following: + +```bash +Withheld amount after withdraw: 0 +Fee vault balance after withdraw: 5000 +``` + + + +The `withdrawWithheldTokensFromAccounts` can also be used +to collect all fees from all token accounts, if you fetch them all first. +Something like the following would work: + +```ts +const accounts = await connection.getProgramAccounts(TOKEN_2022_PROGRAM_ID, { + commitment: "finalized", + filters: [ + { + memcmp: { + offset: 0, + bytes: mint.toString(), + }, + }, + ], +}); + +const accountsToWithdrawFrom = []; +for (const accountInfo of accounts) { + const unpackedAccount = unpackAccount( + accountInfo.pubkey, + accountInfo.account, + TOKEN_2022_PROGRAM_ID, + ); + + const transferFeeAmount = getTransferFeeAmount(unpackedAccount); + if ( + transferFeeAmount != null && + transferFeeAmount.withheldAmount > BigInt(0) + ) { + accountsToWithdrawFrom.push(accountInfo.pubkey); + } +} + +await withdrawWithheldTokensFromAccounts( + connection, + payer, + mint, + feeVaultAccount, + payer.publicKey, + [], + accountsToWithdrawFrom, + undefined, + TOKEN_2022_PROGRAM_ID, +); +``` + + + + + +#### 7.2 Harvest and then withdraw + +Let's look at the second option to retrieving the withheld fees: "harvesting". +The difference here is that instead of withdrawing the fees directly, we +"harvest" them back to the mint itself using `harvestWithheldTokensToMint`. This +is a permissionless function, meaning anyone can call it. This is useful if you +use something like [clockwork](https://www.clockwork.xyz/) to automate these +harvesting functions. + +After the fees are harvested to the mint account, we can call +`withdrawWithheldTokensFromMint` to transfer these tokens into our fee vault. +This function is permissioned and we need the `withdrawWithheldAuthority` to +sign for it. + +To do this, we need to transfer some more tokens to accrue more fees. 
This time, +we're going to take a shortcut and use the `transferChecked` function instead. +This will automatically calculate our fees for us. Then we'll print out the +balances to see where we are at: + +```ts +// TRANSFER TOKENS PT2 +console.log("Transferring with fee transaction pt2..."); + +const secondTransferAmount = BigInt(1 * 10 ** decimals); +const secondTransferSignature = await transferChecked( + connection, + payer, + sourceAccount, + mint, + destinationAccount, + sourceKeypair, + secondTransferAmount, + decimals, // Can also be gotten by getting the mint account details with `getMint(...)` + [], + undefined, + TOKEN_2022_PROGRAM_ID, +); + +const sourceAccountAfterSecondTransfer = await getAccount( + connection, + sourceAccount, + undefined, + TOKEN_2022_PROGRAM_ID, +); + +const destinationAccountAfterSecondTransfer = await getAccount( + connection, + destinationAccount, + undefined, + TOKEN_2022_PROGRAM_ID, +); + +const withheldAmountAfterSecondTransfer = getTransferFeeAmount( + destinationAccountAfterTransfer, +); + +console.log(`Source Token Balance: ${sourceAccountAfterSecondTransfer.amount}`); +console.log( + `Destination Token Balance: ${destinationAccountAfterSecondTransfer.amount}`, +); +console.log( + `Withheld Transfer Fees: ${withheldAmountAfterSecondTransfer?.withheldAmount}\n`, +); +``` + +Now, let's harvest the tokens back to the mint account. We will do this using +the `harvestWithheldTokensToMint` function. This function takes the following +parameters: + +- `connection`: Connection to use +- `payer`: Payer of the transaction fees +- `mint`: The token mint +- `sources`: Source accounts from which to withdraw withheld fees +- `confirmOptions`: Options for confirming the transaction +- `programId`: SPL Token program account + +Then we'll check the resulting balances. However, since the withheld amount will +now be stored in the mint, we have to fetch the mint account with `getMint` and +then read the `transfer fee` extension data on it by calling +`getTransferFeeConfig`: + +```ts +// HARVEST WITHHELD TOKENS TO MINT +await harvestWithheldTokensToMint( + connection, + payer, + mint, + [destinationAccount], + undefined, + TOKEN_2022_PROGRAM_ID, +); + +const withheldAccountAfterHarvest = await getAccount( + connection, + destinationAccount, + undefined, + TOKEN_2022_PROGRAM_ID, +); + +const withheldAmountAfterHarvest = getTransferFeeAmount( + withheldAccountAfterHarvest, +); + +const mintAccountAfterHarvest = await getMint( + connection, + mint, + undefined, + TOKEN_2022_PROGRAM_ID, +); + +const mintTransferFeeConfigAfterHarvest = getTransferFeeConfig( + mintAccountAfterHarvest, +); + +console.log( + `Withheld amount after harvest: ${withheldAmountAfterHarvest?.withheldAmount}`, +); +console.log( + `Mint withheld amount after harvest: ${mintTransferFeeConfigAfterHarvest?.withheldAmount}\n`, +); +``` + +Lastly, let's withdraw these fees from the mint itself using the +`withdrawWithheldTokensFromMint` function. 
This function takes the following +parameters: + +- `connection`: Connection to use +- `payer`: Payer of the transaction fees +- `mint`: The token mint +- `destination`: The destination account +- `authority`: The mint's withdraw withheld tokens authority +- `multiSigners`: Signing accounts if `owner` is a multisig +- `confirmOptions`: Options for confirming the transaction +- `programId`: SPL Token program account + +After that, let's check the balances: + +```ts +// WITHDRAW HARVESTED TOKENS +await withdrawWithheldTokensFromMint( + connection, + payer, + mint, + feeVaultAccount, + payer, + [], + undefined, + TOKEN_2022_PROGRAM_ID, +); + +const mintAccountAfterSecondWithdraw = await getMint( + connection, + mint, + undefined, + TOKEN_2022_PROGRAM_ID, +); + +const mintTransferFeeConfigAfterSecondWithdraw = getTransferFeeConfig( + mintAccountAfterSecondWithdraw, +); + +const feeVaultAfterSecondWithdraw = await getAccount( + connection, + feeVaultAccount, + undefined, + TOKEN_2022_PROGRAM_ID, +); + +console.log( + `Mint withheld balance after second withdraw: ${mintTransferFeeConfigAfterSecondWithdraw?.withheldAmount}`, +); +console.log( + `Fee Vault balance after second withdraw: ${feeVaultAfterSecondWithdraw.amount}`, +); +``` + +Now, let's run it. + +```bash +esrun src/index.ts +``` + +You should see the balances after every step of the way. + +That's it! We have successfully created a mint with a transfer fee. If you get +stuck at any point, you can find the working code in the `solution` branch of +[this repository](https://github.com/Unboxed-Software/solana-lab-transfer-fee/tree/solution). + +#### Challenge + +Create a transfer fee enabled mint and transfer some tokens with different +decimals, fee transfer points and max fees. diff --git a/content/courses/token-extensions/transfer-hook.mdx b/content/courses/token-extensions/transfer-hook.mdx new file mode 100644 index 000000000..a5a4fa122 --- /dev/null +++ b/content/courses/token-extensions/transfer-hook.mdx @@ -0,0 +1,1875 @@ +--- +title: Transfer Hook +objectives: + - Create a program that applies the "transfer-hook" interface + - Create a mint with a transfer hook + - Transfer a token with a transfer hook successfully +description: + "Create a token that invokes a function in an onchain program whenever the + token in transferred." +--- + +## Summary + +- The `transfer hook` extension allows developers to run custom logic on their + tokens on every transfer + +- When a token has a transfer hook, the Token Extensions Program will invoke the + transfer hook instruction on every token transfer + +- For the program to be able to act as a transfer hook program, it needs to + implement the `TransferHook` interface + +- Transfer hooks may use additional accounts beyond those involved in a normal, + non-hooked transfer. These are called 'extra accounts' and must be provided by + the transfer instruction, and are set up in in `extra-account-metas` when + creating the token mint. + +- Within the transfer hook CPI, the sender, mint, receiver and owner are all + de-escalated, meaning they are read-only to the hook. Meaning none of those + accounts can sign or be written to. + +## Overview + +The `transfer-hook` extension allows custom onchain logic to be run after each +transfer within the same transaction. 
More specifically, the `transfer-hook` +extension requires a 'hook' or 'callback' in the form of a Solana program +following the +[Transfer Hook Interface](https://github.com/solana-labs/solana-program-library/tree/master/token/transfer-hook/interface). +Then every time any token of that mint is transferred the Token Extensions +Program calls this 'hook' as a CPI. + +Additionally, the `transfer-hook` extension also stores `extra-account-metas`, +which are any additional accounts needed for the hook to function. + +This extension allows many new use cases, including: + +- Enforcing artist royalty payments to transfer NFTs. +- Stopping tokens from being transferred to known bad actors (blocklists). +- Requiring accounts to own a particular NFT to receive a token (allowlists). +- Token analytics. + +In this lesson, we'll explore how to implement transfer hooks onchain and work +with them in the frontend. + +### Implementing transfer hooks onchain + +The first part of creating a mint with a `transfer hook` is to find or create an +onchain program that follows the +[Transfer Hook Interface](https://github.com/solana-labs/solana-program-library/tree/master/token/transfer-hook/interface). + +The +[Transfer Hook Interface](https://github.com/solana-labs/solana-program-library/blob/master/token/transfer-hook/interface/src/instruction.rs) +specifies the transfer hook program includes: + +- `Execute` (required): An instruction handler that the Token Extensions Program + invokes on every token transfer + +- `InitializeExtraAccountMetaList` (optional): creates an account + (`extra_account_meta_list`) that stores a list of additional accounts (i.e. + those needed by the transfer hook program, beyond the accounts needed for a + simple transfer) required by the `Execute` instruction + +- `UpdateExtraAccountMetaList` (optional): updates the list of additional + accounts by overwriting the existing list + +Technically it's not required to implement the `InitializeExtraAccountMetaList` +instruction using the interface, but it's still required to have the +`extra_account_meta_list` account. This account can be created by any +instruction on a Transfer Hook program. However, the Program Derived Address +(PDA) for the account must be derived using the following seeds: + +- The hard-coded string `extra-account-metas` + +- The Mint Account address + +- The Transfer Hook program ID + +```typescript +const [pda] = PublicKey.findProgramAddressSync( + [Buffer.from("extra-account-metas"), mint.publicKey.toBuffer()], + program.programId, // transfer hook program ID +); +``` + +By storing the extra accounts required by the `Execute` instruction in the +`extra_account_meta_list` PDA, these accounts can be automatically added to a +token transfer instruction from the client. We'll see how to do that in the +offchain section. + +#### 1. `initialize_extra_account_meta_list` instruction: + +When we transfer a token using the Token Extensions Program, the program will +examine our mint to determine if it has a transfer hook. If a transfer hook is +present, the Token Extensions Program will initiate a CPI (cross-program +invocation) to our transfer hook program. The Token Extensions Program will then +pass all the accounts in the transfer (including the extra accounts specified in +the `extra_account_meta_list`) to the transfer hook program. However, before +passing the 4 essential accounts (`sender`, `mint`, `receiver`, `owner`), it +will de-escalate them (i.e. remove the mutable or signing abilities for security +reasons). 
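To make this concrete, here is a minimal sketch (assuming the Anchor `TransferHook` accounts struct shown in the next section) of a hook handler that does nothing but log the de-escalated flags on the `owner` account: + +```rust +pub fn transfer_hook(ctx: Context<TransferHook>, _amount: u64) -> Result<()> { + // The four transfer accounts arrive with their signer and writable + // privileges stripped by the Token Extensions Program before the CPI. + let owner = ctx.accounts.owner.to_account_info(); + msg!("owner is_signer: {}", owner.is_signer); // false + msg!("owner is_writable: {}", owner.is_writable); // false + Ok(()) +} +```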
+ +In other words, when our hook receives these accounts, they will be read-only. +The transfer hook program cannot modify these accounts, nor can it sign any +transactions with them. Although we cannot alter or sign with any of these four +accounts, we can specify `is_signer` and `is_writable` to any of the additional +accounts in the `extra_account_meta_list` PDA. Additionally, we can use the +`extra_account_meta_list` PDA as a signer for any new data accounts specified in +the hook program. + +The `extra_account_meta_list` has to be created before any transfer occurs. It's +also worth noting that we can update the list of accounts in the +`extra_account_meta_list` by implementing and using the +`UpdateExtraAccountMetaList` instruction if necessary. + +The `extra_account_meta_list` is just a list of `ExtraAccountMeta`. Let's take a +look at the struct `ExtraAccountMeta` +[in the source code](https://github.com/solana-labs/solana-program-library/blob/4f1668510adef2117ee3c043bd26b79789e67c8d/libraries/tlv-account-resolution/src/account.rs#L90): + +```rust +impl ExtraAccountMeta { + /// Create a `ExtraAccountMeta` from a public key + /// This represents standard `AccountMeta` + pub fn new_with_pubkey( + pubkey: &Pubkey, + is_signer: bool, + is_writable: bool, + ) -> Result { + Ok(Self { + discriminator: 0, + address_config: pubkey.to_bytes(), + is_signer: is_signer.into(), + is_writable: is_writable.into(), + }) + } + + /// Create an `ExtraAccountMeta` PDA from a list of seeds + pub fn new_with_seeds( + seeds: &[Seed], + is_signer: bool, + is_writable: bool, + ) -> Result { + Ok(Self { + discriminator: 1, + address_config: Seed::pack_into_address_config(seeds)?, + is_signer: is_signer.into(), + is_writable: is_writable.into(), + }) + } + + /// Create an `ExtraAccountMeta` PDA for an external program from a list of seeds + /// This PDA belongs to a program elsewhere in the account list, rather + /// than the executing program. For a PDA on the executing program, use + /// `ExtraAccountMeta::new_with_seeds`. + pub fn new_external_pda_with_seeds( + program_index: u8, + seeds: &[Seed], + is_signer: bool, + is_writable: bool, + ) -> Result { + Ok(Self { + discriminator: program_index + .checked_add(U8_TOP_BIT) + .ok_or(AccountResolutionError::InvalidSeedConfig)?, + address_config: Seed::pack_into_address_config(seeds)?, + is_signer: is_signer.into(), + is_writable: is_writable.into(), + }) + } +``` + +We have three methods for creating an `ExtraAccountMeta`: + +1. `ExtraAccountMeta::new_with_pubkey` - For any normal account (not a program + account) + +2. `ExtraAccountMeta::new_with_seeds` - For a program account PDA from the + calling transfer hook program + +3. `ExtraAccountMeta::new_external_pda_with_seeds` - For a program account PDA + from a different external program + +Now that we know the accounts we can store them in `extra_account_meta_list`. +Let's talk about the `InitializeExtraAccountMetaList` instruction itself. For +most implementations, it should simply just create the `extra_account_meta_list` +account and load it up with any additional accounts it needs. + +Let's take a look at a simple example where we'll initialize an +`extra_account_meta_list` with two additional arbitrary accounts, `some_account` +and a `pda_account`. The `initialize_extra_account_meta_list` function will do +the following: + +1. Prepare the accounts we need to store in the `extra_account_meta_list` + account as a vector (we'll discuss that in-depth in a moment). + +2. 
Calculate the size and rent required to store the list of + `ExtraAccountMetas`. + +3. Make a CPI to the System Program to create an account and set the Transfer + Hook Program as the owner, and then initialize the account data to store the + list of `ExtraAccountMetas`. + +```rust +#[derive(Accounts)] +pub struct InitializeExtraAccountMetaList<'info> { + #[account(mut)] + payer: Signer<'info>, + + /// CHECK: ExtraAccountMetaList Account, must use these seeds + #[account( + mut, + seeds = [b"extra-account-metas", mint.key().as_ref()], + bump + )] + pub extra_account_meta_list: AccountInfo<'info>, + pub mint: InterfaceAccount<'info, Mint>, + + pub system_program: Program<'info, System>, + + // Accounts to add to the extra-account-metas + pub some_account: UncheckedAccount<'info>, + #[account(seeds = [b"some-seed"], bump)] + pub pda_account: UncheckedAccount<'info>, + +} + +pub fn initialize_extra_account_meta_list(ctx: Context<InitializeExtraAccountMetaList>) -> Result<()> { + let account_metas = vec![ + ExtraAccountMeta::new_with_pubkey(&ctx.accounts.some_account.key(), false, true)?, // is_signer false, is_writable true + ExtraAccountMeta::new_with_seeds( + &[ + Seed::Literal { + bytes: "some-seed".as_bytes().to_vec(), + }, + ], + true, // is_signer + true // is_writable + )?, + ]; + + // calculate account size + let account_size = ExtraAccountMetaList::size_of(account_metas.len())? as u64; + + // calculate minimum required lamports + let lamports = Rent::get()?.minimum_balance(account_size as usize); + + let mint = ctx.accounts.mint.key(); + let signer_seeds: &[&[&[u8]]] = &[&[b"extra-account-metas", &mint.as_ref(), &[ctx.bumps.extra_account_meta_list]]]; + + // create ExtraAccountMetaList account + create_account( + CpiContext::new(ctx.accounts.system_program.to_account_info(), CreateAccount { + from: ctx.accounts.payer.to_account_info(), + to: ctx.accounts.extra_account_meta_list.to_account_info(), + }).with_signer(signer_seeds), + lamports, + account_size, + ctx.program_id + )?; + + // initialize ExtraAccountMetaList account with extra accounts + ExtraAccountMetaList::init::<ExecuteInstruction>( + &mut ctx.accounts.extra_account_meta_list.try_borrow_mut_data()?, + &account_metas + )?; + + Ok(()) +} +``` + +Let's dive a little deeper into the `ExtraAccountMeta` you can store. + +You can directly store the account address, store the seeds to derive a PDA of +the program itself and store the seeds to derive a PDA for a program other than +the Transfer Hook program. + +The first method, `ExtraAccountMeta::new_with_pubkey`, is straightforward; you +just need an account address. You can pass it to the instruction or get it from +a library (like the system program or the token program), or you can even +hardcode it. + +However, the most interesting part here is storing the seeds, and it could +either be a PDA of the transfer hook program itself or a PDA of another program +like an associated token account. We can do both of them by using +`ExtraAccountMeta::new_with_seeds` and +`ExtraAccountMeta::new_external_pda_with_seeds`, respectively, and pass the +seeds to them. + +To learn how we could pass the seeds, let's take a look at the source code +itself: + +```rust +pub fn new_with_seeds( + seeds: &[Seed], + is_signer: bool, + is_writable: bool, +) + +pub fn new_external_pda_with_seeds( + program_index: u8, + seeds: &[Seed], + is_signer: bool, + is_writable: bool, +) +``` + +Both of these methods are similar; the only change is that we need to pass the +`program_index` of the owning program for PDAs that are not of our program in the +`new_external_pda_with_seeds` method. 
Other than that we need to provide a list +of seeds (which we'll talk about soon) and two booleans for `is_signer` and +`is_writable` to determine if the account should be a signer or writable. + +Providing the seeds themselves takes a little explanation. Hard-coded literal +seeds are easy enough, but what happens if you want a seed to be variable, say +created with the public key of a passed-in account? To make sense of this, let's +break it down to make it easier to understand. First, take a look at the seed +enum implementation from +[spl_tlv_account_resolution::seeds::Seed](https://github.com/solana-labs/solana-program-library/blob/master/libraries/tlv-account-resolution/src/seeds.rs): + +```rust +pub enum Seed + /// Uninitialized configuration byte space + Uninitialized, + /// A literal hard-coded argument + /// Packed as: + /// * 1 - Discriminator + /// * 1 - Length of literal + /// * N - Literal bytes themselves + Literal { + /// The literal value represented as a vector of bytes. + /// + /// For example, if a literal value is a string literal, + /// such as "my-seed", this value would be + /// `"my-seed".as_bytes().to_vec()`. + bytes: Vec, + }, + /// An instruction-provided argument, to be resolved from the instruction + /// data + /// Packed as: + /// * 1 - Discriminator + /// * 1 - Start index of instruction data + /// * 1 - Length of instruction data starting at index + InstructionData { + /// The index where the bytes of an instruction argument begin + index: u8, + /// The length of the instruction argument (number of bytes) + /// + /// Note: Max seed length is 32 bytes, so `u8` is appropriate here + length: u8, + }, + /// The public key of an account from the entire accounts list. + /// Note: This includes any extra accounts required. + /// + /// Packed as: + /// * 1 - Discriminator + /// * 1 - Index of account in the accounts list + AccountKey { + /// The index of the account in the entire accounts list + index: u8, + }, + /// An argument to be resolved from the inner data of some account + /// Packed as: + /// * 1 - Discriminator + /// * 1 - Index of account in the accounts list + /// * 1 - Start index of account data + /// * 1 - Length of account data starting at index + AccountData { + /// The index of the account in the entire accounts list + account_index: u8, + /// The index where the bytes of an account data argument begin + data_index: u8, + /// The length of the argument (number of bytes) + /// + /// Note: Max seed length is 32 bytes, so `u8` is appropriate here + length: u8, + }, +} +``` + +As we can see from the code above, there are four main ways to provide seeds: + +1. A literal hard-coded argument, such as the string `"some-seed"`. + +2. An instruction-provided argument, to be resolved from the instruction data. + This can be done by giving the start index and the length of the data we want + to have as a seed. + +3. The public key of an account from the entire accounts list. This can be done + by giving the index of the account (we'll talk about this more soon). + +4. An argument to be resolved from the inner data of some account. This can be + done by giving the index of the account, the start index of the data, along + with the length of the data we want to have as a seed. + +To use the 2 last methods of setting the seed, you need to get the account +index. This represents the index of the account passed into the `Execute` +function of the hook. 
The indexes are standardized: + +- index 0-3 will always be, `source`, `mint`, `destination`, and `owner` + respectively + +- index 4: will be the `extra_account_meta_list` + +- index 5+: will be in whatever order you create your `account_metas` + +```rust + // index 0-3 are the accounts required for token transfer (source, mint, destination, owner) + // index 4 is the extra_account_meta_list account + let account_metas = vec![ + // index 5 - some_account + ExtraAccountMeta::new_with_pubkey(&ctx.accounts.some_account.key(), false, true)?, + // index 6 - pda_account + ExtraAccountMeta::new_with_seeds( + &[ + Seed::Literal { + bytes: "some-seed".as_bytes().to_vec(), + }, + ], + true, // is_signer + true // is_writable + )?, + ]; +``` + +Now, let's say that the `pda_account` was created from "some-seed" and belonged +to `some_account`. This is where we can specify the account key index: + +```rust + // index 0-3 are the accounts required for token transfer (source, mint, destination, owner) + // index 4 is the extra_account_meta_list account + let account_metas = vec![ + // index 5 - some_account + ExtraAccountMeta::new_with_pubkey(&ctx.accounts.some_account.key(), false, true)?, + // index 6 - pda_account + ExtraAccountMeta::new_with_seeds( + &[ + Seed::AccountKey { + index: 5, // index of `some_account` + }, + Seed::Literal { + bytes: "some-seed".as_bytes().to_vec(), + }, + ], + true, // is_signer + true // is_writable + )?, + ]; +``` + +Note: remember that the accounts indexed 0-4 are defined by the `Execute` +function of the transfer hook. They are: `source`, `mint`, `destination`, +`owner`, `extra_account_meta_list` respectively. The first four of which, are +de-escalated, or read-only. These will always be read-only. If you try to be +sneaky and add any of these first four accounts into the +`extra_account_meta_list`, they will always be interpreted as read-only, even if +you specify them differently with `is_writable` or `is_signer`. + +#### 2. `transfer_hook` Instruction + +In Anchor, when the `Execute` function is called, it looks for and calls the +`transfer_hook` instruction. It is the place where we can implement our custom +logic for the token transfer. + +When the Token Extensions Program invokes our program, it will invoke this +instruction and pass to it all the accounts plus the amount of the transfer that +just happened. The first 5 accounts will always be `source`, `mint`, +`destination`, `owner`, `extraAccountMetaList`, and the rest are the extra +accounts that we added to the `ExtraAccountMetaList` account if there is any. + +Let's take a look at an example `TransferHook` struct for this instruction: + +```rust +// Order of accounts matters for this struct. +// The first 4 accounts are the accounts required for token transfer (source, mint, destination, owner) +// Remaining accounts are the extra accounts required from the ExtraAccountMetaList account +// These accounts are provided via CPI to this program from the Token Extensions Program +#[derive(Accounts)] +pub struct TransferHook<'info> { + #[account(token::mint = mint, token::authority = owner)] + pub source_token: InterfaceAccount<'info, TokenAccount>, + pub mint: InterfaceAccount<'info, Mint>, + #[account(token::mint = mint)] + pub destination_token: InterfaceAccount<'info, TokenAccount>, + /// CHECK: source token account owner + /// This account is not being checked because it is used for ownership validation within the `transfer_hook` instruction. 
+ pub owner: UncheckedAccount<'info>, + + /// CHECK: ExtraAccountMetaList Account, + /// This account list is not being checked because it is used dynamically within the program logic. + #[account(seeds = [b"extra-account-metas", mint.key().as_ref()], bump)] + pub extra_account_meta_list: UncheckedAccount<'info>, + + // Accounts to add to the extra-account-metas + pub some_account: UncheckedAccount<'info>, + #[account(seeds = [b"some-seed"], bump)] + pub pda_account: UncheckedAccount<'info>, +} +``` + +As mentioned in the comment, the order here matters; we need the first 5 +accounts as shown above, and then the rest of the accounts need to follow the +order of the accounts in the `extraAccountMetaList` account. + +Other than that, you can write any functionality you want in within the transfer +hook. But remember, if the hook fails, the entire transaction fails. + +```rust + pub fn transfer_hook(ctx: Context, amount: u64) -> Result<()> { + // do your logic here + Ok(()) + } +``` + +#### 3. Fallback + +One last caveat to the onchain portion of transfer hooks: when dealing with +Anchor, we need to specify a `fallback` instruction in the Anchor program to +handle the Cross-Program Invocation (CPI) from the Token Extensions Program. + +This is necessary because Anchor generates instruction discriminators +differently from the ones used in the Transfer Hook interface instructions. The +instruction discriminator for the `transfer_hook` instruction will not match the +one for the Transfer Hook interface. + +Next, versions of Anchor should solve this for us, but for now, we can implement +this simple workaround: + +```rust +// fallback instruction handler as work-around to anchor instruction discriminator check +pub fn fallback<'info>(program_id: &Pubkey, accounts: &'info [AccountInfo<'info>], data: &[u8]) -> Result<()> { + let instruction = TransferHookInstruction::unpack(data)?; + + // match instruction discriminator to transfer hook interface execute instruction + // token2022 program CPIs this instruction on token transfer + match instruction { + TransferHookInstruction::Execute { amount } => { + let amount_bytes = amount.to_le_bytes(); + + // invoke custom transfer hook instruction on our program + __private::__global::transfer_hook(program_id, accounts, &amount_bytes) + } + _ => { + return Err(ProgramError::InvalidInstructionData.into()); + } + } +} +``` + +### Using transfer hooks from the frontend + +Now that we've looked at the onchain portion, let's look at how we interact with +them in the frontend. + +Let's assume we have a deployed Solana program that follows the Transfer Hook +Interface. + +In order to create a mint with a transfer hook and ensure successful transfers, +follow these steps: + +1. Create the mint with the transfer hook extension and point to the onchain + transfer hook program you want to use. + +2. Initialize the `extraAccountList` account. This step must be done before any + transfer, and it is the responsibility of the mint owner/creator. It only + needs to happen once for each mint. + +3. Make sure to pass all the required accounts when invoking the transfer + instruction from the Token Extensions Program. + +#### Create a Mint with the `Transfer-Hook` Extension: + +To create a mint with the transfer-hook extension, we need three instructions: + +1. `createAccount` - Reserves space on the blockchain for the mint account + +2. 
`createInitializeTransferHookInstruction` - Initializes the transfer hook + extension; this takes the transfer hook's program address as a parameter. + +3. `createInitializeMintInstruction` - Initializes the mint. + +```ts +const extensions = [ExtensionType.TransferHook]; + +const mintLen = getMintLen(extensions); + +const lamports = await connection.getMinimumBalanceForRentExemption(mintLen); + +const transaction = new Transaction().add( + // Allocate the mint account + SystemProgram.createAccount({ + fromPubkey: wallet.publicKey, + newAccountPubkey: mint.publicKey, + space: mintLen, + lamports: lamports, + programId: TOKEN_2022_PROGRAM_ID, + }), + // Initialize the transfer hook extension and point to our program + createInitializeTransferHookInstruction( + mint.publicKey, + wallet.publicKey, + program.programId, // Transfer Hook Program ID + TOKEN_2022_PROGRAM_ID, + ), + // Initialize mint instruction + createInitializeMintInstruction(mint.publicKey, decimals, wallet.publicKey, null, TOKEN_2022_PROGRAM_ID), +); +``` + +#### Initialize `ExtraAccountMetaList` account: + +The next step in getting the mint ready for any transactions is initializing the +`ExtraAccountMetaList`. Generally, this is done by calling the +`initializeExtraAccountMetaList` function on the program containing the transfer +hook. Since this is part of the Transfer Hook Interface, this should be +standardized. Additionally, if the transfer hook program was made with Anchor, +it will most likely have autogenerated IDLs, which are TypeScript interfaces +that represent the instructions and accounts of the program. This makes it easy +to interact with the program from the client side. + +If you made your own program in Anchor, the IDLs will be in the `target/idl` +folder after compilation. Inside tests or client code you can access the methods +directly from `anchor.workspace.ProgramName.methods`: + +```ts +import * as anchor from "@coral-xyz/anchor"; + +const program = anchor.workspace.TransferHook as anchor.Program; +// now program.methods will give you the methods of the program +``` + +So, to initialize the `ExtraAccountMetaList`, all we need to do is call +`initializeExtraAccountMetaList` from `program.methods` and pass the right +accounts to it. You can use your editor's autocomplete to help with that: + +```ts +const initializeExtraAccountMetaListInstruction = await program.methods + .initializeExtraAccountMetaList() + .accounts({ + mint: mint.publicKey, + extraAccountMetaList: extraAccountMetaListPDA, + anotherMint: crumbMint.publicKey, + }) + .instruction(); + +const transaction = new Transaction().add( + initializeExtraAccountMetaListInstruction, +); +``` + +After calling `initializeExtraAccountMetaList`, you're all set to transfer +tokens with the transfer-hook-enabled mint. + +#### Transfer tokens successfully: + +To actually transfer tokens with the `transfer hook` extension, you need to call +`createTransferCheckedWithTransferHookInstruction`. This is a special helper +function provided by `@solana/spl-token` that will gather and append all of the +extra accounts that need to be specified in the `ExtraAccountMetaList`. 
+ +```ts +const transferInstruction = + await createTransferCheckedWithTransferHookInstruction( + connection, + sourceTokenAccount, + mint.publicKey, + destinationTokenAccount, + wallet.publicKey, + BigInt(1), // amount + 0, // Decimals + [], + "confirmed", + TOKEN_2022_PROGRAM_ID, + ); +``` + +Under the hood, the `createTransferCheckedWithTransferHookInstruction` method +will examine if the mint has a transfer hook, if it does it will get the extra +accounts and add them to the transfer instruction. +[Take a look at the source code](https://github.com/solana-labs/solana-program-library/blob/8ae0c89c12cf05d0787ee349dd5454e1dcbe4a4f/token/js/src/extensions/transferHook/instructions.ts#L261) + +```ts +/** + * Construct an transferChecked instruction with extra accounts for transfer hook + * + * @param connection Connection to use + * @param source Source account + * @param mint Mint to update + * @param destination Destination account + * @param owner Owner of the source account + * @param amount The amount of tokens to transfer + * @param decimals Number of decimals in transfer amount + * @param multiSigners The signer account(s) for a multisig + * @param commitment Commitment to use + * @param programId SPL Token program account + * + * @return Instruction to add to a transaction + */ +export async function createTransferCheckedWithTransferHookInstruction( + connection: Connection, + source: PublicKey, + mint: PublicKey, + destination: PublicKey, + owner: PublicKey, + amount: bigint, + decimals: number, + multiSigners: (Signer | PublicKey)[] = [], + commitment?: Commitment, + programId = TOKEN_PROGRAM_ID, +) { + const instruction = createTransferCheckedInstruction( + source, + mint, + destination, + owner, + amount, + decimals, + multiSigners, + programId, + ); + + const mintInfo = await getMint(connection, mint, commitment, programId); + const transferHook = getTransferHook(mintInfo); + + if (transferHook) { + await addExtraAccountMetasForExecute( + connection, + instruction, + transferHook.programId, + source, + mint, + destination, + owner, + amount, + commitment, + ); + } + + return instruction; +} +``` + +### Theoretical Example - Artist Royalties + +Let's take what we know about the `transfer hook` extension and conceptually try +to understand how we could implement artist royalties for NFTs. If you're not +familiar, an artist royalty is a fee paid on any sale of an NFT. Historically, +these were more suggestions than enforcements, since at any time, a user could +strike a private deal and exchange their NFT for payment on a platform or +program that did not enforce these royalties. That being said, we can get a +little closer with transfer hooks. + +**First Approach** - Transfer SOL right from the `owner` to the artist right in +the hook. Although this may sound like a good avenue to try, it won't work, for +two reasons. First, the hook would not know how much to pay the artist - this is +because the transfer hook does not take any arguments other than the needed +`source`, `mint`, `destination`, `owner`, `extraAccountMetaList`, and all of the +accounts within the list. Secondly, we would be paying from the `owner` to the +artist, which cannot be done since `owner` is deescalated. It cannot sign and it +cannot be written to - this means we don't have the authority to update +`owner`'s balance. Although we can't use this approach, it's a good way to +showcase the limitations of the transfer hook. 
**Second Approach** - Create a data PDA owned by the `extraAccountMetaList` that +tracks if the royalty has been paid. If it has, allow the transfer; if it has +not, deny it. This approach is multi-step and would require an additional +function in the transfer hook program. + +Say we have a new function called `payRoyalty` in our transfer hook program. +This function would be required to: + +1. Create a data PDA owned by the `extraAccountMetaList` + +a. This account would hold information about the trade + +2. Transfer the amount for the royalty from the `owner` to the artist. + +3. Update the data PDA with the sale information + +Then you'd transfer, and all the transfer hook should do is check the sales data +on the PDA. It would allow or disallow the transfer from there. + +Remember, the above is just a theoretical discussion and is in no way +all-encompassing. For example, how would you enforce the prices of the NFTs? Or, +what if the owner of the NFT wants to transfer it to a different wallet of +theirs - should there be an approved list of "allowed" wallets? Or, should the +artist be a signer involved in every sale/transfer? This system design makes for +a great homework assignment! + +## Lab + +In this lab we'll explore how transfer hooks work by creating a Cookie Crumb +program. We'll have a Cookie NFT that has a transfer hook which will mint a +Crumb SFT (NFT with a supply > 1) to the sender after each transfer - leaving a +"crumb trail". A fun side effect is that we'll be able to tell how many times this NFT +has been transferred just by looking at the crumb supply. + +### 0. Setup + +#### 1. Verify Solana/Anchor/Rust Versions + +We'll be interacting with the `Token Extensions Program` in this lab and that +requires you to have the Solana CLI version ≥ 1.18.1. + +To check your version, run: + +```bash +solana --version +``` + +If the version printed out after running `solana --version` is less than +`1.18.1` then you can update the CLI version manually. Note, at the time of +writing this, you cannot simply run the `solana-install update` command. This +command will not update the CLI to the correct version for us, so we have to +explicitly download version `1.18.1`. You can do so with the following command: + +```bash +solana-install init 1.18.1 +``` + +If you run into this error at any point attempting to build the program, that +likely means you do not have the correct version of the Solana CLI installed. + +```bash +anchor build +error: package `solana-program v1.18.1` cannot be built because it requires rustc 1.72.0 or newer, while the currently active rustc version is 1.68.0-dev +Run: +cargo update -p solana-program@1.18.0 --precise ver +where `ver` is the latest version of `solana-program` supporting rustc 1.68.0-dev +``` + +You will also want the latest version of the Anchor CLI installed. You can +follow the steps to +[update Anchor via avm](https://www.anchor-lang.com/docs/avm) + +or simply run + +```bash +avm install latest +avm use latest +``` + +At the time of writing, the latest version of the Anchor CLI is `0.30.1`. + +Now, we should have all the correct versions installed. + +#### 2. Get starter code + +Let's grab the starter branch. + +```bash +git clone https://github.com/Unboxed-Software/solana-lab-transfer-hooks +cd solana-lab-transfer-hooks +git checkout starter +``` + +#### 3. 
Update Program ID and Anchor Keypair + +Once in the starter branch, run + +```bash +anchor keys sync +``` + +This syncs your program key with the one in the `Anchor.toml` and the declared +program id in the `programs/transfer-hook/src/lib.rs` file. + +The last thing you have to do is set your keypair path in `Anchor.toml`: + +```toml +[provider] +cluster = "Localnet" +wallet = "~/.config/solana/id.json" +``` + +#### 4. Confirm the program builds + +Let's build the starter code to confirm we have everything configured correctly. +If it does not build, please revisit the steps above. + +```bash +anchor build +``` + +You can safely ignore the warnings of the build script, these will go away as we +add in the necessary code. But at the end, you should see a message like this: + +```bash +Finished release [optimized] target(s) +``` + +Feel free to run the provided tests to make sure the rest of the dev environment +is set up correctly. You'll have to install the node dependencies using `npm` or +`yarn`. The tests should run, but they'll all fail until we have completed our +program. + +```bash +yarn install +anchor test +``` + +We will be filling these tests in later. + +### 1. Write the transfer hook program + +In this section we'll dive into writing the onchain transfer hook program using +anchor, all the code will go into the `programs/transfer-hook/src/lib.rs` file. + +Take a look inside `lib.rs`, you'll notice we have some starter code: + +Three instructions + +- `initialize_extra_account_meta_list` + +- `transfer_hook` + +- `fallback` + +Two instruction account structs + +- `InitializeExtraAccountMetaList` + +- `TransferHook`. + +- The `initialize_extra_account_meta_list` function initializes the additional + accounts needed for the transfer hook. + +- The `transfer_hook` is the actual CPI called "after" the transfer has been + made. + +- The `fallback` is an anchor adapter function we have to fill out. + +We're going to look at each in depth. + +#### 1. Initialize Extra Account Meta List instruction + +The cookie transfer hook program needs some extra accounts to be able to mint +the crumbs within the `transfer_hook` function, these are: + +1. `crumb_mint` - The "crumb" mint account of the token to be minted by the + transfer_hook instruction. + +2. `crumb_mint_ata` - The associated token account of the crumb mint of the + person sending the cookie. + +3. `mint_authority` - For the crumb mint, this will be the account owned by the + transfer hook program + +4. `token_program` - this mint will be a regular SPL token mint. + +5. `associated_token_program` - needed to construct the ATA + +We are going to store these accounts in the `extra_account_meta_list` account, +by invoking the instruction `initialize_extra_account_meta_list` and passing the +required accounts to it. + +First, we have to build the struct `InitializeExtraAccountMetaList`, then we can +write the instruction itself. + +**`InitializeExtraAccountMetaList` Struct** + +The Instruction requires the following accounts: + +1. `extra_account_meta_list` - The PDA that will hold the extra account. + +2. `crumb_mint` - The mint account of the crumb token. + +3. `mint` - The mint account of the cookie NFT. + +4. `mint_authority` - The mint authority account of the crumb token. - This is a + PDA seeded by `b"mint-authority"` + +5. `payer` - The account that will pay for the creation of the + `extra_account_meta_list` account. + +6. `token_program` - The token program account. + +7. `system_program` - The system program account. 
+ +The code for the struct will go as follows: + +```rust +#[derive(Accounts)] +pub struct InitializeExtraAccountMetaList<'info> { + #[account(mut)] + payer: Signer<'info>, + + /// CHECK: ExtraAccountMetaList Account, must use these seeds + #[account( + mut, + seeds = [b"extra-account-metas", mint.key().as_ref()], + bump + )] + pub extra_account_meta_list: AccountInfo<'info>, + pub mint: InterfaceAccount<'info, Mint>, + pub token_program: Interface<'info, TokenInterface>, + pub system_program: Program<'info, System>, + + #[account(mint::authority = mint_authority)] + pub crumb_mint: InterfaceAccount<'info, Mint>, + + /// CHECK: mint authority Account for crumb mint + #[account(seeds = [b"mint-authority"], bump)] + pub mint_authority: UncheckedAccount<'info>, +} +``` + +Note that we are not specifying the `crumb_mint_ata` or the +`associated_token_program`. This is because the `crumb_mint_ata` is variable and +will be driven by the other accounts in the `extra_account_meta_list`, and +`associated_token_program` will be hardcoded. + +Also, notice we are asking Anchor to drive the `mint_authority` account from the +seed `b"mint-authority"`. The resulting PDA allows the program itself to sign +for the mint. + +**`initialize_extra_account_meta_list` Instruction** + +Let's write the `initialize_extra_account_meta_list` function, it will do the +following: + +1. List the accounts required for the transfer hook instruction inside a vector. + +2. Calculate the size and rent required to store the list of + `extra_account_meta_list`. + +3. Make a CPI to the System Program to create an account and set the Transfer + Hook Program as the owner. + +4. Initialize the account data to store the list of `extra_account_meta_list`. + +here is the code for it: + +```rust +pub fn initialize_extra_account_meta_list(ctx: Context) -> Result<()> { + // index 0-3 are the accounts required for token transfer (source, mint, destination, owner) + // index 4 is the extra_account_meta_list account + let account_metas = vec![ + // index 5, Token program + ExtraAccountMeta::new_with_pubkey(&token::ID, false, false)?, + // index 6, Associated Token program + ExtraAccountMeta::new_with_pubkey(&associated_token_id, false, false)?, + // index 7, crumb mint + ExtraAccountMeta::new_with_pubkey(&ctx.accounts.crumb_mint.key(), false, true)?, // is_writable true + // index 8, mint authority + ExtraAccountMeta::new_with_seeds( + &[ + Seed::Literal { + bytes: "mint-authority".as_bytes().to_vec(), + }, + ], + false, // is_signer + false // is_writable + )?, + // index 9, crumb mint ATA + ExtraAccountMeta::new_external_pda_with_seeds( + 6, // associated token program index + &[ + Seed::AccountKey { index: 3 }, // owner index + Seed::AccountKey { index: 5 }, // token program index + Seed::AccountKey { index: 7 }, // crumb mint index + ], + false, // is_signer + true // is_writable + )? + ]; + + // calculate account size + let account_size = ExtraAccountMetaList::size_of(account_metas.len())? 
as u64; + // calculate minimum required lamports + let lamports = Rent::get()?.minimum_balance(account_size as usize); + + let mint = ctx.accounts.mint.key(); + let signer_seeds: &[&[&[u8]]] = &[&[b"extra-account-metas", &mint.as_ref(), &[ctx.bumps.extra_account_meta_list]]]; + + // Create ExtraAccountMetaList account + create_account( + CpiContext::new(ctx.accounts.system_program.to_account_info(), CreateAccount { + from: ctx.accounts.payer.to_account_info(), + to: ctx.accounts.extra_account_meta_list.to_account_info(), + }).with_signer(signer_seeds), + lamports, + account_size, + ctx.program_id + )?; + + // Initialize the account data to store the list of ExtraAccountMetas + ExtraAccountMetaList::init::( + &mut ctx.accounts.extra_account_meta_list.try_borrow_mut_data()?, + &account_metas + )?; + + Ok(()) +} +``` + +Pay careful attention to the indexes for each account. Most notably, see that +`index 9` is the index for the `crumb_mint_ata` account. It constructs the ATA +using `ExtraAccountMeta::new_external_pda_with_seeds` and pass in the seeds from +other accounts by their index. Specifically, the ATA belongs to whatever `owner` +calls the transfer. So when a cookie is sent, the crumb will be minted to the +sender. + +#### 2. Transfer Hook instruction + +In this step, we'll implement the `transfer_hook` instruction. This instruction +will be called by the Token Extensions Program when a token transfer occurs. + +The `transfer_hook` instruction will mint one crumb token each time a cookie +transfer occurs. + +Again we'll have a struct `TransferHook` that will hold the accounts required +for the instruction. + +**`TransferHook` Struct** + +In our program the `TransferHook` struct will have 10 accounts: + +1. `source_token` - The source token account from which the NFT is transferred. + +2. `mint` - The mint account of the Cookie NFT. + +3. `destination_token` - The destination token account to which the NFT is + transferred. + +4. `owner` - The owner of the source token account. + +5. `extra_account_meta_list` - The ExtraAccountMetaList account that stores the + additional accounts required by the transfer_hook instruction + +6. `token_program` - The token program account. + +7. `associated_token_program` - The associated token program account. + +8. `crumb_mint` - The mint account of the token to be minted by the + transfer_hook instruction. + +9. `mint_authority` - The mint authority account of the token to be minted by + the transfer_hook instruction. + +10. `crumb_mint_ata` - The `owner`'s ATA of the crumb mint + +> Very Important Note: The order of accounts in this struct matters. This is the +> order in which the Token Extensions Program provides these accounts when it +> invokes this Transfer Hook program. + +Here is the instruction struct: + +```rust +// Order of accounts matters for this struct. 
+// The first 4 accounts are the accounts required for token transfer (source, mint, destination, owner) +// Remaining accounts are the extra accounts required from the ExtraAccountMetaList account +// These accounts are provided via CPI to this program from the Token Extensions Program + +#[derive(Accounts)] +pub struct TransferHook<'info> { + #[account(token::mint = mint, token::authority = owner)] + pub source_token: InterfaceAccount<'info, TokenAccount>, + pub mint: InterfaceAccount<'info, Mint>, + #[account(token::mint = mint)] + pub destination_token: InterfaceAccount<'info, TokenAccount>, + /// CHECK: source token account owner + pub owner: UncheckedAccount<'info>, + /// CHECK: ExtraAccountMetaList Account, + #[account(seeds = [b"extra-account-metas", mint.key().as_ref()], bump)] + pub extra_account_meta_list: UncheckedAccount<'info>, + pub token_program: Interface<'info, TokenInterface>, + pub associated_token_program: Program<'info, AssociatedToken>, + pub crumb_mint: InterfaceAccount<'info, Mint>, + /// CHECK: mint authority Account, + #[account(seeds = [b"mint-authority"], bump)] + pub mint_authority: UncheckedAccount<'info>, + #[account( + token::mint = crumb_mint, + token::authority = owner, + )] + pub crumb_mint_ata: InterfaceAccount<'info, TokenAccount>, +} +``` + +**`transfer_hook` Instruction** + +This instruction is fairly simple, it will only make one CPI to the Token +Program to mint a new crumb token for each transfer, all that we need to do is +to pass the right accounts to the `mint_to` CPI. + +Since the mint_authority is a PDA of the transfer hook program itself, the +program can sign for it. Therefore we'll use `new_with_signer` and pass +mint_authority seeds as the signer seeds. + +```rust +pub fn transfer_hook(ctx: Context, _amount: u64) -> Result<()> { + let signer_seeds: &[&[&[u8]]] = &[&[b"mint-authority", &[ctx.bumps.mint_authority]]]; + + // mint a crumb token for each transaction + token::mint_to( + CpiContext::new_with_signer( + ctx.accounts.token_program.to_account_info(), + token::MintTo { + mint: ctx.accounts.crumb_mint.to_account_info(), + to: ctx.accounts.crumb_mint_ata.to_account_info(), + authority: ctx.accounts.mint_authority.to_account_info(), + }, + signer_seeds + ), + 1 + ).unwrap(); + + Ok(()) +} +``` + +You may have noticed that we are using `token::mint_to` instead of +`token_2022::mint_to`, additionally in the `extra_account_meta_list` we're +saving the Token Program, not the Token Extensions Program. This is because the +crumb SFT _has_ to be a Token Program mint, not a Token Extensions Program mint. +The reason why is interesting: when first writing this, we wanted to make both +the Cookie and Crumb tokens to be Token Extensions Program mints. However, when +we did this, we would get a very interesting error: `No Reentrancy`. This +happens because the transfer hook is called as a CPI from within the Token +Extensions Program, and Solana does not allow +[recursive CPIs into the same program](https://defisec.info/solana_top_vulnerabilities). + +To illustrate: + +```text +Token Extensions Program -CPI-> Transfer Hook Program -❌CPI❌-> Token Extensions Program +Token Extensions Program -CPI-> Transfer Hook Program -✅CPI✅-> Token Program +``` + +So, that's why we're making the crumb SFT a Token Program mint. + +#### 3. Fallback instruction + +The last instruction we have to fill out is the `fallback`, this is necessary +because Anchor generates instruction discriminators differently from the ones +used in Transfer Hook interface instructions. 
The instruction discriminator for +the `transfer_hook` instruction will not match the one for the Transfer Hook +interface. + +Newer versions of Anchor should solve this for us, but for now, we can implement +this simple workaround: + +```rust +// fallback instruction handler as a workaround to anchor instruction discriminator check +pub fn fallback<'info>(program_id: &Pubkey, accounts: &'info [AccountInfo<'info>], data: &[u8]) -> Result<()> { + let instruction = TransferHookInstruction::unpack(data)?; + + // match instruction discriminator to transfer hook interface execute instruction + // token2022 program CPIs this instruction on token transfer + match instruction { + TransferHookInstruction::Execute { amount } => { + let amount_bytes = amount.to_le_bytes(); + + // invoke custom transfer hook instruction on our program + __private::__global::transfer_hook(program_id, accounts, &amount_bytes) + } + _ => { + return Err(ProgramError::InvalidInstructionData.into()); + } + } +} +``` + +#### 4. Build the program + +Let's make sure our program builds and that tests are runnable before we +continue actually writing tests for it. + +```bash +anchor test +``` + +This command will build, deploy and run tests within the `tests/` directory. + +If you're seeing any errors try to go through the steps again and make sure you +didn't miss anything. + +### 2. Write the tests + +Now we'll write some TS scripts to test our code. All of our tests will live +inside `tests/anchor.ts`. + +The outline of what will we do here is: + +1. Understand the environment + +2. Run the (empty) tests + +3. Write the "Create Cookie NFT with Transfer Hook and Metadata" test + +4. Write the "Create Crumb Mint" test + +5. Write the "Initializes ExtraAccountMetaList Account" test + +6. Write the "Transfer and Transfer Back" test + +#### 1. Understand the environment + +When anchor projects are created, they come configured to create typescript +tests with `mocha` and `chai`. When you look at `tests/anchor.ts` you'll see +everything already set up with the tests we'll create. + +The following functionality is already provided to you: + +1. Get the program IDL. + +2. Get the wallet. + +3. Get the connection. + +4. Set up the environment + +5. Airdrop some SOLs into the wallet if needed before running any of the tests. + +6. 4 empty tests that we'll talk about later + +Let's get familiar with the accounts pre-setup for us: + +- `payerWallet`: This is the wallet from `Anchor.toml`, it will be used to pay + for everything + +- `cookieMint`: The Token Extensions Program mint we'll attach metadata and the + transfer hook to + +- `crumbMint`: The Token Program mint we'll attach metadata to, this will be + what's minted as a result of the transfer hook + +- `recipient`: Another wallet to send the cookie to/from + +- `sourceCookieAccount`: The ATA of the payer and the cookie mint + +- `extraAccountMetaListPDA`: Where we will store all of the extra accounts for + our hook + +- `crumbMintAuthority`: The authority to mint the crumb, owned by the Transfer + Hook program + +We've also provided two sets of hardcoded metadata for the Cookie NFT and the +Crumb SFT. + +- `cookieMetadata` + +- `crumbMetadata` + +#### 2. Running the tests + +Since the Crumb SFT is a Token Program mint, to attach metadata to it, we need +to create a Metaplex metadata account. To do this, we need to include the +Metaplex program. This has been provided for you. + +If you take a look at `Anchor.toml` you'll see that we load in the Metaplex bpf +at the genesis block. 
This gives our testing validator access to the account. + +```toml +[test] +startup_wait = 5000 +shutdown_wait = 2000 +upgradeable = false + +[[test.genesis]] +address = "metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s" +program = "tests/metaplex_token_metadata_program.so" +``` + +If you wish to run a separate local validator to look at the explorer links, you +can. However, you need to start your local validator such that it loads in the +Metaplex program at genesis. + +In a separate terminal within the project directory run: + +```bash +solana-test-validator --bpf-program metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s ./tests/metaplex_token_metadata_program.so +``` + +Then you can test with: + +```bash +anchor test --skip-local-validator +``` + +#### 3. Write the "Create Cookie NFT with Transfer Hook and Metadata" test + +Our first test will create our Cookie NFT, which will have metadata and our +transfer hook attached. + +To accomplish all of this we will create several instructions: + +- `SystemProgram.createAccount`: Saves space for the mint on the blockchain + +- `createInitializeMetadataPointerInstruction`: Points to the mint itself since + the metadata will be stored within the mint + +- `createInitializeTransferHookInstruction`: Configures the transfer function to + call our transfer hook program + +- `createInitializeMintInstruction`: Initializes the mint account + +- `createInitializeInstruction`: Adds the metadata to the mint + +- `createAssociatedTokenAccountInstruction`: Creates the ATA for the mint to be + minted to - owned by the payer + +- `createMintToInstruction`: Mints one NFT to the ATA + +- `createSetAuthorityInstruction`: Revokes the mint authority, making a true + non-fungible token. + +Send all of these instructions in a transaction to the blockchain up and you +have Cookie NFT: + +```ts +it("Creates a Cookie NFT with Transfer Hook and Metadata", async () => { + // NFTs have 0 decimals + const decimals = 0; + + const extensions = [ + ExtensionType.TransferHook, + ExtensionType.MetadataPointer, + ]; + const mintLen = getMintLen(extensions); + const metadataLen = TYPE_SIZE + LENGTH_SIZE + pack(cookieMetadata).length; + const lamports = await connection.getMinimumBalanceForRentExemption( + mintLen + metadataLen, + ); + + const transaction = new Transaction().add( + SystemProgram.createAccount({ + fromPubkey: payerWallet.publicKey, + newAccountPubkey: cookieMint.publicKey, + space: mintLen, + lamports: lamports, + programId: TOKEN_2022_PROGRAM_ID, + }), + createInitializeMetadataPointerInstruction( + cookieMint.publicKey, //mint + payerWallet.publicKey, //authority + cookieMint.publicKey, //metadata address + TOKEN_2022_PROGRAM_ID, + ), + createInitializeTransferHookInstruction( + cookieMint.publicKey, // mint + payerWallet.publicKey, // authority + program.programId, // Transfer Hook Program ID + TOKEN_2022_PROGRAM_ID, + ), + createInitializeMintInstruction( + cookieMint.publicKey, // mint + decimals, // decimals + payerWallet.publicKey, // mint authority + null, // freeze authority + TOKEN_2022_PROGRAM_ID, + ), + createInitializeInstruction({ + programId: TOKEN_2022_PROGRAM_ID, + mint: cookieMint.publicKey, + metadata: cookieMint.publicKey, + name: cookieMetadata.name, + symbol: cookieMetadata.symbol, + uri: cookieMetadata.uri, + mintAuthority: payerWallet.publicKey, + updateAuthority: payerWallet.publicKey, + }), + createAssociatedTokenAccountInstruction( + payerWallet.publicKey, // payer + sourceCookieAccount, // associated token account + payerWallet.publicKey, // owner 
+ cookieMint.publicKey, // mint + TOKEN_2022_PROGRAM_ID, + ), + createMintToInstruction( + cookieMint.publicKey, // mint + sourceCookieAccount, // destination + payerWallet.publicKey, // authority + 1, // amount - NFTs there will only be one + [], // multi signers + TOKEN_2022_PROGRAM_ID, + ), + createSetAuthorityInstruction( + // revoke mint authority + cookieMint.publicKey, // mint + payerWallet.publicKey, // current authority + AuthorityType.MintTokens, // authority type + null, // new authority + [], // multi signers + TOKEN_2022_PROGRAM_ID, + ), + ); + + const txSig = await sendAndConfirmTransaction(connection, transaction, [ + payerWallet.payer, + cookieMint, + ]); + console.log(getExplorerLink("transaction", txSig, "localnet")); +}); +``` + +Feel free to run the first test to make sure everything is working: + +```bash +anchor test +``` + +#### 4. Write the "Create Crumb Mint" test + +Now that we have our cookie NFT, we need our crumb SFTs. Creating the crumbs +that will be minted on each transfer of our cookie will be our second test. + +Remember our crumbs are a Token Program mint, and to attach metadata we need to +use Metaplex. + +First, we need to grab some Metaplex accounts and format our metadata. + +To format our metadata, we need to satisfy Metaplex's `DataV2` struct - for +this, we only need to append some additional fields to our `crumbMetadata`. + +The Metaplex accounts we will need are: + +- `TOKEN_METADATA_PROGRAM_ID`: The Metaplex program + +- `metadataPDA`: The metadata account PDA derived from our `crumbMint` + +Lastly, to create our crumb, we need the following instructions: + +- `SystemProgram.createAccount`: Saves space for our mint + +- `createInitializeMintInstruction`: Initializes our mint + +- `createCreateMetadataAccountV3Instruction`: Creates the metadata account + +- `createSetAuthorityInstruction`: This sets the mint authority to the + `crumbMintAuthority`, which is the PDA our transfer hook program owns + +Putting it all together we get the following: + +```ts +it("Create Crumb Mint", async () => { + // SFT Should have 0 decimals + const decimals = 0; + + const size = MINT_SIZE; + + const lamports = await connection.getMinimumBalanceForRentExemption(size); + + const TOKEN_METADATA_PROGRAM_ID = new PublicKey( + "metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s", + ); + + const metadataData: DataV2 = { + ...crumbMetadata, + sellerFeeBasisPoints: 0, + creators: null, + collection: null, + uses: null, + }; + + const metadataPDAAndBump = PublicKey.findProgramAddressSync( + [ + Buffer.from("metadata"), + TOKEN_METADATA_PROGRAM_ID.toBuffer(), + crumbMint.publicKey.toBuffer(), + ], + TOKEN_METADATA_PROGRAM_ID, + ); + + const metadataPDA = metadataPDAAndBump[0]; + + const transaction = new Transaction().add( + SystemProgram.createAccount({ + fromPubkey: payerWallet.publicKey, + newAccountPubkey: crumbMint.publicKey, + space: size, + lamports: lamports, + programId: TOKEN_PROGRAM_ID, + }), + createInitializeMintInstruction( + crumbMint.publicKey, // mint + decimals, // decimals + payerWallet.publicKey, // mint authority + null, // freeze authority + TOKEN_PROGRAM_ID, + ), + createCreateMetadataAccountV3Instruction( + { + metadata: metadataPDA, + mint: crumbMetadata.mint, + mintAuthority: payerWallet.publicKey, + payer: payerWallet.publicKey, + updateAuthority: payerWallet.publicKey, + }, + { + createMetadataAccountArgsV3: { + collectionDetails: null, + data: metadataData, + isMutable: true, + }, + }, + ), + createSetAuthorityInstruction( + // set authority to transfer 
hook PDA + crumbMint.publicKey, // mint + payerWallet.publicKey, // current authority + AuthorityType.MintTokens, // authority type + crumbMintAuthority, // new authority + [], // multi signers + TOKEN_PROGRAM_ID, + ), + ); + + const txSig = await sendAndConfirmTransaction( + provider.connection, + transaction, + [payerWallet.payer, crumbMint], + { skipPreflight: true }, + ); + + console.log(getExplorerLink("transaction", txSig, "localnet")); +}); +``` + +#### 5. Write the "Initializes ExtraAccountMetaList Account" test + +Our next test is the last step of setup before we can start transferring our +cookie and seeing the transfer hook work. We need to create the +`ExtraAccountMetaList` account. + +We only need to execute one instruction this time: +`initializeExtraAccountMetaList`. This is the function that we've implemented. + +Remember it takes the following additional accounts: + +- `mint`: The cookie mint + +- `extraAccountMetaList`: The PDA that holds the extra accounts + +- `crumbMint`: The crumb mint + +```ts +// Account to store extra accounts required by the transfer hook instruction +it("Initializes ExtraAccountMetaList Account", async () => { + const initializeExtraAccountMetaListInstruction = await program.methods + .initializeExtraAccountMetaList() + .accounts({ + mint: cookieMint.publicKey, + extraAccountMetaList: extraAccountMetaListPDA, + crumbMint: crumbMint.publicKey, + }) + .instruction(); + + const transaction = new Transaction().add( + initializeExtraAccountMetaListInstruction, + ); + + const txSig = await sendAndConfirmTransaction( + provider.connection, + transaction, + [payerWallet.payer], + { + skipPreflight: true, + commitment: "confirmed", + }, + ); + + console.log(getExplorerLink("transaction", txSig, "localnet")); +}); +``` + +#### 6. Write the "Transfer and Transfer Back" test + +Our last test is to transfer our cookie back and forth and see that our crumbs +have been minted to both `payerWallet` and `recipient`. + +But before we transfer, we have to create the ATAs to hold the cookie and crumb +tokens for both the `payerWallet` and `recipient`. We can do this by calling +`getOrCreateAssociatedTokenAccount`. And we only need to do this to get the +following: `destinationCookieAccount`, `sourceCrumbAccount` and, +`destinationCrumbAccount`, + +because `sourceCookieAccount` was created when we minted the NFT. + +To transfer, we call `createTransferCheckedWithTransferHookInstruction`. This +takes the following: + +- `connection`: Connection to use + +- `source`: Source token account + +- `mint`: Mint to transfer + +- `destination`: Destination token account + +- `owner`: Owner of the source token account + +- `amount`: Amount to transfer + +- `decimals`: Decimals of the mint + +- `multiSigners`: The signer account(s) for a multisig + +- `commitment`: Commitment to use + +- `programId`: SPL Token program account + +We will call this twice, to and from the `recipient`. + +You may notice that this does not take any of the additional accounts we need +for the transfer hook like the `crumbMint` for example. This is because this +function fetches the `extraAccountMeta` for us and automatically includes all of +the accounts needed! That being said, it is asynchronous, so we will have to +`await` it. + +Lastly, after the transfers, we'll grab the crumb mint and assert the total +supply is two, and that both the `sourceCrumbAccount` and the +`destinationCrumbAccount` have some crumbs. 
+ +Putting this all together we get our final test: + +```ts +it("Transfer and Transfer Back", async () => { + const amount = BigInt(1); + const decimals = 0; + + // Create all of the needed ATAs + const destinationCookieAccount = ( + await getOrCreateAssociatedTokenAccount( + connection, + payerWallet.payer, + cookieMint.publicKey, + recipient.publicKey, + false, + undefined, + { commitment: "confirmed" }, + TOKEN_2022_PROGRAM_ID, + ) + ).address; + + const sourceCrumbAccount = ( + await getOrCreateAssociatedTokenAccount( + connection, + payerWallet.payer, + crumbMint.publicKey, + payerWallet.publicKey, + false, + undefined, + { commitment: "confirmed" }, + TOKEN_PROGRAM_ID, + ) + ).address; + + const destinationCrumbAccount = ( + await getOrCreateAssociatedTokenAccount( + connection, + payerWallet.payer, + crumbMint.publicKey, + recipient.publicKey, + false, + undefined, + { commitment: "confirmed" }, + TOKEN_PROGRAM_ID, + ) + ).address; + + // Standard token transfer instruction + const transferInstruction = + await createTransferCheckedWithTransferHookInstruction( + connection, + sourceCookieAccount, + cookieMint.publicKey, + destinationCookieAccount, + payerWallet.publicKey, + amount, + decimals, // Decimals + [], + "confirmed", + TOKEN_2022_PROGRAM_ID, + ); + + const transferBackInstruction = + await createTransferCheckedWithTransferHookInstruction( + connection, + destinationCookieAccount, + cookieMint.publicKey, + sourceCookieAccount, + recipient.publicKey, + amount, + decimals, // Decimals + [], + "confirmed", + TOKEN_2022_PROGRAM_ID, + ); + + const transaction = new Transaction().add( + transferInstruction, + transferBackInstruction, + ); + const txSig = await sendAndConfirmTransaction( + connection, + transaction, + [payerWallet.payer, recipient], + { + skipPreflight: true, + }, + ); + + console.log(getExplorerLink("transaction", txSig, "localnet")); + + const mintInfo = await getMint( + connection, + crumbMint.publicKey, + "processed", + TOKEN_PROGRAM_ID, + ); + + const sourceCrumbAccountInfo = await getAccount( + connection, + sourceCrumbAccount, + "processed", + TOKEN_PROGRAM_ID, + ); + + const destinationCrumbAccountInfo = await getAccount( + connection, + destinationCrumbAccount, + "processed", + TOKEN_PROGRAM_ID, + ); + + expect(Number(mintInfo.supply)).to.equal(2); + expect(Number(sourceCrumbAccountInfo.amount)).to.equal(1); + expect(Number(destinationCrumbAccountInfo.amount)).to.equal(1); + + console.log("\nCrumb Count:", Number(mintInfo.supply)); + console.log("Source Crumb Amount:", Number(sourceCrumbAccountInfo.amount)); + console.log( + "Destination Crumb Amount\n", + Number(destinationCrumbAccountInfo.amount), + ); +}); +``` + +Go ahead and run all of the tests: + +```bash +anchor test +``` + +They should all be passing! + +If you want to take a look at any of the Explorer links do the following: + +In a separate terminal within the project directory run: + +```bash +solana-test-validator --bpf-program metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s ./tests/metaplex_token_metadata_program.so +``` + +Then you can test with: + +```bash +anchor test --skip-local-validator +``` + +Thats it! You've created a mint with a transfer hook! + +## Challenge + +Amend the transfer hook such that anyone who has a crumb cannot get their cookie +back. 
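+
+If you take on the challenge, a test sketch along these lines could help you verify the new
+behavior. This is only a starting point, not part of the lesson's solution: it assumes it runs in
+the same test suite as the tests above (so `connection`, `cookieMint`, `sourceCookieAccount`,
+`payerWallet`, and `recipient` are in scope), that `getAssociatedTokenAddressSync` is imported from
+`@solana/spl-token`, and that your amended program rejects a transfer whenever the receiving wallet
+already holds a crumb:
+
+```ts
+it("Fails to transfer the cookie to a holder who already has a crumb", async () => {
+  // Re-derive the recipient's cookie ATA (it was created in the previous test)
+  const destinationCookieAccount = getAssociatedTokenAddressSync(
+    cookieMint.publicKey,
+    recipient.publicKey,
+    false,
+    TOKEN_2022_PROGRAM_ID,
+  );
+
+  // Same helper as before: it fetches the extra account metas for the hook
+  const transferInstruction =
+    await createTransferCheckedWithTransferHookInstruction(
+      connection,
+      sourceCookieAccount,
+      cookieMint.publicKey,
+      destinationCookieAccount,
+      payerWallet.publicKey,
+      BigInt(1),
+      0, // decimals
+      [],
+      "confirmed",
+      TOKEN_2022_PROGRAM_ID,
+    );
+
+  const transaction = new Transaction().add(transferInstruction);
+
+  try {
+    await sendAndConfirmTransaction(connection, transaction, [
+      payerWallet.payer,
+    ]);
+    expect.fail("The transfer hook should have rejected this transfer");
+  } catch (error) {
+    console.log("Transfer rejected as expected");
+  }
+});
+```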
diff --git a/content/courses/tokens-and-nfts/index.mdx b/content/courses/tokens-and-nfts/index.mdx new file mode 100644 index 000000000..1e306ee19 --- /dev/null +++ b/content/courses/tokens-and-nfts/index.mdx @@ -0,0 +1,5 @@ +--- +author: unboxed +title: Tokens and NFTs on Solana +description: Create tokens and NFTs on Solana. +--- diff --git a/content/courses/tokens-and-nfts/meta.json b/content/courses/tokens-and-nfts/meta.json new file mode 100644 index 000000000..251374cda --- /dev/null +++ b/content/courses/tokens-and-nfts/meta.json @@ -0,0 +1,3 @@ +{ + "pages": ["token-program", "token-program-advanced", "nfts-with-metaplex"] +} diff --git a/content/courses/tokens-and-nfts/nfts-with-metaplex.mdx b/content/courses/tokens-and-nfts/nfts-with-metaplex.mdx new file mode 100644 index 000000000..7b88a98bc --- /dev/null +++ b/content/courses/tokens-and-nfts/nfts-with-metaplex.mdx @@ -0,0 +1,917 @@ +--- +title: Create Solana NFTs With Metaplex +objectives: + - Explain NFTs and how they're represented on the Solana network + - Explain the role of the Metaplex Token Metadata program + - Create and update NFTs using the Metaplex JS SDK +description: + "How to create NFTs in TypeScript with Metaplex Metadata program and Irys + permanent storage service." +--- + +### Summary + +- **Non-Fungible Tokens (NFTs)** are SPL Tokens with an associated metadata + account, 0 decimals, and a maximum supply of 1 +- **Metadata** attaches additional properties to token mints (both NFTs and + regular tokens). For NFTs, metadata includes the token name and a link to an + offchain JSON file. This JSON file contains links to artwork and other media + files, any special traits the NFT has, and more. +- The **Metaplex Token Metadata** program is an onchain program that attaches + metadata to a token mint. We can interact with the Token Metadata program + using the + [Token Metadata package](https://developers.metaplex.com/token-metadata) via + Umi, a tool made by Metaplex for working with onchain programs. + +### Lesson + +Solana Non-Fungible Tokens (NFTs) are SPL tokens created using the Token +program. These tokens, however, also have an additional metadata account +associated with each token mint. + +In this lesson, we'll cover the basics of how NFTs are represented on Solana, +how to create and update them using the `mpl-token-metadata` npm module. + +#### NFTs on Solana + +An NFT is a standard token from the Token Program with the following +characteristics: + +1. Has 0 decimals, so it cannot be divided into parts +2. Comes from a token mint with a supply of 1, so only 1 of these tokens exists +3. Comes from a token mint whose authority is set to `null` (to ensure that the + supply never changes) +4. Has an associated account that stores **metadata** - things like a name, + symbol, images, etc. + +While the first three points can be achieved with the SPL Token Program, the +associated metadata requires an additional program. This is the **Metadata +program**. + +#### The Metaplex Token Metadata program + +The most popular way Solana NFTs have been created is by using the +[Metaplex Token Metadata](https://developers.metaplex.com/token-metadata) +program. + +![Metadata](/assets/courses/unboxed/solana-nft-metaplex-metadata.png) + +- When creating an NFT, the Token Metadata program creates an **onchain + metadata** account using a Program Derived Address (PDA) with the token mint + as a seed. This allows the metadata account for any NFT to be located + deterministically using the address of the token mint. 
The onchain metadata + contains a URI field that points to an offchain `.json` file. + +- The **offchain metadata** in the JSON file stores the link to the media + (images, videos, 3D files) of the NFT, any traits the NFT may have, and + additional metadata (see + [this example JSON file](https://lsc6xffbdvalb5dvymf5gwjpeou7rr2btkoltutn5ij5irlpg3wa.arweave.net/XIXrlKEdQLD0dcML01kvI6n4x0GanLnSbeoT1EVvNuw)). + Permanent data storage systems such as Arweave are often used to store the + offchain component of NFT metadata. + +In the following sections, we'll cover the basics of using the +`metaplex-foundation/token-metadata` plugin with Umi to prepare assets, create +NFTs, update NFTs, and associate an NFT with a broader collection. For more +information on `metaplex-foundation/token-metadata` see the +[developer docs for Token Metadata](https://developers.metaplex.com/token-metadata). + + + +[Metaplex Core](https://developers.metaplex.com/core), is an NFT standard from Metaplex where asset details such as the owner, name, uri e.t.c are stored on a single account. However, the most common style of NFT is still by making a Solana +SPL token with some Metadata attached via the Metaplex Metadata program, so +that's what we'll be using in this tutorial. + + + +#### UMI instance + +Umi is a framework for making JS/TS clients for onchain programs, that was +created by Metaplex. Umi can create JS/TS clients for many programs, but in +practice, it's most commonly used to communicate to the Token Metadata program. + +Note that Umi has different implementations for many concepts than web3.js, +including Keypairs, PublicKeys, and Connections. However, it is easy to convert +from web3.js versions of these items to the Umi equivalents. + +#### Installation and setting up Umi + +First we create a new Umi instance. We can do this by either providing our own +RPC endpoint, or use the public facing Solana endpoints provided by the +`clusterApiUrl` method. + +```typescript +import { createUmi } from "@metaplex-foundation/umi-bundle-defaults"; +import { clusterApiUrl } from "@solana/web3.js"; + +const umi = createUmi(clusterApiUrl("devnet")); +``` + +Finally, we pass in the identity for our Umi instance (this is the keypair that +will be used to sign transactions) and the plugins that we will use, in our +case, this is the `metaplex-foundation/mpl-token-metadata`. + +```typescript +import { mplTokenMetadata } from "@metaplex-foundation/mpl-token-metadata"; +import { keypairIdentity } from "@metaplex-foundation/umi"; +import { createUmi } from "@metaplex-foundation/umi-bundle-defaults"; +import { getKeypairFromFile } from "@solana-developers/helpers"; +import { promises as fs } from "fs"; +import { clusterApiUrl } from "@solana/web3.js"; + +const umi = createUmi(clusterApiUrl("devnet")); + +// load keypair from local file system +// See https://github.com/solana-developers/helpers?tab=readme-ov-file#get-a-keypair-from-a-keypair-file +const localKeypair = await getKeypairFromFile(); + +// convert to Umi compatible keypair +const umiKeypair = umi.eddsa.createKeypairFromSecretKey(localKeypair.secretKey); + +// load the MPL metadata program plugin and assign a signer to our umi instance +umi.use(keypairIdentity(umiKeypair)).use(mplTokenMetadata()); +``` + +#### Uploading assets + +Before creating an NFT, you must prepare and upload any assets you plan to +associate with the NFT. While this doesn't have to be an image, most NFTs have +an image associated with them. 
+ +Preparing and uploading an image involves converting the image to a buffer, +converting the file to a +[generic file](https://developers.metaplex.com/umi/storage#generic-files) using +the `createGenericFile()` function and finally uploading it to the designated +Storage Driver. + +The `GenericFile` type allows Umi to support different file variations despite +the difference of browser files and local file system files i.e. those on your +computer. + +In action, uploading an image named `random-image.png` from your computer would +take the following steps: + +1. Reading the file using `readFile` into a buffer. + +2. Creating a generic file type with the files MIME Type from the buffer and + filePath. + +3. Uploading file to designated storage provider. + +```typescript +let filePath = "random-image.png"; + +const buffer = await fs.readFile(filePath); +let file = createGenericFile(buffer, filePath, { + // chose the correct file MIME type https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types + contentType: "image/jpeg", +}); +const [image] = await umi.uploader.upload([file]); +``` + +The function's return value will be the URI where the image was stored. + +#### Upload metadata + +After uploading an image, it's time to upload the offchain JSON metadata using +the `uploadJson()` method. This will return a URI where the JSON metadata is +stored. + +Remember, the offchain portion of the metadata includes things like the image +URI as well as additional information like the name and description of the NFT. +While you can technically include anything you'd like in this JSON object, in +most cases, you should follow the +[NFT standard](https://developers.metaplex.com/token-metadata/token-standard#the-non-fungible-standard) +to ensure compatibility with wallets, programs, and applications. + +To create the metadata, use the `uploadJson()` method provided by the SDK. This +method accepts a metadata object and returns a URI that points to the uploaded +metadata. + +```typescript +const uri = await umi.uploader.uploadJson({ + name, + description, + image, +}); +``` + +#### Create the NFT + +After uploading the NFT's metadata, you can finally create the NFT on the +network. The `mplTokenMetadata` plugin we added earlier provides the required +helpers to create an NFT or any other token with minimal configuration. The +helper `createNft` method will create the mint account, token account, metadata +account, and master edition account for you. The data provided to this method +will represent the onchain portion of the NFT metadata. You can explore the SDK +to see all the other input optionally supplied to this method. + +```typescript +const { signature, result } = await createNft(umi, { + mint, + name: "My NFT", + uri, + updateAuthority: umi.identity.publicKey, + sellerFeeBasisPoints: percentAmount(0), +}).sendAndConfirm(umi, { send: { commitment: "finalized" } }); +``` + +The `sendAndConfirm` method is what takes care of signing our transaction and +sending it. It also provides other options to set pre-flight checks and our +desired commitment for the transaction, which defaults to `confirmed` if not +provided. + +This method returns an object containing the transaction signature and a result. +The result object contains the outcome of our transaction. If successful, the +`err` inside this will be set to null otherwise it'll contain the error for the +failed transaction. 
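+
+For example, a minimal success check using the `signature` and `result` values returned above might
+look like the sketch below. It assumes the `base58` serializer from
+`@metaplex-foundation/umi/serializers` to print the raw signature bytes in the familiar base58
+format:
+
+```typescript
+import { base58 } from "@metaplex-foundation/umi/serializers";
+
+if (result.value.err === null) {
+  // decode the raw signature bytes into the usual base58 string
+  const [transactionSignature] = base58.deserialize(signature);
+  console.log("NFT created, signature:", transactionSignature);
+} else {
+  throw new Error(`NFT creation failed: ${JSON.stringify(result.value.err)}`);
+}
+```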
+ +By default, the SDK sets the `isMutable` property to true, allowing for updates +to be made to the NFT's metadata. However, you can choose to set `isMutable` to +false, making the NFT's metadata immutable. + +#### Update the NFT + +If you've left `isMutable` as true, you may update your NFT's metadata. + +The SDK's `updateV1` method allows you to update both the onchain and offchain +portions of the NFT's metadata. To update the offchain metadata, you'll need to +repeat the steps of uploading a new image and metadata URI (as outlined in the +previous steps), then provide the new metadata URI to this method. This will +change the URI that the onchain metadata points to, effectively updating the +offchain metadata as well. + +```typescript +const nft = await fetchMetadataFromSeeds(umi, { mintAddress }); + +await updateV1(umi, { + mint, + authority: umi.identity, + data: { + ...nft, + sellerFeeBasisPoints: 0, + name: "Updated Name", + }, + primarySaleHappened: true, + isMutable: true, +}).sendAndConfirm(umi); +``` + +Note that any fields you don't include in the call to `updateV1` will stay the +same, by design. + +#### Add the NFT to a collection + +A +[Certified Collection](https://developers.metaplex.com/token-metadata/collections) +is an NFT that individual NFTs can belong to. Think of a large NFT collection +like Solana Monkey Business. If you look at an individual NFT's +[Metadata](https://explorer.solana.com/address/C18YQWbfwjpCMeCm2MPGTgfcxGeEDPvNaGpVjwYv33q1/metadata) +you will see a `collection` field with a `key` that points to the +`Certified Collection` +[NFT](https://explorer.solana.com/address/SMBH3wF6baUj6JWtzYvqcKuj2XCKWDqQxzspY12xPND/). +Simply put, NFTs that are part of a collection are associated with another NFT +that represents the collection itself. + +Certified collections are important because they mean the collection owner has +verified that each NFT actually belongs to the collection! + +To add an NFT to a collection, first, the Collection NFT has to be created. The +process is the same as before, except you'll include one additional field on our +NFT Metadata: `isCollection`. This field tells the token program that this NFT +is a Collection NFT. + +```typescript +const collectionMint = generateSigner(umi); + +await createNft(umi, { + mint: collectionMint, + name: `My Collection`, + uri, + sellerFeeBasisPoints: percentAmount(0), + isCollection: true, +}).sendAndConfirm(umi); +``` + +To mint an NFT into this collection, the +[Collection type](https://mpl-token-metadata-js-docs.vercel.app/types/Collection.html) +which has two fields, the address of the `collectionMint` generated above and +the verified field. + +```typescript +const { signature, result } = await createNft(umi, { + mint, + name: "My NFT", + uri, + updateAuthority: umi.identity.publicKey, + sellerFeeBasisPoints: percentAmount(0), + collection: { key: collectionMint.publicKey, verified: false }, +}).sendAndConfirm(umi, { send: { commitment: "finalized" } }); +``` + +When you checkout the metadata on your newly created NFT, you should now see a +`collection` field like so: + +```JSON +"collection":{ + "verified": false, + "key": "SMBH3wF6baUj6JWtzYvqcKuj2XCKWDqQxzspY12xPND" +} +``` + +The last thing you need to do is verify the NFT. This effectively just flips the +`verified` field above to true, but it's incredibly important. 
This is what lets +consuming programs and apps, including wallets and art marketplaces, know that +your NFT is in fact part of the collection - because the Collection's owner has +signed a transaction making the NFT a member of that collection. You can do this +using the `verifyCollectionV1` function: + +```typescript +const metadata = findMetadataPda(umi, { mint: mint.publicKey }); + +await verifyCollectionV1(umi, { + metadata, + collectionMint, + authority: umi.identity, +}).sendAndConfirm(umi); +``` + +### Lab + +In this lab, we'll go through the steps to create an NFT using the Metaplex Umi +framework, update the NFT's metadata after the fact, and then associate the NFT +with a collection. By the end, you will have a basic understanding of how to use +the Metaplex Umi and the mplTokenMetadata library to interact with NFTs on +Solana. + +#### Part 1: Creating an NFT collection + +To begin, make a new folder and install the relevant dependencies: + +```bash +npm i @solana/web3.js@1 @solana-developers/helpers@2 @metaplex-foundation/mpl-token-metadata @metaplex-foundation/umi-bundle-defaults @metaplex-foundation/umi-uploader-irys esrun +``` + +Then create a file called `create-metaplex-nft-collection.ts`, and add our +imports: + +```typescript +import { + createNft, + mplTokenMetadata, +} from "@metaplex-foundation/mpl-token-metadata"; +import { + createGenericFile, + generateSigner, + keypairIdentity, + percentAmount, +} from "@metaplex-foundation/umi"; +import { createUmi } from "@metaplex-foundation/umi-bundle-defaults"; +import { irysUploader } from "@metaplex-foundation/umi-uploader-irys"; +import { + airdropIfRequired, + getExplorerLink, + getKeypairFromFile, +} from "@solana-developers/helpers"; +import { clusterApiUrl, Connection, LAMPORTS_PER_SOL } from "@solana/web3.js"; +import { promises as fs } from "fs"; +import * as path from "path"; +``` + +Connect to devnet, load a user and Airdrop some SOL if needed: + +```typescript +// create a new connection to Solana's devnet cluster +const connection = new Connection(clusterApiUrl("devnet")); + +// load keypair from local file system +// assumes that the keypair is already generated using `solana-keygen new` +const user = await getKeypairFromFile(); + +await airdropIfRequired( + connection, + user.publicKey, + 1 * LAMPORTS_PER_SOL, + 0.1 * LAMPORTS_PER_SOL, +); + +console.log("Loaded user:", user.publicKey.toBase58()); +``` + +Create a new Umi instance, assign it the loaded keypair, load the +`mplTokenMetadata` to interact with the metadata program and `irysUploader` to +upload our files. + +```typescript +const umi = createUmi(connection); + +// load keypair from local file system +// See https://github.com/solana-developers/helpers?tab=readme-ov-file#get-a-keypair-from-a-keypair-file +const user = await getKeypairFromFile(); + +// convert to umi compatible keypair +const umiKeypair = umi.eddsa.createKeypairFromSecretKey(user.secretKey); + +// assigns a signer to our umi instance, and loads the MPL metadata program and Irys uploader plugins. +umi + .use(keypairIdentity(umiKeypair)) + .use(mplTokenMetadata()) + .use(irysUploader()); +``` + +Download the image assets the collection image from the links below and save +them inside your working directory, + +1. collection image: + https://github.com/solana-developers/professional-education/blob/main/labs/metaplex-umi/collection.png + +2. 
NFT image: + https://github.com/solana-developers/professional-education/blob/main/labs/metaplex-umi/nft.png + +We will use these images as our collection and nft cover images respectively. + +We will use Irys as our storage provider, and Metaplex conveniently ships the +`umi-uploader-irys` plugin we can use to upload our files. The plugin, also +takes care of storage fees so that we don't have to worry about making this on +our own. + +Upload the offchain metadata to Irys: + +```typescript +const collectionImagePath = path.resolve(__dirname, "collection.png"); + +const buffer = await fs.readFile(collectionImagePath); +let file = createGenericFile(buffer, collectionImagePath, { + contentType: "image/png", +}); +const [image] = await umi.uploader.upload([file]); +console.log("image uri:", image); + +// upload offchain json to Arweave using irys +const uri = await umi.uploader.uploadJson({ + name: "My Collection", + symbol: "MC", + description: "My Collection description", + image, +}); +console.log("Collection offchain metadata URI:", uri); +``` + +Then actually make the collection: + +```typescript +// generate mint keypair +const collectionMint = generateSigner(umi); + +// create and mint NFT +await createNft(umi, { + mint: collectionMint, + name: "My Collection", + uri, + updateAuthority: umi.identity.publicKey, + sellerFeeBasisPoints: percentAmount(0), + isCollection: true, +}).sendAndConfirm(umi, { send: { commitment: "finalized" } }); + +let explorerLink = getExplorerLink( + "address", + collectionMint.publicKey, + "devnet", +); +console.log(`Collection NFT: ${explorerLink}`); +console.log(`Collection NFT address is:`, collectionMint.publicKey); +console.log("✅ Finished successfully!"); +``` + +We advise using [esrun](https://www.npmjs.com/package/esrun) to run the scripts +because it allows you to use top level await without having to wrap your code +inside asynchronous function. + +Run the `create-metaplex-nft-collection.ts` script + +``` +npx esrun create-metaplex-nft-collection.ts +``` + +The output should look like this: + +``` +% npx esrun create-metaplex-nft-collection.ts + +Loaded user: 4kg8oh3jdNtn7j2wcS7TrUua31AgbLzDVkBZgTAe44aF +image uri: https://arweave.net/XWpt7HDOFC0wJQcQWgP9n_cxHS0qQik9-27CAAaGP6E +Collection offchain metadata URI: https://arweave.net/atIf58t3FHa3heoOtNqPkVvEGC_9WzAduY0GQE-LnFI +Collection NFT: https://explorer.solana.com/address/D2zi1QQmtZR5fk7wpA1Fmf6hTY2xy8xVMyNgfq6LsKy1?cluster=devnet +Collection NFT address is: D2zi1QQmtZR5fk7wpA1Fmf6hTY2xy8xVMyNgfq6LsKy1 +✅ Finished successfully! +``` + +Congratulations! You've created a Metaplex Collection. Check this out on Solana +Explorer using the URL above which should resemble + +![Solana Explorer with details about created collection](/assets/courses/unboxed/solana-explorer-metaplex-collection.png) + +If you have any trouble, try and fix it yourself, but if you need to you can +also check out the +[solution code](https://github.com/solana-developers/professional-education/blob/main/labs/metaplex-umi/create-collection.ts). + +We'll use the collection NFT address in the next step. + +#### 2. Creating a Metaplex NFT inside the collection + +We'll now make a Metaplex NFT that's a member of the collection we just made. +Make a new file called `create-metaplex-nft.ts`. 
The setup for this will look +the same as the previous file, with slightly different imports: + +```typescript +import { + createNft, + findMetadataPda, + mplTokenMetadata, + verifyCollectionV1, +} from "@metaplex-foundation/mpl-token-metadata"; +import { + createGenericFile, + generateSigner, + keypairIdentity, + percentAmount, + publicKey as UMIPublicKey, +} from "@metaplex-foundation/umi"; +import { createUmi } from "@metaplex-foundation/umi-bundle-defaults"; +import { irysUploader } from "@metaplex-foundation/umi-uploader-irys"; +import { + airdropIfRequired, + getExplorerLink, + getKeypairFromFile, +} from "@solana-developers/helpers"; +import { clusterApiUrl, Connection, LAMPORTS_PER_SOL } from "@solana/web3.js"; +import { promises as fs } from "fs"; +import * as path from "path"; +// create a new connection to Solana's devnet cluster +const connection = new Connection(clusterApiUrl("devnet")); + +// load keypair from local file system +// assumes that the keypair is already generated using `solana-keygen new` +const user = await getKeypairFromFile(); +console.log("Loaded user:", user.publicKey.toBase58()); + +await airdropIfRequired( + connection, + user.publicKey, + 1 * LAMPORTS_PER_SOL, + 0.1 * LAMPORTS_PER_SOL, +); + +const umi = createUmi(connection); + +// convert to umi compatible keypair +const umiKeypair = umi.eddsa.createKeypairFromSecretKey(user.secretKey); + +// load our plugins and signer +umi + .use(keypairIdentity(umiKeypair)) + .use(mplTokenMetadata()) + .use(irysUploader()); +``` + +Now let's tell Metaplex our collection, and the NFT we want to make: + +```typescript +// Substitute in your collection NFT address from create-metaplex-nft-collection.ts +const collectionNftAddress = UMIPublicKey("YOUR_COLLECTION_NFT_ADDRESS_HERE"); + +// example data and metadata for our NFT +const nftData = { + name: "My NFT", + symbol: "MN", + description: "My NFT Description", + sellerFeeBasisPoints: 0, + imageFile: "nft.png", +}; +``` + +We can then put out files into Irys: + +```typescript +const NFTImagePath = path.resolve(__dirname, "nft.png"); + +const buffer = await fs.readFile(NFTImagePath); +let file = createGenericFile(buffer, NFTImagePath, { + contentType: "image/png", +}); + +// upload image and get image uri +const [image] = await umi.uploader.upload([file]); +console.log("image uri:", image); + +// upload offchain json using irys and get metadata uri +const uri = await umi.uploader.uploadJson({ + name: "My NFT", + symbol: "MN", + description: "My NFT Description", + image, +}); +console.log("NFT offchain metadata URI:", uri); +``` + +And then create an NFT using the URI from the metadata: + +```typescript +// generate mint keypair +const mint = generateSigner(umi); + +// create and mint NFT +await createNft(umi, { + mint, + name: "My NFT", + symbol: "MN", + uri, + updateAuthority: umi.identity.publicKey, + sellerFeeBasisPoints: percentAmount(0), + collection: { + key: collectionAddress, + verified: false, + }, +}).sendAndConfirm(umi, { send: { commitment: "finalized" } }); + +let explorerLink = getExplorerLink("address", mint.publicKey, "devnet"); +console.log(`Token Mint: ${explorerLink}`); +``` + +Run `npx esrun create-metaplex-nft.ts`. 
If all goes well, you will see the +following: + +``` +% npx esrun create-metaplex-nft.ts + +Loaded user: 4kg8oh3jdNtn7j2wcS7TrUua31AgbLzDVkBZgTAe44aF +image uri: https://arweave.net/XgTss3uKlddlMFjRTIvDiDLBv6Pptm-Vx9mz6Oe5f-o +NFT offchain metadata URI: https://arweave.net/PK3Url31k4BYNvYOgTuYgWuCLrNjl5BrrF5lbY9miR8 +Token Mint: https://explorer.solana.com/address/CymscdAwuTRjCz1ezsNZa15MnwGNrxhGUEToLFcyijMT?cluster=devnet +Created NFT address is CymscdAwuTRjCz1ezsNZa15MnwGNrxhGUEToLFcyijMT +✅ Finished successfully! +``` + +Inspect your NFT at the address given! If you have any trouble, try and fix it +yourself, but if you need to you can also check out the +[solution code](https://github.com/solana-developers/professional-education/blob/main/labs/metaplex-umi/create-nft.ts). + +You should have something similar to this image on your explorer page +![Solana Explorer with details about created NFT](/assets/courses/unboxed/solana-explorer-metaplex-nft.png) + +Finally, let's verify our mint as being part of our collection. This makes it so +the `verified` field in the onchain metadata is set to `true`, so consuming +programs and apps can know for sure that the NFT in fact belongs to the +collection. + +Create a new file `verify-metaplex-nft.ts`, import the required libraries and +instantiate a new umi Instance. + +```typescript +import { + findMetadataPda, + mplTokenMetadata, + verifyCollectionV1, +} from "@metaplex-foundation/mpl-token-metadata"; +import { + keypairIdentity, + publicKey as UMIPublicKey, +} from "@metaplex-foundation/umi"; +import { createUmi } from "@metaplex-foundation/umi-bundle-defaults"; +import { irysUploader } from "@metaplex-foundation/umi-uploader-irys"; +import { + airdropIfRequired, + getExplorerLink, + getKeypairFromFile, +} from "@solana-developers/helpers"; +import { clusterApiUrl, Connection, LAMPORTS_PER_SOL } from "@solana/web3.js"; + +// create a new connection to Solana's devnet cluster +const connection = new Connection(clusterApiUrl("devnet")); + +// load keypair from local file system +// assumes that the keypair is already generated using `solana-keygen new` +const user = await getKeypairFromFile(); +console.log("Loaded user:", user.publicKey.toBase58()); + +await airdropIfRequired( + connection, + user.publicKey, + 1 * LAMPORTS_PER_SOL, + 0.1 * LAMPORTS_PER_SOL, +); + +const umi = createUmi(connection); + +// Substitute in your collection NFT address from create-metaplex-nft-collection.ts +const collectionAddress = UMIPublicKey(""); + +// Substitute in your NFT address from create-metaplex-nft.ts +const nftAddress = UMIPublicKey(""); +``` + +Verifying an NFT will require you to have the `collectionAddress` you used +created in the creation of a collection stage, and we will use the +`verifyCollectionV1` method. + +```typescript +// Verify our collection as a Certified Collection +// See https://developers.metaplex.com/token-metadata/collections +const metadata = findMetadataPda(umi, { mint: nftAddress }); +await verifyCollectionV1(umi, { + metadata, + collectionMint: collectionAddress, + authority: umi.identity, +}).sendAndConfirm(umi); + +let explorerLink = getExplorerLink("address", nftAddress, "devnet"); +console.log(`verified collection: ${explorerLink}`); +console.log("✅ Finished successfully!"); +``` + +Run `npx esrun verify-metaplex-nft.ts`. 
If all goes well, you will see the +following: + +``` +% npx esrun create-metaplex-nft.ts + +Loaded user: 4kg8oh3jdNtn7j2wcS7TrUua31AgbLzDVkBZgTAe44aF +verified collection: https://explorer.solana.com/address/CymscdAwuTRjCz1ezsNZa15MnwGNrxhGUEToLFcyijMT?cluster=devnet +✅ Finished successfully! +``` + +Inspect your verified NFT at the address given! If you have any trouble, try and +fix it yourself, but if you need to you can also check out the +[solution code](https://github.com/solana-developers/professional-education/blob/main/labs/metaplex-umi/verify-nft.ts). + +The verified flag on your NFT should now be set to `1` -> `true` showing that +it's verified. To confirm this, look under the metadata tab on the Solana +Explorer to confirm that your NFT is verified as part of the collection. + +![Solana Explorer with details about created NFT](/assets/courses/unboxed/solana-explorer-verified-nft.png) + +Remember the NFT address, we'll use it in the next step. + +#### 3. Update the NFT + +Create a new file, called `update-metaplex-nft.ts`. The imports will be similar +to our previous files: + +```typescript +import { + createNft, + fetchMetadataFromSeeds, + updateV1, + findMetadataPda, + mplTokenMetadata, +} from "@metaplex-foundation/mpl-token-metadata"; +import { + createGenericFile, + generateSigner, + keypairIdentity, + percentAmount, + publicKey as UMIPublicKey, +} from "@metaplex-foundation/umi"; +import { createUmi } from "@metaplex-foundation/umi-bundle-defaults"; +import { irysUploader } from "@metaplex-foundation/umi-uploader-irys"; +import { + airdropIfRequired, + getExplorerLink, + getKeypairFromFile, +} from "@solana-developers/helpers"; +import { clusterApiUrl, Connection, LAMPORTS_PER_SOL } from "@solana/web3.js"; +import { promises as fs } from "fs"; +import * as path from "path"; + +// create a new connection to Solana's devnet cluster +const connection = new Connection(clusterApiUrl("devnet")); + +// load keypair from local file system +// assumes that the keypair is already generated using `solana-keygen new` +const user = await getKeypairFromFile(); +console.log("Loaded user:", user.publicKey.toBase58()); + +await airdropIfRequired( + connection, + user.publicKey, + 1 * LAMPORTS_PER_SOL, + 0.1 * LAMPORTS_PER_SOL, +); + +const umi = createUmi(connection); + +// convert to umi compatible keypair +const umiKeypair = umi.eddsa.createKeypairFromSecretKey(user.secretKey); + +// load our plugins and signer +umi + .use(keypairIdentity(umiKeypair)) + .use(mplTokenMetadata()) + .use(irysUploader()); +``` + +Let's load our NFT, specifying the address from the previous example, and set up +what we'd like to update: + +```typescript +// Load the NFT using the mint address +const mint = UMIPublicKey("YOUR_NFT_ADDRESS_HERE"); +const asset = await fetchDigitalAsset(umi, mint); + +// example data for updating an existing NFT +const updatedNftData = { + name: "Updated Asset", + symbol: "UPDATED", + description: "Updated Description", + sellerFeeBasisPoints: 0, + imageFile: "nft.png", +}; +``` + +We can then use Metaplex to update our NFT: + +```typescript +const NFTImagePath = path.resolve(__dirname, "nft.png"); + +const buffer = await fs.readFile(NFTImagePath); +let file = createGenericFile(buffer, NFTImagePath, { + contentType: "image/png", +}); + +// upload new image and get image uri +const [image] = await umi.uploader.upload([file]); +console.log("image uri:", image); + +// upload updated offchain json using irys and get metadata uri +const uri = await umi.uploader.uploadJson({ + name: 
"Updated ", + symbol: "UPDATED", + description: "Updated Description", + image, +}); +console.log("NFT offchain metadata URI:", uri); + +// Load the NFT using the mint address +const mint = UMIPublicKey("Zxd9TmtBHQNti6tJxtx1AKYJFykNUwJL4rth441CjRd"); +const nft = await fetchMetadataFromSeeds(umi, { mint }); + +await updateV1(umi, { + mint, + authority: umi.identity, + data: { + ...nft, + sellerFeeBasisPoints: 0, + name: "Updated Asset", + }, + primarySaleHappened: true, + isMutable: true, +}).sendAndConfirm(umi); + +let explorerLink = getExplorerLink("address", mint, "devnet"); +console.log(`NFT updated with new metadata URI: ${explorerLink}`); + +console.log("✅ Finished successfully!"); +``` + +Run `npx esrun update-metaplex-nft.ts`. You should see something like: + +```bash +% npx esrun update-metaplex-nft.ts + +Loaded user: 4kg8oh3jdNtn7j2wcS7TrUua31AgbLzDVkBZgTAe44aF +image uri: https://arweave.net/dboiAebucLGhprtknDQnp-yMj348cpJF4aQul406odg +NFT offchain metadata URI: https://arweave.net/XEjo-44GHRFNOEtPUdDsQlW5z1Gtpk2Wv0HvR8ll1Bw +NFT updated with new metadata URI: https://explorer.solana.com/address/Zxd9TmtBHQNti6tJxtx1AKYJFykNUwJL4rth441CjRd?cluster=devnet +✅ Finished successfully! +``` + +Inspect the updated NFT on Solana Explorer! Just like previously, if you have +any issues, you should fix them yourself, but if needed the +[solution code](https://github.com/solana-developers/professional-education/blob/main/labs/metaplex-umi/update-nft.ts) +is available. + +![Solana Explorer with details about the updated NFT](/assets/courses/unboxed/solana-explorer-with-updated-NFT.png) + +Congratulations! You've successfully learned how to use the Metaplex SDK to +create, update, and verify NFTs as part of a collection. That's everything you +need to build out your own collection for just about any use case. You could +build a new event ticketing platform, revamp a retail business membership +program, or even digitize your school's student ID system. The possibilities are +endless! + +### Challenge + +The steps covered above for creating an NFT would be incredibly tedious to +execute for thousands of NFTs in one go. Many providers, including Metaplex, +MagicEden, and Tensor have so-called 'fair launch' tools that take care of +minting large quantities of NFTs and ensuring they are sold within the +parameters set by their creators. Dive into one of these fair launch platforms +and create an NFT. This hands-on experience will not only reinforce your +understanding of the tools but also boost your confidence in your ability to use +them effectively in the future. + + + +Push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=296745ac-503c-4b14-b3a6-b51c5004c165)! + + diff --git a/content/courses/tokens-and-nfts/token-program-advanced.mdx b/content/courses/tokens-and-nfts/token-program-advanced.mdx new file mode 100644 index 000000000..25379f36e --- /dev/null +++ b/content/courses/tokens-and-nfts/token-program-advanced.mdx @@ -0,0 +1,448 @@ +--- +title: Token Burning and Delegation +objectives: + - Understand why and how to burn tokens + - Allow a token holder to allocate a limited amount of tokens to another + account to spend or burn using token delegation. +description: + "How to burn tokens, and approve/revoke token delegations on Solana." +--- + +### Summary + +- **Burning tokens** reduces the total supply of a token by removing them from + circulation. 
+- **Approving a delegate**, allows another account to transfer or burn a + specified amount of tokens from a token account while retaining original + account ownership. +- **Revoking a delegate**, removes their authority to act on behalf of the token + account owner. +- Each of these operations is facilitated through the `spl-token` library, + utilizing specific functions for each action. + +### Lesson + +In this lesson, we'll cover burning tokens and delegation. You may not have a +need for these in your own application, so if you're more interested in NFTs, +feel free to skip ahead to +[creating NFTs with Metaplex](/developers/courses/tokens-and-nfts/nfts-with-metaplex)! + +#### Burn Tokens + +Burning tokens is the process of decreasing the token supply of a given token +mint. Burning tokens removes the tokens from the given token account and from +broader circulation. + +To burn tokens using the `spl-token` library, use the +[`burn()`](https://solana-labs.github.io/solana-program-library/token/js/functions/burn.html#burn) +function. + +```typescript +import { burn } from "@solana/spl-token"; +``` + +```typescript +const transactionSignature = await burn( + connection, + payer, + account, + mint, + owner, + amount, +); +``` + +The `burn()` function requires the following arguments: + +- `connection`: JSON-RPC connection to the cluster. +- `payer`: The account responsible for paying transaction fees. +- `account`: The token account from which tokens will be burned. +- `mint`: The token mint associated with the token account. +- `owner`: The owner of the token account. +- `amount`: The number of tokens to burn. + +Under the hood, the `burn()` function creates a transaction using the +instruction obtained from +[`createBurnInstruction()`](https://solana-labs.github.io/solana-program-library/token/js/functions/createBurnInstruction.html#createBurnInstruction) +function. + +```typescript +import { PublicKey, Transaction } from "@solana/web3.js"; +import { createBurnInstruction } from "@solana/spl-token"; + +async function buildBurnTransaction( + account: PublicKey, + mint: PublicKey, + owner: PublicKey, + amount: number, +): Promise { + const transaction = new Transaction().add( + createBurnInstruction(account, mint, owner, amount), + ); + + return transaction; +} +``` + +#### Approve Delegate + +Approving a delegate is the process of authorizing another account to transfer +or burn tokens from a token account. The authority over the token account +remains with the original owner. The maximum number of tokens a delegate can +transfer or burn is defined when the owner approves the delegate. Only one +delegate can be associated with a token account at a time. + +To approve a delegate using the `spl-token` library, use the +[`approve()`](https://solana-labs.github.io/solana-program-library/token/js/functions/approve.html#approve) +function. + +```typescript +const transactionSignature = await approve( + connection, + payer, + account, + delegate, + owner, + amount, +); +``` + +The `approve()` function returns a `TransactionSignature` that can be viewed on +Solana Explorer. It requires the following arguments: + +- `connection`: The JSON-RPC connection to the cluster. +- `payer`: The account of the payer for the transaction. +- `account`: The token account to delegate tokens from. +- `delegate`: The account authorized to transfer or burn tokens. +- `owner`: The account of the owner of the token account. +- `amount`: The maximum number of tokens the delegate can transfer or burn. 
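+
+Note that `amount` here (and in `burn()` above) is denominated in the token's base units, so
+approving "50 tokens" on a mint with 2 decimals means passing `50 * 10 ** 2 = 5000`. Once the
+approval is confirmed, the delegate and the remaining delegated amount are recorded on the token
+account itself and can be read back with `getAccount()`. A minimal sketch, using a placeholder
+token account address:
+
+```typescript
+import { Connection, PublicKey, clusterApiUrl } from "@solana/web3.js";
+import { getAccount } from "@solana/spl-token";
+
+const connection = new Connection(clusterApiUrl("devnet"));
+
+// Substitute the token account you approved a delegate for
+const tokenAccountAddress = new PublicKey("YOUR_TOKEN_ACCOUNT_ADDRESS_HERE");
+
+const tokenAccount = await getAccount(connection, tokenAccountAddress);
+
+// `delegate` is null when no delegate is set; `delegatedAmount` is in base units
+console.log("Delegate:", tokenAccount.delegate?.toBase58() ?? "none");
+console.log(
+  "Delegated amount (base units):",
+  tokenAccount.delegatedAmount.toString(),
+);
+```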
+ +Under the hood, the `approve()` function creates a transaction with instructions +obtained from the +[`createApproveInstruction()`](https://solana-labs.github.io/solana-program-library/token/js/functions/createApproveInstruction.html#createApproveInstruction) +function. + +```typescript +import { PublicKey, Transaction } from "@solana/web3.js"; +import { createApproveInstruction } from "@solana/spl-token"; + +async function buildApproveTransaction( + account: PublicKey, + delegate: PublicKey, + owner: PublicKey, + amount: number, +): Promise { + const transaction = new Transaction().add( + createApproveInstruction(account, delegate, owner, amount), + ); + + return transaction; +} +``` + +#### Revoke Delegate + +A previously approved delegate for a token account can be revoked. Once revoked, +the delegate can no longer transfer tokens from the owner's token account. Any +untransferred amount from the previously approved tokens will no longer be +accessible by the delegate. + +To revoke a delegate using the `spl-token` library, use the +[`revoke()`](https://solana-labs.github.io/solana-program-library/token/js/functions/revoke.html#revoke) +function. + +```typescript +import { revoke } from "@solana/spl-token"; + +const transactionSignature = await revoke(connection, payer, account, owner); +``` + +The `revoke()` function returns a `TransactionSignature` that can be viewed on +Solana Explorer. This function requires the following arguments: + +- `connection`: The JSON-RPC connection to the cluster. +- `payer`: The account responsible for paying the transaction fees. +- `account`: The token account from which to revoke the delegate authority. +- `owner`: The account of the owner of the token account. + +Under the hood, the `revoke()` function generates a transaction using the +instructions from the +[`createRevokeInstruction()`](https://solana-labs.github.io/solana-program-library/token/js/functions/createRevokeInstruction.html#createRevokeInstruction) +function: + +```typescript +import { PublicKey, Transaction } from "@solana/web3.js"; +import { createRevokeInstruction } from "@solana/spl-token"; + +async function buildRevokeTransaction( + account: PublicKey, + owner: PublicKey, +): Promise { + const transaction = new Transaction().add( + createRevokeInstruction(account, owner), + ); + + return transaction; +} +``` + +### Lab + +This lab extends the concepts covered in the previous lesson on the +[Token Program](/developers/courses/tokens-and-nfts/token-program). + +#### 1. Delegating Tokens + +We will use the `approve()` function from the `spl-token` library to authorize a +delegate to transfer or burn up to 50 tokens from our token account. + +Similar to the process of +[Transferring Tokens](/developers/courses/tokens-and-nfts/token-program#transferring-tokens) +in the previous lab, you can +[add a second account on Devnet](/developers/courses/intro-to-solana/intro-to-cryptography) +if desired or collaborate with a friend who has a Devnet account. + +Create a new file named `delegate-tokens.ts`. For this example, we are using the +System Program ID as a delegate for demonstration, but you can use an actual +address that you want to delegate. 
+ +```typescript title="delegate-tokens.ts" +import "dotenv/config"; +import { + getExplorerLink, + getKeypairFromEnvironment, +} from "@solana-developers/helpers"; +import { + Connection, + PublicKey, + clusterApiUrl, + SystemProgram, +} from "@solana/web3.js"; +import { approve, getOrCreateAssociatedTokenAccount } from "@solana/spl-token"; + +const DEVNET_URL = clusterApiUrl("devnet"); +const TOKEN_DECIMALS = 2; +const DELEGATE_AMOUNT = 50; +const MINOR_UNITS_PER_MAJOR_UNITS = 10 ** TOKEN_DECIMALS; + +// Initialize connection and load user keypair +const connection = new Connection(DEVNET_URL); +const user = getKeypairFromEnvironment("SECRET_KEY"); + +console.log(`🔑 Loaded keypair. Public key: ${user.publicKey.toBase58()}`); + +// Replace this with your actual address +// For this example, we will be using System Program's ID as a delegate +const delegatePublicKey = new PublicKey(SystemProgram.programId); + +// Substitute your token mint address +const tokenMintAddress = new PublicKey("YOUR_TOKEN_MINT_ADDRESS_HERE"); + +try { + // Get or create the user's token account + const userTokenAccount = await getOrCreateAssociatedTokenAccount( + connection, + user, + tokenMintAddress, + user.publicKey, + ); + + // Approve the delegate + const approveTransactionSignature = await approve( + connection, + user, + userTokenAccount.address, + delegatePublicKey, + user.publicKey, + DELEGATE_AMOUNT * MINOR_UNITS_PER_MAJOR_UNITS, + ); + + const explorerLink = getExplorerLink( + "transaction", + approveTransactionSignature, + "devnet", + ); + + console.log(`✅ Delegate approved. Transaction: ${explorerLink}`); +} catch (error) { + console.error( + `Error: ${error instanceof Error ? error.message : String(error)}`, + ); +} +``` + +Replace `YOUR_TOKEN_MINT_ADDRESS_HERE` with your token mint address obtained +from the previous lesson +[Token Program](/developers/courses/tokens-and-nfts/token-program#create-the-token-mint). + +Run the script using `npx esrun delegate-tokens.ts`. You should see: + +```bash +🔑 Loaded keypair. Public key: GprrWv9r8BMxQiWea9MrbCyK7ig7Mj8CcseEbJhDDZXM +✅ Delegate approved. Transaction: https://explorer.solana.com/tx/21tX6L7zk5tkHeoD7V1JYYW25VAWRfQrJPnxDcMXw94yuFbHxX4UZEgS6k6co9dBWe7PqFoMoWEVfbVA92Dk4xsQ?cluster=devnet +``` + +Open the Explorer link, you will see the ‌approval information. + +![Delegate Tokens](/assets/courses/unboxed/delegate-token.png) + +#### 2. Revoke Delegate + +Let's revoke the `delegate` using the `spl-token` library's `revoke()` function. + +Revoke will set the delegate for the token account to null and reset the +delegated amount to 0. + +Create a new file `revoke-approve-tokens.ts`. + +```typescript title="revoke-approve-tokens.ts" +import "dotenv/config"; +import { + getExplorerLink, + getKeypairFromEnvironment, +} from "@solana-developers/helpers"; +import { Connection, PublicKey, clusterApiUrl } from "@solana/web3.js"; +import { revoke, getOrCreateAssociatedTokenAccount } from "@solana/spl-token"; + +const DEVNET_URL = clusterApiUrl("devnet"); +// Substitute your token mint address +const TOKEN_MINT_ADDRESS = "YOUR_TOKEN_MINT_ADDRESS_HERE"; + +const connection = new Connection(DEVNET_URL); +const user = getKeypairFromEnvironment("SECRET_KEY"); + +console.log(`🔑 Loaded keypair. 
Public key: ${user.publicKey.toBase58()}`); + +try { + const tokenMintAddress = new PublicKey(TOKEN_MINT_ADDRESS); + + const userTokenAccount = await getOrCreateAssociatedTokenAccount( + connection, + user, + tokenMintAddress, + user.publicKey, + ); + + const revokeTransactionSignature = await revoke( + connection, + user, + userTokenAccount.address, + user.publicKey, + ); + + const explorerLink = getExplorerLink( + "transaction", + revokeTransactionSignature, + "devnet", + ); + + console.log(`✅ Revoke Delegate Transaction: ${explorerLink}`); +} catch (error) { + console.error( + `Error: ${error instanceof Error ? error.message : String(error)}`, + ); +} +``` + +Replace `YOUR_TOKEN_MINT_ADDRESS_HERE` with your mint token address obtained +from the previous lesson +[Token Program](/developers/courses/tokens-and-nfts/token-program#create-the-token-mint). + +Run the script using `npx esrun revoke-approve-tokens.ts`. You should see: + +```bash +🔑 Loaded keypair. Public key: GprrWv9r8BMxQiWea9MrbCyK7ig7Mj8CcseEbJhDDZXM +✅ Revoke Delegate Transaction: https://explorer.solana.com/tx/YTc2Vd41SiGiHf3iEPkBH3y164fMbV2TSH2hbe7WypT6K6Q2b3f31ryFWhypmBK2tXmvGYjXeYbuwxHeJvnZZX8?cluster=devnet +``` + +Open the Explorer link, you will see the revoke information. + +![Revoke Approve Tokens](/assets/courses/unboxed/revoke-approve-tokens.png) + +#### 3. Burn Tokens + +Finally, let's remove some tokens from circulation by burning them. + +Use the `spl-token` library's `burn()` function to remove half of your tokens +from circulation. Now, call this function to burn 5 of the user's tokens. + +Create a new file `burn-tokens.ts`. + +```typescript title="burn-tokens.ts" +import "dotenv/config"; +import { + getExplorerLink, + getKeypairFromEnvironment, +} from "@solana-developers/helpers"; +import { Connection, PublicKey, clusterApiUrl } from "@solana/web3.js"; +import { getOrCreateAssociatedTokenAccount, burn } from "@solana/spl-token"; + +const DEVNET_URL = clusterApiUrl("devnet"); +const TOKEN_DECIMALS = 2; +const BURN_AMOUNT = 5; +// Substitute your token mint address +const TOKEN_MINT_ADDRESS = "YOUR_TOKEN_MINT_ADDRESS_HERE"; + +const connection = new Connection(DEVNET_URL); +const user = getKeypairFromEnvironment("SECRET_KEY"); + +console.log(`🔑 Loaded keypair. Public key: ${user.publicKey.toBase58()}`); + +try { + const tokenMintAccount = new PublicKey(TOKEN_MINT_ADDRESS); + + const userTokenAccount = await getOrCreateAssociatedTokenAccount( + connection, + user, + tokenMintAccount, + user.publicKey, + ); + + const burnAmount = BURN_AMOUNT * 10 ** TOKEN_DECIMALS; + + const transactionSignature = await burn( + connection, + user, + userTokenAccount.address, + tokenMintAccount, + user, + burnAmount, + ); + + const explorerLink = getExplorerLink( + "transaction", + transactionSignature, + "devnet", + ); + + console.log(`✅ Burn Transaction: ${explorerLink}`); +} catch (error) { + console.error( + `Error: ${error instanceof Error ? error.message : String(error)}`, + ); +} +``` + +Replace `YOUR_TOKEN_MINT_ADDRESS_HERE` with your mint token address obtained +from the previous chapter +[Token Program](/developers/courses/tokens-and-nfts/token-program#create-the-token-mint). + +Run the script using `npx esrun burn-tokens.ts`. You should see: + +```bash +🔑 Loaded keypair. 
Public key: GprrWv9r8BMxQiWea9MrbCyK7ig7Mj8CcseEbJhDDZXM +✅ Burn Transaction: https://explorer.solana.com/tx/5Ufipgvsi5aLzzcr8QQ7mLXHyCwBDqsPxGTPinvFpjSiARnEDgFiPbD2ZiaDkkmwKDMoQ94bf5uqF2M7wjFWcKuv?cluster=devnet +``` + +Open the Explorer link, you will see the burn information. + +![Burn Tokens](/assets/courses/unboxed/burn-tokens.png) + +Well done! You've now completed the lab. + + + +Push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=72cab3b8-984b-4b09-a341-86800167cfc7)! + + diff --git a/content/courses/tokens-and-nfts/token-program.mdx b/content/courses/tokens-and-nfts/token-program.mdx new file mode 100644 index 000000000..30a974fc9 --- /dev/null +++ b/content/courses/tokens-and-nfts/token-program.mdx @@ -0,0 +1,899 @@ +--- +title: Create Tokens With The Token Program +objectives: + - Create token mints + - Create token metadata + - Create associated token accounts + - Mint tokens + - Transfer tokens +description: + "Understand how tokens - both regular tokens and NFTs - are created, stored, + and transferred on Solana. " +--- + +### Summary + +- You may recall SOL is the 'native token' of Solana. All other tokens, fungible + and non-fungible tokens (NFTs), are called **SPL Tokens**. +- The **Token Program** contains instructions for creating and interacting with + SPL Tokens. +- **Token Mints** are accounts that define a specific token. This includes + information about the token itself (like how many decimals it has), the + account allowed to mint more tokens (called the **mint authority**), and where + to find more information about the token like a description, image, etc. The + mint authority can use the token mint to make more tokens! +- **Token Accounts** hold tokens of a specific Token Mint. For most users, their + balances of each token mint are stored in **Associated Token Accounts** - + accounts with addresses made from their wallet address and the token's mint. +- Creating Token Mints and Token Accounts requires allocating **rent** in SOL. + The rent for a Token Account can be refunded when the account is closed. + Additionally, tokens created with the + [Token Extensions Program](/developers/courses/token-extensions/close-mint) + can also close Token Mints. + +### Lesson + +The Token Program is one of many programs made available by the Solana Program +Library (SPL). It contains instructions for creating and interacting with SPL +Tokens. These tokens represent all non-native (i.e. not SOL) tokens on the +Solana network. + +This lesson will focus on the basics of creating and managing a new SPL Token +using the Token Program: + +1. Creating a new Token Mint +2. Creating Token Accounts +3. Minting +4. Transferring tokens from one holder to another + +We'll be approaching this from the client side of the development process using +the `@solana/spl-token` Javascript library. + +#### Token Mint + +To create a new SPL Token you first have to create a Token Mint. A Token Mint is +an account that holds data about a specific token. + +As an example, let's look at +[USD Coin (USDC) on the Solana Explorer](https://explorer.solana.com/address/EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v). +USDC's Token Mint address is `EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v`. 
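+
+You can also read the same mint data from code with `getMint` from `@solana/spl-token`. A small
+sketch (USDC's mint lives on mainnet, so this connects to `mainnet-beta`):
+
+```typescript
+import { Connection, PublicKey, clusterApiUrl } from "@solana/web3.js";
+import { getMint } from "@solana/spl-token";
+
+const connection = new Connection(clusterApiUrl("mainnet-beta"));
+const usdcMint = new PublicKey("EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v");
+
+// Fetch and decode the Token Mint account
+const mintInfo = await getMint(connection, usdcMint);
+
+console.log("Decimals:", mintInfo.decimals);
+console.log("Supply (base units):", mintInfo.supply.toString());
+console.log("Mint authority:", mintInfo.mintAuthority?.toBase58());
+console.log("Freeze authority:", mintInfo.freezeAuthority?.toBase58());
+```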
+With the explorer, we can see the particular details about USDC's Token Mint +such as the current supply of tokens, the addresses of the mint and freeze +authorities, and the decimal precision of the token: + +![USDC Token Mint](/assets/courses/unboxed/token-program-usdc-mint.png) + +To create a new Token Mint, you need to send the right transaction instructions +to the Token Program. To do this, we'll use the `createMint` function from +`@solana/spl-token`. + +```typescript +const tokenMint = await createMint( + connection, + payer, + mintAuthority, + freezeAuthority, + decimal, +); +``` + +The `createMint` function returns the `publicKey` of the new token mint. This +function requires the following arguments: + +- `connection` - the JSON-RPC connection to the cluster +- `payer` - the public key of the payer for the transaction +- `mintAuthority` - the account that is authorized to do the actual minting of + tokens from the token mint. +- `freezeAuthority` - an account authorized to freeze the tokens in a token + account. If freezing is not a desired attribute, the parameter can be set to + null +- `decimals` - specifies the desired decimal precision of the token + +When creating a new mint from a script that has access to your secret key, you +can simply use the `createMint` function. However, if you were to build a +website to allow users to create a new token mint, you would need to do so with +the user's secret key without making them expose it to the browser. In that +case, you would want to build and submit a transaction with the right +instructions. + +Under the hood, the `createMint` function is simply creating a transaction that +contains two instructions: + +1. Create a new account +2. Initialize a new mint + +This would look as follows: + +```typescript +import * as web3 from "@solana/web3.js"; +import * as token from "@solana/spl-token"; + +async function buildCreateMintTransaction( + connection: web3.Connection, + payer: web3.PublicKey, + decimals: number, +): Promise { + const lamports = await token.getMinimumBalanceForRentExemptMint(connection); + const accountKeypair = web3.Keypair.generate(); + const programId = token.TOKEN_PROGRAM_ID; + + const transaction = new web3.Transaction().add( + web3.SystemProgram.createAccount({ + fromPubkey: payer, + newAccountPubkey: accountKeypair.publicKey, + space: token.MINT_SIZE, + lamports, + programId, + }), + token.createInitializeMintInstruction( + accountKeypair.publicKey, + decimals, + payer, + payer, + programId, + ), + ); + + return transaction; +} +``` + +When manually building the instructions to create a new token mint, make sure +you add the instructions for creating the account and initializing the mint to +the _same transaction_. If you were to do each step in a separate transaction, +it's theoretically possible for somebody else to take the account you create and +initialize it for their own mint. + +#### Rent and Rent Exemption + +Note that the first line in the function body of the previous code snippet +contains a call to `getMinimumBalanceForRentExemptMint`, the result of which is +passed into the `createAccount` function. This is part of account initialization +called rent exemption. + +Until recently, all accounts on Solana were required to do one of the following +to avoid being deallocated: + +1. Pay rent at specific intervals +2. 
Deposit enough SOL upon initialization to be considered rent-exempt + +Recently, the first option was done away with and it became a requirement to +deposit enough SOL for rent exemption when initializing a new account. + +In this case, we're creating a new account for a token mint so we use +`getMinimumBalanceForRentExemptMint` from the `@solana/spl-token` library. +However, this concept applies to all accounts and you can use the more generic +`getMinimumBalanceForRentExemption` method on `Connection` for other accounts +you may need to create. + +#### Token Account + +Before you can mint tokens (issue new supply), you need a Token Account to hold +the newly issued tokens. + +A Token Account holds tokens of a specific "mint" and has a specified "owner" of +the account. Only the owner is authorized to decrease the Token Account balance +(transfer, burn, etc.) while anyone can send tokens to the Token Account to +increase its balance. + +You can use the `spl-token` library's `createAccount` function to create the new +Token Account: + +```typescript +const tokenAccount = await createAccount( + connection, + payer, + mint, + owner, + keypair, +); +``` + +The `createAccount` function returns the `publicKey` of the new token account. +This function requires the following arguments: + +- `connection` - the JSON-RPC connection to the cluster +- `payer` - the account of the payer for the transaction +- `mint` - the token mint that the new token account is associated with +- `owner` - the account of the owner of the new token account +- `keypair` - this is an optional parameter for specifying the new token account + address. If no keypair is provided, the `createAccount` function defaults to a + derivation from the associated `mint` and `owner` accounts. + +Please note that this `createAccount` function is different from the +`createAccount` function shown above when we looked under the hood of the +`createMint` function. Previously we used the `createAccount` function on +`SystemProgram` to return the instruction for creating all accounts. The +`createAccount` function here is a helper function in the `spl-token` library +that submits a transaction with two instructions. The first creates the account +and the second initializes the account as a Token Account. + +Like with creating a Token Mint, if we needed to build the transaction for +`createAccount` manually we could duplicate what the function is doing under the +hood: + +1. Use `getMint` to retrieve the data associated with the `mint` +2. Use `getAccountLenForMint` to calculate the space needed for the token + account +3. Use `getMinimumBalanceForRentExemption` to calculate the lamports needed for + rent exemption +4. Create a new transaction using `SystemProgram.createAccount` and + `createInitializeAccountInstruction`. Note that this `createAccount` is from + `@solana/web3.js` and used to create a generic new account. 
The + `createInitializeAccountInstruction` uses this new account to initialize the + new token account + +```typescript +import * as web3 from "@solana/web3.js"; +import * as token from "@solana/spl-token"; + +async function buildCreateTokenAccountTransaction( + connection: web3.Connection, + payer: web3.PublicKey, + mint: web3.PublicKey, +): Promise { + const mintState = await token.getMint(connection, mint); + const accountKeypair = await web3.Keypair.generate(); + const space = token.getAccountLenForMint(mintState); + const lamports = await connection.getMinimumBalanceForRentExemption(space); + const programId = token.TOKEN_PROGRAM_ID; + + const transaction = new web3.Transaction().add( + web3.SystemProgram.createAccount({ + fromPubkey: payer, + newAccountPubkey: accountKeypair.publicKey, + space, + lamports, + programId, + }), + token.createInitializeAccountInstruction( + accountKeypair.publicKey, + mint, + payer, + programId, + ), + ); + + return transaction; +} +``` + +#### Associated Token Accounts + +An Associated Token Account stores tokens in an address made from: + +- The owner's public key +- The token mint + +For example, Bob's USDC is stored in an Associated Token Account made from Bob's +public key, and the USDC mint address. + +Associated Token Accounts provide a deterministic way to find the Token Account +owned by a specific `publicKey` for a specific token. + +There are other ways to create token accounts (particularly for onchain +programs), but nearly all the time you want to store tokens for a user, you'll +want it to be an Associated Token Account. Even if the user doesn't already have +an ATA for that token, you can simply find the address and make the account for +them. + +![ATAs are PDAs](/assets/courses/unboxed/atas-are-pdas.svg) + +You can create an associated token account using the `spl-token` library's +`createAssociatedTokenAccount` function. + +```typescript +const associatedTokenAccount = await createAssociatedTokenAccount( + connection, + payer, + mint, + owner, +); +``` + +This function returns the `publicKey` of the new associated token account and +requires the following arguments: + +- `connection` - the JSON-RPC connection to the cluster +- `payer` - the account of the payer for the transaction +- `mint` - the token mint that the new token account is associated with +- `owner` - the account of the owner of the new token account + +You can also use `getOrCreateAssociatedTokenAccount` to get the Token Account +associated with a given address or create it if it doesn't exist. For example, +if you were writing code to airdrop tokens to a given user, you'd likely use +this function to ensure that the token account associated with the given user +gets created if it doesn't already exist. + +Under the hood, `createAssociatedTokenAccount` is doing two things: + +1. Using `getAssociatedTokenAddress` to derive the associated token account + address from the `mint` and `owner` +2. 
Building a transaction using instructions from + `createAssociatedTokenAccountInstruction` + +```typescript +import * as web3 from "@solana/web3.js"; +import * as token from "@solana/spl-token"; + +async function buildCreateAssociatedTokenAccountTransaction( + payer: web3.PublicKey, + mint: web3.PublicKey, +): Promise { + const associatedTokenAddress = await token.getAssociatedTokenAddress( + mint, + payer, + false, + ); + + const transaction = new web3.Transaction().add( + token.createAssociatedTokenAccountInstruction( + payer, + associatedTokenAddress, + payer, + mint, + ), + ); + + return transaction; +} +``` + +#### Mint Tokens + +Minting tokens is the process of issuing new tokens into circulation. When you +mint tokens, you increase the supply of the token mint and deposit the newly +minted tokens into a token account. Only the mint authority of a token mint is +allowed to mint new tokens. + +To mint tokens using the `spl-token` library, you can use the `mintTo` function. + +```typescript +const transactionSignature = await mintTo( + connection, + payer, + mint, + destination, + authority, + amount, +); +``` + +The `mintTo` function returns a `TransactionSignature` that can be viewed on the +Solana Explorer. The `mintTo` function requires the following arguments: + +- `connection` - the JSON-RPC connection to the cluster +- `payer` - the account of the payer for the transaction +- `mint` - the token mint that the new token account is associated with +- `destination` - the token account that tokens will be minted to +- `authority` - the account authorized to mint tokens +- `amount` - the raw amount of tokens to mint outside of decimals, e.g. if + Scrooge Coin mint's decimals property was set to 2 then to get 1 full Scrooge + Coin you would need to set this property to 100 + +It's not uncommon to update the mint authority on a token mint to null after the +tokens have been minted. This would set a maximum supply and ensure no tokens +can be minted in the future. Conversely, minting authority could be granted to a +program so tokens could be automatically minted at regular intervals or +according to programmable conditions. + +Under the hood, the `mintTo` function simply creates a transaction with the +instructions obtained from the `createMintToInstruction` function. + +```typescript +import * as web3 from "@solana/web3.js"; +import * as token from "@solana/spl-token"; + +async function buildMintToTransaction( + authority: web3.PublicKey, + mint: web3.PublicKey, + amount: number, + destination: web3.PublicKey, +): Promise { + const transaction = new web3.Transaction().add( + token.createMintToInstruction(mint, destination, authority, amount), + ); + + return transaction; +} +``` + +#### Transfer Tokens + +SPL Token transfers require both the sender and receiver to have token accounts +for the mint of the tokens being transferred. The tokens are transferred from +the sender's token account to the receiver's token account. + +You can use `getOrCreateAssociatedTokenAccount` when obtaining the receiver's +associated token account to ensure their token account exists before the +transfer. If the account doesn't exist already, this function will create it and +the payer on the transaction will be debited the lamports required for the +account creation. + +Once you know the receiver's token account address, you transfer tokens using +the `spl-token` library's `transfer` function. 
+ +```typescript +const transactionSignature = await transfer( + connection, + payer, + source, + destination, + owner, + amount, +); +``` + +The `transfer` function returns a `TransactionSignature` that can be viewed on +the Solana Explorer. The `transfer` function requires the following arguments: + +- `connection` - the JSON-RPC connection to the cluster +- `payer` - the account of the payer for the transaction +- `source` - the token account sending tokens +- `destination` - the token account receiving tokens +- `owner` - the account of the owner of the `source` token account +- `amount` - the number of tokens to transfer + +Under the hood, the `transfer` function simply creates a transaction with the +instructions obtained from the `createTransferInstruction` function: + +```typescript +import * as web3 from "@solana/web3.js"; +import * as token from "@solana/spl-token"; + +async function buildTransferTransaction( + source: web3.PublicKey, + destination: web3.PublicKey, + owner: web3.PublicKey, + amount: number, +): Promise { + const transaction = new web3.Transaction().add( + token.createTransferInstruction(source, destination, owner, amount), + ); + + return transaction; +} +``` + +### Lab + +We're going to use the Token Program to create a Token Mint, create an +Associated Token Account, mint tokens, transfer tokens, and burn tokens. + +Assuming you already have a `.env` file with a `SECRET_KEY` setup per +[Cryptography fundamentals](/developers/courses/intro-to-solana/intro-to-cryptography). + +```bash +npm i @solana/web3.js@1 @solana/spl-token @solana-developers/helpers@2 esrun +``` + +#### Create the Token Mint + +Create an empty file called `create-token-mint.ts`. After loading our keypairs, +we'll call `createMint()`, setting our `user` as the `payer`, `mintAuthority`, +and `freezeAuthority`. + +Think of the token mint as the factory that makes tokens. Our `user`, as the +`mintAuthority` is the person that runs the factory. + +```typescript title="create-token-mint.ts" +import { createMint } from "@solana/spl-token"; +import "dotenv/config"; +import { + getKeypairFromEnvironment, + getExplorerLink, +} from "@solana-developers/helpers"; +import { Connection, clusterApiUrl } from "@solana/web3.js"; + +const connection = new Connection(clusterApiUrl("devnet")); + +const user = getKeypairFromEnvironment("SECRET_KEY"); + +console.log( + `🔑 Loaded our keypair securely, using an env file! Our public key is: ${user.publicKey.toBase58()}`, +); + +// This is a shortcut that runs: +// SystemProgram.createAccount() +// token.createInitializeMintInstruction() +// See https://www.soldev.app/course/token-program +const tokenMint = await createMint(connection, user, user.publicKey, null, 2); + +const link = getExplorerLink("address", tokenMint.toString(), "devnet"); + +console.log(`✅ Finished! Created token mint: ${link}`); +``` + +Run the script using `npx esrun create-token-mint.ts`. You should see + +```bash +✅ Finished! Created token mint: https://explorer.solana.com/address/HYeUCAqdsQBkqQNHRoBPov42QySDhwM7zAqiorToosbz?cluster=devnet +``` + +Open up Solana Explorer and look at your new token! + +Remember the address of the mint! We'll use this later. + +#### Make some token metadata + +You'll notice our token account does not have a pretty symbol and shows up as +'Unknown Token' in Explorer. That's because our token has no metadata! Let's add +some. + +We'll use the Metaplex `mpl-token-metadata` Program, version 2. 
This is the most +popular version of `mpl-token-metadata` and saves significant complexity +compared to the newer version 3. + +```bash +npm i @metaplex-foundation/mpl-token-metadata@2 +``` + +Create a new file called `create-token-metadata.ts` + +```typescript title="create-token-metadata.ts" +// This uses "@metaplex-foundation/mpl-token-metadata@2" to create tokens +import "dotenv/config"; +import { + getKeypairFromEnvironment, + getExplorerLink, +} from "@solana-developers/helpers"; +import { + Connection, + clusterApiUrl, + PublicKey, + Transaction, + sendAndConfirmTransaction, +} from "@solana/web3.js"; +import { createCreateMetadataAccountV3Instruction } from "@metaplex-foundation/mpl-token-metadata"; + +const user = getKeypairFromEnvironment("SECRET_KEY"); + +const connection = new Connection(clusterApiUrl("devnet")); + +console.log( + `🔑 We've loaded our keypair securely, using an env file! Our public key is: ${user.publicKey.toBase58()}`, +); + +const TOKEN_METADATA_PROGRAM_ID = new PublicKey( + "metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s", +); + +// Substitute in your token mint account +const tokenMintAccount = new PublicKey("YOUR_TOKEN_MINT_ADDRESS_HERE"); + +const metadataData = { + name: "Solana Training Token", + symbol: "TRAINING", + // Arweave / IPFS / Pinata etc link using metaplex standard for offchain data + uri: "https://arweave.net/1234", + sellerFeeBasisPoints: 0, + creators: null, + collection: null, + uses: null, +}; + +const metadataPDAAndBump = PublicKey.findProgramAddressSync( + [ + Buffer.from("metadata"), + TOKEN_METADATA_PROGRAM_ID.toBuffer(), + tokenMintAccount.toBuffer(), + ], + TOKEN_METADATA_PROGRAM_ID, +); + +const metadataPDA = metadataPDAAndBump[0]; + +const transaction = new Transaction(); + +const createMetadataAccountInstruction = + createCreateMetadataAccountV3Instruction( + { + metadata: metadataPDA, + mint: tokenMintAccount, + mintAuthority: user.publicKey, + payer: user.publicKey, + updateAuthority: user.publicKey, + }, + { + createMetadataAccountArgsV3: { + collectionDetails: null, + data: metadataData, + isMutable: true, + }, + }, + ); + +transaction.add(createMetadataAccountInstruction); + +const transactionSignature = await sendAndConfirmTransaction( + connection, + transaction, + [user], +); + +const transactionLink = getExplorerLink( + "transaction", + transactionSignature, + "devnet", +); + +console.log(`✅ Transaction confirmed, explorer link is: ${transactionLink}`); + +const tokenMintLink = getExplorerLink( + "address", + tokenMintAccount.toString(), + "devnet", +); + +console.log(`✅ Look at the token mint again: ${tokenMintLink}`); +``` + +Replace `YOUR_TOKEN_MINT_ADDRESS_HERE` with your address of the mint and run the +script using `npx esrun create-token-metadata.ts`. + +You'll now see Solana Explorer is updated, showing the token's name and symbol +on the mint! + +Note that Solana Explorer will display a warning like: + +> Warning! Token names and logos are not unique. This token may have spoofed its +> name and logo to look like another token. Verify the token's mint address to +> ensure it is correct. + +This warning is accurate - indeed anyone can make any token have any symbol or +name they like. However for your reference, if you are making an original token +that becomes very well known, Solana Explorer uses a whitelist based on the +[Unified Token List API](https://github.com/solflare-wallet/utl-api). 
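+
+Before moving on, you can optionally sanity-check that the metadata account now
+exists. The sketch below (using only `@solana/web3.js`, with your mint address
+substituted in) re-derives the metadata PDA and fetches it:
+
+```typescript
+import { Connection, PublicKey, clusterApiUrl } from "@solana/web3.js";
+
+const connection = new Connection(clusterApiUrl("devnet"));
+
+const TOKEN_METADATA_PROGRAM_ID = new PublicKey(
+  "metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s",
+);
+
+// Substitute in your token mint account
+const tokenMintAccount = new PublicKey("YOUR_TOKEN_MINT_ADDRESS_HERE");
+
+const [metadataPDA] = PublicKey.findProgramAddressSync(
+  [
+    Buffer.from("metadata"),
+    TOKEN_METADATA_PROGRAM_ID.toBuffer(),
+    tokenMintAccount.toBuffer(),
+  ],
+  TOKEN_METADATA_PROGRAM_ID,
+);
+
+const accountInfo = await connection.getAccountInfo(metadataPDA);
+console.log(
+  accountInfo
+    ? `✅ Metadata account found with ${accountInfo.data.length} bytes of data`
+    : "❌ Metadata account not found - double-check your mint address",
+);
+```
+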
+ +#### Create an Associated Token Account to store the tokens + +Now that we've created the mint, let's create a new Associated Token Account so +that someone can store our tokens. This Associated Token Account could be for +our wallet (if we, as the token mint authority, want to mint tokens to our +address) or anyone else we know with a devnet wallet! + +Create an empty file called `create-token-account.ts`. Then use +`getOrCreateAssociatedTokenAccount()` to get an associated token account based +on a wallet and our mint address, making the account if it needs to. + +Remember to substitute in your token mint address below! + +```typescript title="create-token-account.ts" +import { getOrCreateAssociatedTokenAccount } from "@solana/spl-token"; +import "dotenv/config"; +import { + getExplorerLink, + getKeypairFromEnvironment, +} from "@solana-developers/helpers"; +import { Connection, PublicKey, clusterApiUrl } from "@solana/web3.js"; +const connection = new Connection(clusterApiUrl("devnet")); + +const user = getKeypairFromEnvironment("SECRET_KEY"); + +console.log( + `🔑 Loaded our keypair securely, using an env file! Our public key is: ${user.publicKey.toBase58()}`, +); + +// Substitute in your token mint account from create-token-mint.ts +const tokenMintAccount = new PublicKey("YOUR_TOKEN_MINT"); + +// Here we are making an associated token account for our own address, but we can +// make an ATA on any other wallet in devnet! +// const recipient = new PublicKey("SOMEONE_ELSES_DEVNET_ADDRESS"); +const recipient = user.publicKey; + +const tokenAccount = await getOrCreateAssociatedTokenAccount( + connection, + user, + tokenMintAccount, + recipient, +); + +console.log(`Token Account: ${tokenAccount.address.toBase58()}`); + +const link = getExplorerLink( + "address", + tokenAccount.address.toBase58(), + "devnet", +); + +console.log(`✅ Created token Account: ${link}`); +``` + +Run the script using `npx esrun create-token-account.ts`. You should see: + +```bash +✅ Success! Created token account: https://explorer.solana.com/address/CTjoLdEeK8rk4YWYW9ZqACyjHexbYKH3hEoagHxLVEFs?cluster=devnet +``` + +Open the token account in Solana Explorer. Look at the owner - it's the account +you made the ATA for! The balance will be zero, as we haven't sent any tokens +there yet. Let's mint some tokens there and fix that! + +Remember the address of your token account ! We'll use it to mint tokens. + +#### Mint Tokens + +Now that we have a token mint and a token account, let's mint tokens to the +token account. Recall that we set the `user` as the `mintAuthority` for the +`mint` we created. + +Create an empty file called `mint-tokens.ts`. Then uses the `spl-token` function +`mintTo()` to mint tokens. Remember to substitute in your token mint address and +token account address below! + +```typescript title="mint-tokens.ts" +import { mintTo } from "@solana/spl-token"; +import "dotenv/config"; +import { + getExplorerLink, + getKeypairFromEnvironment, +} from "@solana-developers/helpers"; +import { Connection, PublicKey, clusterApiUrl } from "@solana/web3.js"; +const connection = new Connection(clusterApiUrl("devnet")); + +// Our token has two decimal places +const MINOR_UNITS_PER_MAJOR_UNITS = Math.pow(10, 2); + +const user = getKeypairFromEnvironment("SECRET_KEY"); + +// Substitute in your token mint account from create-token-mint.ts +const tokenMintAccount = new PublicKey("YOUR_TOKEN_MINT_ACCOUNT"); + +// Substitute in your own, or a friend's token account address, based on the previous step. 
+const recipientAssociatedTokenAccount = new PublicKey( + "RECIPIENT_TOKEN_ACCOUNT", +); + +const transactionSignature = await mintTo( + connection, + user, + tokenMintAccount, + recipientAssociatedTokenAccount, + user, + 10 * MINOR_UNITS_PER_MAJOR_UNITS, +); + +const link = getExplorerLink("transaction", transactionSignature, "devnet"); + +console.log(`✅ Success! Mint Token Transaction: ${link}`); +``` + +Run the script using `npx esrun mint-tokens.ts`. You should see: + +```bash +✅ Success! Mint Token Transaction: https://explorer.solana.com/tx/36U9ELyJ2VAZSkeJKj64vUh9cEzVKWznESyqFCJ92sj1KgKwrFH5iwQsYmjRQDUN2uVhcbW8AVDsNaiNuPZ7n9m4?cluster=devnet +``` + +Open Explorer, and see the transaction and the new tokens in the recipient's +account! + +#### Transfer Tokens + +Next, let's transfer some of the tokens we just minted using the `spl-token` +library's `transfer` function. You can +[add a second account on devnet](/developers/courses/intro-to-solana/intro-to-cryptography) +if you like, or find a friend who has a devnet account and send them your token! + +As you saw in Explorer, the tokens currently reside in an Associated Token +Account attached to our wallet. We don't have to remember the address for our +associated token account - we can just look it up using +`getOrCreateAssociatedTokenAccount()` and provide our wallet address and the +mint of the token we want to send. Likewise, we can find (or make) an ATA for +our recipient to hold this token too. + +Create an empty file called `transfer-tokens.ts`. Then replace +`YOUR_RECIPIENT_HERE` with your recipient public key and replace +`YOUR_TOKEN_MINT_ADDRESS_HERE` with your token mint address. + +```typescript title="transfer-tokens.ts" +import "dotenv/config"; +import { + getExplorerLink, + getKeypairFromEnvironment, +} from "@solana-developers/helpers"; +import { Connection, PublicKey, clusterApiUrl } from "@solana/web3.js"; +import { getOrCreateAssociatedTokenAccount, transfer } from "@solana/spl-token"; +const connection = new Connection(clusterApiUrl("devnet")); + +const sender = getKeypairFromEnvironment("SECRET_KEY"); + +console.log( + `🔑 Loaded our keypair securely, using an env file! Our public key is: ${sender.publicKey.toBase58()}`, +); + +// Add the recipient public key here. +const recipient = new PublicKey("YOUR_RECIPIENT_HERE"); + +// Substitute in your token mint account +const tokenMintAccount = new PublicKey("YOUR_TOKEN_MINT_ADDRESS_HERE"); + +// Our token has two decimal places +const MINOR_UNITS_PER_MAJOR_UNITS = Math.pow(10, 2); + +console.log(`💸 Attempting to send 1 token to ${recipient.toBase58()}...`); + +// Get or create the source token account to store this token +const sourceTokenAccount = await getOrCreateAssociatedTokenAccount( + connection, + sender, + tokenMintAccount, + sender.publicKey, +); + +// Get or create the destination token account to store this token +const destinationTokenAccount = await getOrCreateAssociatedTokenAccount( + connection, + sender, + tokenMintAccount, + recipient, +); + +// Transfer the tokens +const signature = await transfer( + connection, + sender, + sourceTokenAccount.address, + destinationTokenAccount.address, + sender, + 1 * MINOR_UNITS_PER_MAJOR_UNITS, +); + +const explorerLink = getExplorerLink("transaction", signature, "devnet"); + +console.log(`✅ Transaction confirmed, explorer link is: ${explorerLink}`); +``` + +Run the script using `npx esrun transfer-tokens.ts`. 
You should see: + +```bash +✅ Transaction confirmed, explorer link is: https://explorer.solana.com/tx/SgV2j2DkaErYf7ERiB11USoZzGqAk8HPEqVJLP8HWdz9M61FSFgyEMXJycHQtfCooCAPBom7Vi3akEAwSUHQUsu?cluster=devnet +``` + +Open the Explorer link. You see your balance go down, and the recipient's +balance go up! + +### Challenge + +Now it's your turn to build something independently. Create an application that +allows a user to create a new mint, create a token account, and mint tokens. + +To interact with the Token Program using the wallet adapter, you will have to +build each transaction and then submit the transaction to the wallet app for +approval. + +![Token Program Challenge Frontend](/assets/courses/unboxed/token-program-frontend.png) + +1. You can build this from scratch or you can + [download the starter code](https://github.com/Unboxed-Software/solana-token-frontend/tree/starter). +2. Create a new Token Mint in the `CreateMint` component. If you need a + refresher on how to send transactions to a wallet for approval, have a look + at the + [Wallets lesson](/developers/courses/intro-to-solana/interact-with-wallets). + +When creating a new mint, the newly generated `Keypair` will also have to sign +the transaction. When additional signers are required in addition to the +connected wallet, use the following format: + +```typescript +sendTransaction(transaction, connection, { + signers: [Keypair], +}); +``` + +3. Create a new Token Account in the `CreateTokenAccount` component. +4. Mint tokens in the `MintToForm` component. + +If you get stumped, feel free to reference the +[solution code](https://github.com/ZYJLiu/solana-token-frontend). + +And remember, get creative with these challenges and make them your own! + + + +### Completed the lab? + +Push your code to GitHub and +[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=72cab3b8-984b-4b09-a341-86800167cfc7)! + + diff --git a/content/docs/advanced/actions.mdx b/content/docs/advanced/actions.mdx new file mode 100644 index 000000000..5f9150a2e --- /dev/null +++ b/content/docs/advanced/actions.mdx @@ -0,0 +1,1236 @@ +--- +title: Actions and Blinks +seoTitle: "Actions and Blinks" +description: + "Solana Actions are APIs that return transactions for users to preview and + sign. Blockchain links – or blinks – turn Actions into a shareable, + metadata-rich link." +altRoutes: + - /docs/actions + - /docs/blinks + - /docs/advanced/blinks +--- + +[Solana Actions](#actions) are specification-compliant APIs that return +transactions on the Solana blockchain to be previewed, signed, and sent across a +number of various contexts, including QR codes, buttons + widgets, and websites +across the internet. Actions make it simple for developers to integrate the +things you can do throughout the Solana ecosystem right into your environment, +allowing you to perform blockchain transactions without needing to navigate away +to a different app or webpage. + +[Blockchain links](#blinks) – or blinks – turn any Solana Action into a +shareable, metadata-rich link. Blinks allow Action-aware clients (browser +extension wallets, bots) to display additional capabilities for the user. On a +website, a blink might immediately trigger a transaction preview in a wallet +without going to a decentralized app; in Discord, a bot might expand the blink +into an interactive set of buttons. This pushes the ability to interact on-chain +to any web surface capable of displaying a URL. 
+ +## Get Started + +To quickly get started with creating custom Solana Actions: + +```shell +npm install @solana/actions +``` + +- install the + [Solana Actions SDK](https://www.npmjs.com/package/@solana/actions) in your + application +- build an API endpoint for the [GET request](#get-request) that returns the + metadata about your Action +- create an API endpoint that accepts the [POST request](#post-request) and + returns the signable transaction for the user + +> Checkout this video tutorial on +> [how to build a Solana Action](https://www.youtube.com/watch?v=kCht01Ycif0) +> using the `@solana/actions` SDK. +> +> You can also find the +> [source code for an Action](https://github.com/solana-developers/solana-actions/blob/main/examples/next-js/src/app/api/actions/transfer-sol/route.ts) +> that performs a native SOL transfer here and several other example Actions in +> [this repo](https://github.com/solana-developers/solana-actions/tree/main/examples). + +When deploying your custom Solana Actions to production: + +- ensure your application has a valid [actions.json file](#actionsjson) at the + root of your domain +- ensure your application responds with the + [required Cross-Origin headers](#options-response) on all Action endpoints, + including the `actions.json` file +- test and debug your blinks/actions using the + [Blinks Inspector](https://www.blinks.xyz/inspector) + +If you are looking for inspiration around building Actions and blinks, checkout +the [Awesome Blinks](https://github.com/solana-developers/awesome-blinks) +repository for some community creations and even +[ideas for new ones](https://github.com/solana-developers/awesome-blinks/discussions/categories/ideas-for-blinks). + +## Actions + +The Solana Actions specification uses a set of standard APIs to deliver signable +transactions (and eventually signable messages) from an application directly to +a user. They are hosted at publicly accessible URLs and are therefore accessible +by their URL for any client to interact with. + +> You can think of Actions as a API endpoint that will return metadata and +> something for a user to sign (either a transaction or a authentication +> message) with their blockchain wallet. + +The Actions API consists of making simple `GET` and `POST` requests to an +Action's URL endpoint and handling the responses that conform to the Actions +interface. + +1. the [GET request](#get-request) returns metadata that provides human-readable + information to the client about what actions are available at this URL, and + an optional list of related actions. +2. the [POST request](#post-request) returns a signable transaction or message + that the client then prompts the user's wallet to sign and execute on the + blockchain or in another offchain service. 
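+
+For example, both requests can be made with plain `fetch`. The sketch below is
+illustrative only; the Action URL and signer address are placeholders:
+
+```typescript
+// Illustrative only: both the Action URL and the account are placeholders
+const actionUrl = "https://actions.example.com/api/donate";
+const account = "BASE58_WALLET_ADDRESS_HERE"; // the user's public key
+
+// 1. GET request: fetch metadata about the available actions
+const metadata = await (await fetch(actionUrl)).json();
+console.log(metadata.title, metadata.label);
+
+// 2. POST request: ask for a signable transaction for the chosen action
+const postResponse = await fetch(actionUrl, {
+  method: "POST",
+  headers: { "Content-Type": "application/json" },
+  body: JSON.stringify({ account }),
+});
+const { transaction } = await postResponse.json();
+// `transaction` is a base64-encoded transaction for the wallet to sign and send
+```
+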
+ +### Action Execution and Lifecycle + +In practice, interacting with Actions closely resembles interacting with a +typical REST API: + +- a client makes the initial `GET` request to an Action URL in order to fetch + metadata about the Actions available +- the endpoint returns a response that include metadata about the endpoint (like + the application's title and icon) and a listing of the available actions for + this endpoint +- the client application (like a mobile wallet, chat bot, or website) displays a + UI for the user to perform one of the actions +- after the user selects an action (by clicking a button), the client makes a + `POST` request to the endpoint in order to get the transaction for the user to + sign +- the wallet facilitates the user signing the transaction and ultimately sends + the transaction to the blockchain for confirmation + +![Solana Actions Execution and Lifecycle](/assets/docs/action-execution-and-lifecycle.png) + +When receiving transactions from an Actions URL, clients should handle +submission of these transactions to the blockchain and manage their state +lifecycle. + +Actions also support some level of invalidation before execution. The `GET` and +`POST` request may return some metadata that states whether the action is +capable of be taken (like with the `disabled` field). + +For example, if there was an Action endpoint that facilitates voting on a DAO +governance proposal whose voting window has closed, the initial +[GET request](#get-request) may return the error message "This proposal is no +longer up for a vote" and the "Vote Yes" and "Vote No" buttons as "disabled". + +## Blinks + +Blinks (blockchain links) are client applications that introspect Action APIs +and construct user interfaces around interacting with and executing Actions. + +Client applications that support blinks simply detect Action-compatible URLs, +parse them, and allow users to interact with them in standardized user +interfaces. + +> Any client application that fully introspects an Actions API to build a +> complete interface for it is a _blink_. Therefore, not all clients that +> consume Actions APIs are blinks. + +### Blink URL Specification + +A blink URL describes a client application that enables a user to complete the +full [lifecycle of executing an Action](#action-execution-and-lifecycle), +including signing with their wallet. + +```text +https://example.domain/?action= +``` + +For any client application to become a blink: + +- The blink URL must contain a query parameter of `action` whose value is a + URL-encoded [Action URL](#url-scheme). This value must be + [URL-encoded](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/encodeURIComponent) + to not conflict with any other protocol parameters. + +- The client application must + [URL-decode](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent) + the `action` query parameter and introspect the Action API link provided (see + [Action URL scheme](#url-scheme)). + +- The client must render a rich user interface that enables a user to complete + the full [lifecycle of executing an Action](#action-execution-and-lifecycle), + including signing with their wallet. + +> Not all blink client applications (e.g. websites or dApps) will support all +> Actions. Application developers may chose which Actions they want to support +> within their blink interfaces. 
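+
+For instance, a client could construct a blink URL by URL-encoding the Action
+URL (a minimal sketch):
+
+```typescript
+const actionUrl = "solana-action:https://actions.alice.com/donate";
+const blinkUrl =
+  "https://example.domain/?action=" + encodeURIComponent(actionUrl);
+
+console.log(blinkUrl);
+// https://example.domain/?action=solana-action%3Ahttps%3A%2F%2Factions.alice.com%2Fdonate
+```
+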
+ +The following example demonstrates a valid blink URL with an `action` value of +`solana-action:https://actions.alice.com/donate` that is URL encoded: + +```text +https://example.domain/?action=solana-action%3Ahttps%3A%2F%2Factions.alice.com%2Fdonate +``` + +### Detecting Actions via Blinks + +Blinks may be linked to Actions in at least 3 ways: + +1. Sharing an explicit Action URL: + `solana-action:https://actions.alice.com/donate` + + In this case, only supported clients may render the blink. There will be no + fallback link preview, or site that may be visited outside of the + non-supporting client. + +2. Sharing a link to a website that is linked to an Actions API via an + [`actions.json` file](#actionsjson) on the website's domain root. + + For example, `https://alice.com/actions.json` maps + `https://alice.com/donate`, a website URL at which users can donate to Alice, + to API URL `https://actions.alice.com/donate`, at which Actions for donating + to Alice are hosted. + +3. Embedding an Action URL in an “interstitial” site URL that understands how to + parse Actions. + + ```text + https://example.domain/?action= + ``` + +Clients that support blinks should be able to take any of the above formats and +correctly render an interface to facilitate executing the action directly in the +client. + +For clients that do not support blinks, there should be an underlying website +(making the browser become the universal fallback). + +If a user taps anywhere on a client that is not an action button or text input +field, they should be taken to the underlying site. + +### Blink Testing and Verification + +While Solana Actions and blinks are a permissionless protocol/specification, +client applications and wallets are still required to ultimately facilitate +users to sign the transaction. + +> Use the [Blinks Inspector](https://www.blinks.xyz/inspector) tool to inspect, +> debug, and test your blinks and actions directly in your browser. You can view +> the GET and POST response payloads, response headers, and test all inputs to +> each of your linked Actions. + +Each client application or wallets may have different requirements on which +Action endpoints their clients will automatically unfurl and immediately display +to their users on social media platforms. + +For example, some clients may operate on an "allow list" approach that may +require verification prior to their client unfurling an Action for users such as +Dialect's Actions Registry (detailed below). + +All blinks will still render and allow for signing on Dialect's +[dial.to](https://dial.to) blinks Interstitial site, with their registry status +displayed in the blink. + +### Dialect's Actions Registry + +As a public good for the Solana ecosystem, [Dialect](https://dialect.to) +maintains a public registry — together with the help of Solana Foundation and +other community members — of blockchain links that have are from pre-verified +from known sources. As of launch, only Actions that have been registered in the +Dialect registry will unfurl in the Twitter feed when posted. + +Client applications and wallets can freely choose to use this public registry or +another solution to help ensure user security and safety. If not verified +through the Dialect registry, the blockchain link will not be touched by the +blink client, and will be rendered as a typical URL. 
+ +Developers can apply to be verified by Dialect here: +[dial.to/register](https://dial.to/register) + +## Specification + +The Solana Actions specification consists of key sections that are part of a +request/response interaction flow: + +- Solana Action [URL scheme](#url-scheme) providing an Action URL +- [OPTIONS response](#options-response) to an Action URL to pass CORS + requirements +- [GET request](#get-request) to an Action URL +- [GET response](#get-response) from the server +- [POST request](#post-request) to an Action URL +- [POST response](#post-response) from the server + +Each of these requests are made by the _Action client_ (e.g. wallet app, browser +extension, dApp, website, etc) to gather specific metadata for rich user +interfaces and to facilitate user input to the Actions API. + +Each of the responses are crafted by an application (e.g. website, server +backend, etc) and returned to the _Action client_. Ultimately, providing a +signable transaction or message for a wallet to prompt the user to approve, +sign, and send to the blockchain. + +> The types and interfaces declared within this readme files are often the +> simplified version of the types to aid in readability. +> +> For better type safety and improved developer experience, the +> `@solana/actions-spec` package contains more complex type definitions. You can +> find the +> [source code for them here](https://github.com/solana-developers/solana-actions/blob/main/packages/actions-spec/index.d.ts). + +### URL Scheme + +A Solana Action URL describes an interactive request for a signable Solana +transaction or message using the `solana-action` protocol. + +The request is interactive because the parameters in the URL are used by a +client to make a series of standardized HTTP requests to compose a signable +transaction or message for the user to sign with their wallet. + +```text +solana-action: +``` + +- A single `link` field is required as the pathname. The value must be a + conditionally + [URL-encoded](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/encodeURIComponent) + absolute HTTPS URL. + +- If the URL contains query parameters, it must be URL-encoded. URL-encoding the + value prevents conflicting with any Actions protocol parameters, which may be + added via the protocol specification. + +- If the URL does not contain query parameters, it should not be URL-encoded. + This produces a shorter URL and a less dense QR code. + +In either case, clients must +[URL-decode](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent) +the value. This has no effect if the value isn't URL-encoded. If the decoded +value is not an absolute HTTPS URL, the wallet must reject it as **malformed**. + +### OPTIONS response + +In order to allow Cross-Origin Resource Sharing +([CORS](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS)) within Actions +clients (including blinks), all Action endpoints should respond to HTTP requests +for the `OPTIONS` method with valid headers that will allow clients to pass CORS +checks for all subsequent requests from their same origin domain. + +An Actions client may perform +"[preflight](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS#preflighted_requests)" +requests to the Action URL endpoint in order check if the subsequent GET request +to the Action URL will pass all CORS checks. 
These CORS preflight checks are +made using the `OPTIONS` HTTP method and should respond with all required HTTP +headers that will allow Action clients (like blinks) to properly make all +subsequent requests from their origin domain. + +At a minimum, the required HTTP headers include: + +- `Access-Control-Allow-Origin` with a value of `*` + - this ensures all Action clients can safely pass CORS checks in order to make + all required requests +- `Access-Control-Allow-Methods` with a value of `GET,POST,PUT,OPTIONS` + - ensures all required HTTP request methods are supported for Actions +- `Access-Control-Allow-Headers` with a minimum value of + `Content-Type, Authorization, Content-Encoding, Accept-Encoding` + +For simplicity, developers should consider returning the same response and +headers to `OPTIONS` requests as their [`GET` response](#get-response). + + + +The `actions.json` file response must also return valid Cross-Origin headers for +`GET` and `OPTIONS` requests, specifically the `Access-Control-Allow-Origin` +header value of `*`. + +See [actions.json](#actionsjson) below for more details. + + + +### GET Request + +The Action client (e.g. wallet, browser extension, etc) should make an HTTP +`GET` JSON request to the Action's URL endpoint. + +- The request should not identify the wallet or the user. +- The client should make the request with an + [`Accept-Encoding` header](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Encoding). +- The client should display the domain of the URL as the request is being made. + +### GET Response + +The Action's URL endpoint (e.g. application or server backend) should respond +with an HTTP `OK` JSON response (with a valid payload in the body) or an +appropriate HTTP error. + +- The client must handle HTTP + [client errors](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status#client_error_responses), + [server errors](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status#server_error_responses), + and + [redirect responses](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status#redirection_messages). +- The endpoint should respond with a + [`Content-Encoding` header](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding) + for HTTP compression. +- The endpoint should respond with a + [`Content-Type` header](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type) + of `application/json`. + +- The client should not cache the response except as instructed by + [HTTP caching](https://developer.mozilla.org/en-US/docs/Web/HTTP/Caching#controlling_caching) + response headers. +- The client should display the `title` and render the `icon` image to user. + +Error responses (i.e. HTTP 4xx and 5xx status codes) should return a JSON +response body following `ActionError` to present a helpful error message to +users. See [Action Errors](#action-errors). 
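+
+Putting the `OPTIONS` and `GET` requirements together, a minimal sketch of an
+endpoint (written here as a Next.js App Router route handler for illustration;
+the payload values are placeholders) could look like this:
+
+```typescript
+// CORS headers required by the Actions specification (see OPTIONS response above)
+const ACTION_HEADERS = {
+  "Access-Control-Allow-Origin": "*",
+  "Access-Control-Allow-Methods": "GET,POST,PUT,OPTIONS",
+  "Access-Control-Allow-Headers":
+    "Content-Type, Authorization, Content-Encoding, Accept-Encoding",
+  "Content-Type": "application/json",
+};
+
+// Respond to CORS preflight requests with the same headers as the GET response
+export async function OPTIONS() {
+  return new Response(null, { headers: ACTION_HEADERS });
+}
+
+export async function GET() {
+  // Placeholder metadata; the body shape is specified in the next section
+  const payload = {
+    icon: "https://example.com/icon.png",
+    title: "Example Action",
+    description: "An illustrative Action endpoint.",
+    label: "Run Action",
+  };
+
+  return Response.json(payload, { headers: ACTION_HEADERS });
+}
+```
+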
+ +#### GET Response Body + +A `GET` response with an HTTP `OK` JSON response should include a body payload +that follows the interface specification: + +```ts title="ActionGetResponse" +export type ActionType = "action" | "completed"; + +export type ActionGetResponse = Action<"action">; + +export interface Action { + /** type of Action to present to the user */ + type: T; + /** image url that represents the source of the action request */ + icon: string; + /** describes the source of the action request */ + title: string; + /** brief summary of the action to be performed */ + description: string; + /** button text rendered to the user */ + label: string; + /** UI state for the button being rendered to the user */ + disabled?: boolean; + links?: { + /** list of related Actions a user could perform */ + actions: LinkedAction[]; + }; + /** non-fatal error message to be displayed to the user */ + error?: ActionError; +} +``` + +- `type` - The type of action being given to the user. Defaults to `action`. The + initial `ActionGetResponse` is required to have a type of `action`. + + - `action` - Standard action that will allow the user to interact with any of + the `LinkedActions` + - `completed` - Used to declare the "completed" state within action chaining. + +- `icon` - The value must be an absolute HTTP or HTTPS URL of an icon image. The + file must be an SVG, PNG, or WebP image, or the client/wallet must reject it + as **malformed**. + +- `title` - The value must be a UTF-8 string that represents the source of the + action request. For example, this might be the name of a brand, store, + application, or person making the request. + +- `description` - The value must be a UTF-8 string that provides information on + the action. The description should be displayed to the user. + +- `label` - The value must be a UTF-8 string that will be rendered on a button + for the user to click. All labels should not exceed 5 word phrases and should + start with a verb to solidify the action you want the user to take. For + example, "Mint NFT", "Vote Yes", or "Stake 1 SOL". + +- `disabled` - The value must be boolean to represent the disabled state of the + rendered button (which displays the `label` string). If no value is provided, + `disabled` should default to `false` (i.e. enabled by default). For example, + if the action endpoint is for a governance vote that has closed, set + `disabled=true` and the `label` could be "Vote Closed". + +- `error` - An optional error indication for non-fatal errors. If present, the + client should display it to the user. If set, it should not prevent the client + from interpreting the action or displaying it to the user (see + [Action Errors](#action-errors)). For example, the error can be used together + with `disabled` to display a reason like business constraints, authorization, + the state, or an error of external resource. + +- `links.actions` - An optional array of related actions for the endpoint. Users + should be displayed UI for each of the listed actions and expected to only + perform one. For example, a governance vote action endpoint may return three + options for the user: "Vote Yes", "Vote No", and "Abstain from Vote". + + - If no `links.actions` is provided, the client should render a single button + using the root `label` string and make the POST request to the same action + URL endpoint as the initial GET request. 
+ + - If any `links.actions` are provided, the client should only render buttons + and input fields based on the items listed in the `links.actions` field. The + client should not render a button for the contents of the root `label`. + +```ts title="LinkedAction" +export interface LinkedAction { + /** URL endpoint for an action */ + href: string; + /** button text rendered to the user */ + label: string; + /** + * Parameters to accept user input within an action + * @see {ActionParameter} + * @see {ActionParameterSelectable} + */ + parameters?: Array; +} +``` + +The `ActionParameter` allows declaring what input the Action API is requesting +from the user: + +```ts title="ActionParameter" +/** + * Parameter to accept user input within an action + * note: for ease of reading, this is a simplified type of the actual + */ +export interface ActionParameter { + /** input field type */ + type?: ActionParameterType; + /** parameter name in url */ + name: string; + /** placeholder text for the user input field */ + label?: string; + /** declare if this field is required (defaults to `false`) */ + required?: boolean; + /** regular expression pattern to validate user input client side */ + pattern?: string; + /** human-readable description of the `type` and/or `pattern`, represents a caption and error, if value doesn't match */ + patternDescription?: string; + /** the minimum value allowed based on the `type` */ + min?: string | number; + /** the maximum value allowed based on the `type` */ + max?: string | number; +} +``` + +The `pattern` should be a string equivalent of a valid regular expression. This +regular expression pattern should by used by blink-clients to validate user +input before making the POST request. If the `pattern` is not a valid regular +expression, it should be ignored by clients. + +The `patternDescription` is a human readable description of the expected input +requests from the user. If `pattern` is provided, the `patternDescription` is +required to be provided. + +The `min` and `max` values allows the input to set a lower and/or upper bounds +of the input requested from the user (i.e. min/max number and or min/max +character length), and should be used for client side validation. For input +`type`s of `date` or `datetime-local`, these values should be a string dates. +For other string based input `type`s, the values should be numbers representing +their min/max character length. + +If the user input value is not considered valid per the `pattern`, the user +should receive a client side error message indicating the input field is not +valid and displayed the `patternDescription` string. + +The `type` field allows the Action API to declare more specific user input +fields, providing better client side validation and improving the user +experience. In many cases, this type will resemble the standard +[HTML input element](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input). + +The `ActionParameterType` can be simplified to the following type: + +```ts title="ActionParameterType" +/** + * Input field type to present to the user + * @default `text` + */ +export type ActionParameterType = + | "text" + | "email" + | "url" + | "number" + | "date" + | "datetime-local" + | "checkbox" + | "radio" + | "textarea" + | "select"; +``` + +Each of the `type` values should normally result in a user input field that +resembles a standard HTML `input` element of the corresponding `type` (i.e. 
+``) to provide better client side validation and user +experience: + +- `text` - equivalent of HTML + [“text” input](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input/text) + element +- `email` - equivalent of HTML + [“email” input](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input/email) + element +- `url` - equivalent of HTML + [“url” input](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input/url) + element +- `number` - equivalent of HTML + [“number” input](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input/number) + element +- `date` - equivalent of HTML + [“date” input](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input/date) + element +- `datetime-local` - equivalent of HTML + [“datetime-local” input](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input/datetime-local) + element +- `checkbox` - equivalent to a grouping of standard HTML + [“checkbox” input](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input/checkbox) + elements. The Action API should return `options` as detailed below. The user + should be able to select multiple of the provided checkbox options. +- `radio` - equivalent to a grouping of standard HTML + [“radio” input](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input/radio) + elements. The Action API should return `options` as detailed below. The user + should be able to select only one of the provided radio options. +- Other HTML input type equivalents not specified above (`hidden`, `button`, + `submit`, `file`, etc) are not supported at this time. + +In addition to the elements resembling HTML input types above, the following +user input elements are also supported: + +- `textarea` - equivalent of HTML + [textarea element](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/textarea). + Allowing the user to provide multi-line input. +- `select` - equivalent of HTML + [select element](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/select), + allowing the user to experience a “dropdown” style field. The Action API + should return `options` as detailed below. + +When `type` is set as `select`, `checkbox`, or `radio` then the Action API +should include an array of `options` that each provide a `label` and `value` at +a minimum. Each option may also have a `selected` value to inform the +blink-client which of the options should be selected by default for the user +(see `checkbox` and `radio` for differences). + +This `ActionParameterSelectable` can be simplified to the following type +definition: + +```ts title="ActionParameterSelectable" +/** + * note: for ease of reading, this is a simplified type of the actual + */ +interface ActionParameterSelectable extends ActionParameter { + options: Array<{ + /** displayed UI label of this selectable option */ + label: string; + /** value of this selectable option */ + value: string; + /** whether or not this option should be selected by default */ + selected?: boolean; + }>; +} +``` + +If no `type` is set or an unknown/unsupported value is set, blink-clients should +default to `text` and render a simple text input. + +The Action API is still responsible to validate and sanitize all data from the +user input parameters, enforcing any “required” user input as necessary. + +For platforms other that HTML/web based ones (like native mobile), the +equivalent native user input component should be used to achieve the equivalent +experience and client side validation as the HTML/web input types described +above. 
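+
+Putting these parameter types together, a hypothetical linked action
+(illustrative values only) that combines a `select` field with `options` and a
+`number` field might be declared as:
+
+```typescript
+// Illustrative only: a linked action mixing a `select` and a `number` parameter
+const donateAction = {
+  label: "Donate",
+  href: "/api/donate?token={token}&amount={amount}",
+  parameters: [
+    {
+      type: "select",
+      name: "token",
+      label: "Token to donate",
+      required: true,
+      options: [
+        { label: "SOL", value: "sol", selected: true },
+        { label: "USDC", value: "usdc" },
+      ],
+    },
+    {
+      type: "number",
+      name: "amount",
+      label: "Amount to donate",
+      min: 1,
+    },
+  ],
+};
+```
+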
+ +#### Example GET Response + +The following example response provides a single "root" action that is expected +to be presented to the user a single button with a label of "Claim Access +Token": + +```json +{ + "title": "HackerHouse Events", + "icon": "", + "description": "Claim your Hackerhouse access token.", + "label": "Claim Access Token" // button text +} +``` + +The following example response provides 3 related action links that allow the +user to click one of 3 buttons to cast their vote for a DAO proposal: + +```json +{ + "title": "Realms DAO Platform", + "icon": "", + "description": "Vote on DAO governance proposals #1234.", + "label": "Vote", + "links": { + "actions": [ + { + "label": "Vote Yes", // button text + "href": "/api/proposal/1234/vote?choice=yes" + }, + { + "label": "Vote No", // button text + "href": "/api/proposal/1234/vote?choice=no" + }, + { + "label": "Abstain from Vote", // button text + "href": "/api/proposal/1234/vote?choice=abstain" + } + ] + } +} +``` + +#### Example GET Response with Parameters + +The following examples response demonstrate how to accept text input from the +user (via `parameters`) and include that input in the final `POST` request +endpoint (via the `href` field within a `LinkedAction`): + +The following example response provides the user with 3 linked actions to stake +SOL: a button labeled "Stake 1 SOL", another button labeled "Stake 5 SOL", and a +text input field that allows the user to enter a specific "amount" value that +will be sent to the Action API: + +```json +{ + "title": "Stake-o-matic", + "icon": "", + "description": "Stake SOL to help secure the Solana network.", + "label": "Stake SOL", // not displayed since `links.actions` are provided + "links": { + "actions": [ + { + "label": "Stake 1 SOL", // button text + "href": "/api/stake?amount=1" + // no `parameters` therefore not a text input field + }, + { + "label": "Stake 5 SOL", // button text + "href": "/api/stake?amount=5" + // no `parameters` therefore not a text input field + }, + { + "label": "Stake", // button text + "href": "/api/stake?amount={amount}", + "parameters": [ + { + "name": "amount", // field name + "label": "SOL amount" // text input placeholder + } + ] + } + ] + } +} +``` + +The following example response provides a single input field for the user to +enter an `amount` which is sent with the POST request (either as a query +parameter or a subpath can be used): + +```json +{ + "icon": "", + "label": "Donate SOL", + "title": "Donate to GoodCause Charity", + "description": "Help support this charity by donating SOL.", + "links": { + "actions": [ + { + "label": "Donate", // button text + "href": "/api/donate/{amount}", // or /api/donate?amount={amount} + "parameters": [ + // {amount} input field + { + "name": "amount", // input field name + "label": "SOL amount" // text input placeholder + } + ] + } + ] + } +} +``` + +### POST Request + +The client must make an HTTP `POST` JSON request to the action URL with a body +payload of: + +```json +{ + "account": "" +} +``` + +- `account` - The value must be the base58-encoded public key of an account that + may sign the transaction. + +The client should make the request with an +[Accept-Encoding header](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Encoding) +and the application may respond with a +[Content-Encoding header](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding) +for HTTP compression. 
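+
+On the application side, a rough sketch of a `POST` handler (again Next.js-style
+for illustration, with a placeholder recipient and amount) that reads the
+`account` and returns a serialized transaction could look like the following;
+the exact response shape is specified under [POST Response](#post-response)
+below:
+
+```typescript
+import {
+  Connection,
+  PublicKey,
+  SystemProgram,
+  Transaction,
+  clusterApiUrl,
+} from "@solana/web3.js";
+
+export async function POST(request: Request) {
+  // `account` is the base58-encoded public key sent by the client
+  const { account } = await request.json();
+  const payer = new PublicKey(account);
+
+  const connection = new Connection(clusterApiUrl("devnet"));
+  const { blockhash, lastValidBlockHeight } =
+    await connection.getLatestBlockhash();
+
+  // Placeholder instruction: donate a fixed amount of lamports to a recipient
+  const transaction = new Transaction({
+    feePayer: payer,
+    blockhash,
+    lastValidBlockHeight,
+  }).add(
+    SystemProgram.transfer({
+      fromPubkey: payer,
+      toPubkey: new PublicKey("RECIPIENT_ADDRESS_HERE"), // placeholder address
+      lamports: 1_000_000,
+    }),
+  );
+
+  const payload = {
+    // base64-encoded serialized transaction (see POST Response below)
+    transaction: transaction
+      .serialize({ requireAllSignatures: false, verifySignatures: false })
+      .toString("base64"),
+    message: "Thanks for your donation!",
+  };
+
+  return Response.json(payload, {
+    headers: { "Access-Control-Allow-Origin": "*" },
+  });
+}
+```
+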
+ +The client should display the domain of the action URL as the request is being +made. If a `GET` request was made, the client should also display the `title` +and render the `icon` image from that GET response. + +### POST Response + +The Action's `POST` endpoint should respond with an HTTP `OK` JSON response +(with a valid payload in the body) or an appropriate HTTP error. + +- The client must handle HTTP + [client errors](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status#client_error_responses), + [server errors](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status#server_error_responses), + and + [redirect responses](https://developer.mozilla.org/en-US/docs/Web/HTTP/Status#redirection_messages). +- The endpoint should respond with a + [`Content-Type` header](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type) + of `application/json`. + +Error responses (i.e. HTTP 4xx and 5xx status codes) should return a JSON +response body following `ActionError` to present a helpful error message to +users. See [Action Errors](#action-errors). + +#### POST Response Body + +A `POST` response with an HTTP `OK` JSON response should include a body payload +of: + +```ts title="ActionPostResponse" +/** + * Response body payload returned from the Action POST Request + */ +export interface ActionPostResponse { + /** base64 encoded serialized transaction */ + transaction: string; + /** describes the nature of the transaction */ + message?: string; + links?: { + /** + * The next action in a successive chain of actions to be obtained after + * the previous was successful. + */ + next: NextActionLink; + }; +} +``` + +- `transaction` - The value must be a base64-encoded + [serialized transaction](https://solana-labs.github.io/solana-web3.js/v1.x/classes/Transaction.html#serialize). + The client must base64-decode the transaction and + [deserialize it](https://solana-labs.github.io/solana-web3.js/v1.x/classes/Transaction.html#from). + +- `message` - The value must be a UTF-8 string that describes the nature of the + transaction included in the response. The client should display this value to + the user. For example, this might be the name of an item being purchased, a + discount applied to a purchase, or a thank you note. + +- `links.next` - An optional value use to "chain" multiple Actions together in + series. After the included `transaction` has been confirmed on-chain, the + client can fetch and render the next action. See + [Action Chaining](#action-chaining) for more details. + +- The client and application should allow additional fields in the request body + and response body, which may be added by future specification updates. + +> The application may respond with a partially or fully signed transaction. The +> client and wallet must validate the transaction as **untrusted**. + +#### POST Response - Transaction + +If the transaction +[`signatures`](https://solana-labs.github.io/solana-web3.js/v1.x/classes/Transaction.html#signatures) +are empty or the transaction has NOT been partially signed: + +- The client must ignore the + [`feePayer`](https://solana-labs.github.io/solana-web3.js/v1.x/classes/Transaction.html#feePayer) + in the transaction and set the `feePayer` to the `account` in the request. 
+- The client must ignore the + [`recentBlockhash`](https://solana-labs.github.io/solana-web3.js/v1.x/classes/Transaction.html#recentBlockhash) + in the transaction and set the `recentBlockhash` to the + [latest blockhash](https://solana-labs.github.io/solana-web3.js/v1.x/classes/Connection.html#getLatestBlockhash). +- The client must serialize and deserialize the transaction before signing it. + This ensures consistent ordering of the account keys, as a workaround for + [this issue](https://github.com/solana-labs/solana/issues/21722). + +If the transaction has been partially signed: + +- The client must NOT alter the + [`feePayer`](https://solana-labs.github.io/solana-web3.js/v1.x/classes/Transaction.html#feePayer) + or + [`recentBlockhash`](https://solana-labs.github.io/solana-web3.js/v1.x/classes/Transaction.html#recentBlockhash) + as this would invalidate any existing signatures. +- The client must verify existing signatures, and if any are invalid, the client + must reject the transaction as **malformed**. + +The client must only sign the transaction with the `account` in the request, and +must do so only if a signature for the `account` in the request is expected. + +If any signature except a signature for the `account` in the request is +expected, the client must reject the transaction as **malicious**. + +### Action Errors + +Actions APIs should return errors using `ActionError` in order to present +helpful error messages to the user. Depending on the context, this error could +be fatal or non-fatal. + +```ts title="ActionError" +export interface ActionError { + /** simple error message to be displayed to the user */ + message: string; +} +``` + +When an Actions API responds with an HTTP error status code (i.e. 4xx and 5xx), +the response body should be a JSON payload following `ActionError`. The error is +considered fatal and the included `message` should be presented to the user. + +For API responses that support the optional `error` attribute (like +[`ActionGetResponse`](#get-response)), the error is considered non-fatal and the +included `message` should be presented to the user. + +## Action Chaining + +Solana Actions can be "chained" together in a successive series. After an +Action's transaction is confirmed on-chain, the next action can be obtained and +presented to the user. + +Action chaining allows developers to build more complex and dynamic experiences +within blinks, including: + +- providing multiple transactions (and eventually sign message) to a user +- customized action metadata based on the user's wallet address +- refreshing the blink metadata after a successful transaction +- receive an API callback with the transaction signature for additional + validation and logic on the Action API server +- customized "success" messages by updating the displayed metadata (e.g. a new + image and description) + +To chain multiple actions together, in any `ActionPostResponse` include a +`links.next` of either: + +- `PostNextActionLink` - POST request link with a same origin callback url to + receive the `signature` and user's `account` in the body. This callback url + should respond with a `NextAction`. +- `InlineNextActionLink` - Inline metadata for the next action to be presented + to the user immediately after the transaction has confirmed. No callback will + be made. + +```ts +export type NextActionLink = PostNextActionLink | InlineNextActionLink; + +/** @see {NextActionPostRequest} */ +export interface PostNextActionLink { + /** Indicates the type of the link. 
  */
  type: "post";
  /** Relative or same origin URL to which the POST request should be made. */
  href: string;
}

/**
 * Represents an inline next action embedded within the current context.
 */
export interface InlineNextActionLink {
  /** Indicates the type of the link. */
  type: "inline";
  /** The next action to be performed */
  action: NextAction;
}
```

### NextAction

After the `ActionPostResponse` included `transaction` is signed by the user and
confirmed on-chain, the blink client should either:

- execute the callback request to fetch and display the `NextAction`, or
- if a `NextAction` is already provided via `links.next`, the blink client
  should update the displayed metadata and make no callback request

If the callback url is not the same origin as the initial POST request, no
callback request should be made. Blink clients should display an error notifying
the user.

```ts title="NextAction"
/** The next action to be performed */
export type NextAction = Action<"action"> | CompletedAction;

/** The completed action, used to declare the "completed" state within action chaining. */
export type CompletedAction = Omit<Action<"completed">, "links">;
```

Based on the `type`, the next action should be presented to the user via blink
clients in one of the following ways:

- `action` - (default) A standard action that will allow the user to see the
  included Action metadata, interact with the provided `LinkedActions`, and
  continue to chain any following actions.

- `completed` - The terminal state of an action chain that can update the blink
  UI with the included Action metadata, but will not allow the user to execute
  further actions.

If `links.next` is not provided, blink clients should assume the current action
is the final action in the chain, presenting their "completed" UI state after
the transaction is confirmed.

## actions.json

The [`actions.json` file](#actionsjson) allows an application to instruct
clients on which website URLs support Solana Actions and to provide a mapping
that can be used to perform [GET requests](#get-request) to an Actions API
server.

The `actions.json` file response must also return valid Cross-Origin headers for
`GET` and `OPTIONS` requests, specifically the `Access-Control-Allow-Origin`
header value of `*`.

See [OPTIONS response](#options-response) above for more details.

The `actions.json` file should be stored and universally accessible at the root
of the domain.

For example, if your web application is deployed to `my-site.com` then the
`actions.json` file should be accessible at `https://my-site.com/actions.json`.
This file should also be Cross-Origin accessible via any browser by having an
`Access-Control-Allow-Origin` header value of `*`.

### Rules

The `rules` field allows the application to map a set of a website's relative
route paths to a set of other paths.

**Type:** `Array` of `ActionRuleObject`.

```ts title="ActionRuleObject"
interface ActionRuleObject {
  /** relative (preferred) or absolute path to perform the rule mapping from */
  pathPattern: string;
  /** relative (preferred) or absolute path that supports Action requests */
  apiPath: string;
}
```

- [`pathPattern`](#rules-pathpattern) - A pattern that matches each incoming
  pathname.

- [`apiPath`](#rules-apipath) - A location destination defined as an absolute
  pathname or external URL.

#### Rules - pathPattern

A pattern that matches each incoming pathname.
It can be an absolute or relative +path and supports the following formats: + +- **Exact Match**: Matches the exact URL path. + + - Example: `/exact-path` + - Example: `https://website.com/exact-path` + +- **Wildcard Match**: Uses wildcards to match any sequence of characters in the + URL path. This can match single (using `*`) or multiple segments (using `**`). + (see [Path Matching](#rules-path-matching) below). + + - Example: `/trade/*` will match `/trade/123` and `/trade/abc`, capturing only + the first segment after `/trade/`. + - Example: `/category/*/item/**` will match `/category/123/item/456` and + `/category/abc/item/def`. + - Example: `/api/actions/trade/*/confirm` will match + `/api/actions/trade/123/confirm`. + +#### Rules - apiPath + +The destination path for the action request. It can be defined as an absolute +pathname or an external URL. + +- Example: `/api/exact-path` +- Example: `https://api.example.com/v1/donate/*` +- Example: `/api/category/*/item/*` +- Example: `/api/swap/**` + +#### Rules - Query Parameters + +Query parameters from the original URL are always preserved and appended to the +mapped URL. + +#### Rules - Path Matching + +The following table outlines the syntax for path matching patterns: + +| Operator | Matches | +| -------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `*` | A single path segment, not including the surrounding path separator / characters. | +| `**` | Matches zero or more characters, including any path separator / characters between multiple path segments. If other operators are included, the `**` operator must be the last operator. | +| `?` | Unsupported pattern. | + +### Rules Examples + +The following example demonstrates an exact match rule to map requests to `/buy` +from your site's root to the exact path `/api/buy` relative to your site's root: + +```json title="actions.json" +{ + "rules": [ + { + "pathPattern": "/buy", + "apiPath": "/api/buy" + } + ] +} +``` + +The following example uses wildcard path matching to map requests to any path +(excluding subdirectories) under `/actions/` from your site's root to a +corresponding path under `/api/actions/` relative to your site's root: + +```json title="actions.json" +{ + "rules": [ + { + "pathPattern": "/actions/*", + "apiPath": "/api/actions/*" + } + ] +} +``` + +The following example uses wildcard path matching to map requests to any path +(excluding subdirectories) under `/donate/` from your site's root to a +corresponding absolute path `https://api.dialect.com/api/v1/donate/` on an +external site: + +```json title="actions.json" +{ + "rules": [ + { + "pathPattern": "/donate/*", + "apiPath": "https://api.dialect.com/api/v1/donate/*" + } + ] +} +``` + +The following example uses wildcard path matching for an idempotent rule to map +requests to any path (including subdirectories) under `/api/actions/` from your +site's root to itself: + +> Idempotent rules allow blink clients to more easily determine if a given path +> supports Action API requests without having to be prefixed with the +> `solana-action:` URI or performing additional response testing. 

```json title="actions.json"
{
  "rules": [
    {
      "pathPattern": "/api/actions/**",
      "apiPath": "/api/actions/**"
    }
  ]
}
```

## Action Identity

Action endpoints may include an _Action Identity_ in the transactions that are
returned in their [POST response](#post-response) for the user to sign. This
allows indexers and analytics platforms to easily attribute on-chain activity to
a specific Action Provider (i.e. service) in a verifiable way.

The [Action Identity](#action-identity) is a keypair used to sign a specially
formatted message that is included in a transaction using a Memo instruction.
This _Identifier Message_ can be verifiably attributed to a specific Action
Identity, and therefore used to attribute transactions to a specific Action
Provider.

The keypair is not required to sign the transaction itself. This allows wallets
and applications to improve transaction deliverability when no other signatures
are on the transaction returned to a user (see
[POST response transaction](#post-response-transaction)).

If an Action Provider's use case requires their backend services to pre-sign the
transaction before the user does, they should use this keypair as their Action
Identity. This allows one less account to be included in the transaction,
lowering the total transaction size by 32 bytes.

### Action Identifier Message

The Action Identifier Message is a colon-separated UTF-8 string included in a
transaction using a single [SPL Memo](https://spl.solana.com/memo) instruction.

```shell
protocol:identity:reference:signature
```

- `protocol` - The value of the protocol being used (set to `solana-action` per
  the [URL Scheme](#url-scheme) above)
- `identity` - The value must be the base58-encoded public key address of the
  Action Identity keypair
- `reference` - The value must be a base58-encoded 32-byte array. This may or
  may not be a public key, on or off the curve, and may or may not correspond
  with an account on Solana.
- `signature` - base58-encoded signature created from the Action Identity
  keypair signing only the `reference` value.

The `reference` value must be used only once and in a single transaction. For
the purpose of associating transactions with an Action Provider, only the first
usage of the `reference` value is considered valid.

Transactions may have multiple Memo instructions. When performing a
[`getSignaturesForAddress`](/docs/rpc/http/getsignaturesforaddress),
the result's `memo` field will return each memo instruction's message as a
single string, with each separated by a semicolon.

No other data should be included with the Identifier Message's Memo instruction.

The `identity` and the `reference` should be included as read-only, non-signer
[keys](https://solana-labs.github.io/solana-web3.js/v1.x/classes/TransactionInstruction.html#keys)
in the transaction on an instruction that is NOT the Identifier Message Memo
instruction.

The Identifier Message Memo instruction must have zero accounts provided. If any
accounts are provided, the Memo program requires these accounts to be valid
signers. For the purposes of identifying actions, this restricts flexibility and
can degrade the user experience. Therefore, it is considered an anti-pattern and
must be avoided.

### Action Identity Verification

Any transaction that includes the `identity` account can be verifiably
associated with the Action Provider in a multi-step process:

1. Get all the transactions for a given `identity`.
2. Parse and verify each transaction's memo string, ensuring the `signature` is
   valid for the `reference` stored.
3. Verify the specific transaction is the first on-chain occurrence of the
   `reference`:
   - If this transaction is the first occurrence, the transaction is considered
     verified and can be safely attributed to the Action Provider.
   - If this transaction is NOT the first occurrence, it is considered invalid
     and therefore not attributed to the Action Provider.

Because Solana validators index transactions by the account keys, the
[`getSignaturesForAddress`](/docs/rpc/http/getsignaturesforaddress) RPC method
can be used to locate all transactions that include the `identity` account.

This RPC method's response includes all the Memo data in the `memo` field. If
multiple Memo instructions were used in the transaction, each memo message will
be included in this `memo` field and must be parsed accordingly by the verifier
to obtain the _Identity Verification Message_.

These transactions should be initially considered **UNVERIFIED**. This is
because the `identity` is not required to sign the transaction, which allows any
transaction to include this account as a non-signer and potentially inflate
attribution and usage counts artificially.

The Identity Verification Message should be checked to ensure the `signature`
was created by the `identity` signing the `reference`. If this signature
verification fails, the transaction is invalid and should not be attributed to
the Action Provider.

If the signature verification is successful, the verifier should ensure this
transaction is the first on-chain occurrence of the `reference`. If it is not,
the transaction is considered invalid.
diff --git a/content/docs/advanced/confirmation.mdx b/content/docs/advanced/confirmation.mdx
new file mode 100644
index 000000000..9a91956fd
--- /dev/null
+++ b/content/docs/advanced/confirmation.mdx
@@ -0,0 +1,401 @@
---
title: Confirmation & Expiration
seoTitle: "Transaction Confirmation & Expiration"
description:
  "Understand how Solana transaction confirmation works and when a transaction
  expires (including recent blockhash checks)."
altRoutes:
  - /docs/advanced
  - /docs/core/transactions/confirmation
h1: Transaction Confirmation & Expiration
---

Problems relating to
[transaction confirmation](/docs/terminology#transaction-confirmations) are
common for many newer developers while building applications. This article aims
to improve the overall understanding of the confirmation mechanism used on the
Solana blockchain, including some recommended best practices.

## Brief background on transactions

Before diving into how Solana transaction confirmation and expiration works,
let's briefly set the base understanding of a few things:

- what a transaction is
- the lifecycle of a transaction
- what a blockhash is
- and a brief understanding of Proof of History (PoH) and how it relates to
  blockhashes

### What is a transaction?

Transactions consist of two components: a
[message](/docs/terminology#message) and a
[list of signatures](/docs/terminology#signature).
The transaction message is +where the magic happens and at a high level it consists of four components: + +- a **header** with metadata about the transaction, +- a **list of instructions** to invoke, +- a **list of accounts** to load, and +- a **“recent blockhash.”** + +In this article, we're going to be focusing a lot on a transaction's +[recent blockhash](/docs/terminology#blockhash) because it plays a big role +in transaction confirmation. + +### Transaction lifecycle refresher + +Below is a high level view of the lifecycle of a transaction. This article will +touch on everything except steps 1 and 4. + +1. Create a header and a list of instructions along with the list of accounts + that instructions need to read and write +2. Fetch a recent blockhash and use it to prepare a transaction message +3. Simulate the transaction to ensure it behaves as expected +4. Prompt user to sign the prepared transaction message with their private key +5. Send the transaction to an RPC node which attempts to forward it to the + current block producer +6. Hope that a block producer validates and commits the transaction into their + produced block +7. Confirm the transaction has either been included in a block or detect when it + has expired + +### What is a Blockhash? + +A [“blockhash”](/docs/terminology#blockhash) refers to the last Proof of +History (PoH) hash for a [“slot”](/docs/terminology#slot) (description +below). Since Solana uses PoH as a trusted clock, a transaction's recent +blockhash can be thought of as a **timestamp**. + +### Proof of History refresher + +Solana's Proof of History mechanism uses a very long chain of recursive SHA-256 +hashes to build a trusted clock. The “history” part of the name comes from the +fact that block producers hash transaction id's into the stream to record which +transactions were processed in their block. + +[PoH hash calculation](https://github.com/anza-xyz/agave/blob/aa0922d6845e119ba466f88497e8209d1c82febc/entry/src/poh.rs#L79): +`next_hash = hash(prev_hash, hash(transaction_ids))` + +PoH can be used as a trusted clock because each hash must be produced +sequentially. Each produced block contains a blockhash and a list of hash +checkpoints called “ticks” so that validators can verify the full chain of +hashes in parallel and prove that some amount of time has actually passed. + +## Transaction Expiration + +By default, all Solana transactions will expire if not committed to a block in a +certain amount of time. The **vast majority** of transaction confirmation issues +are related to how RPC nodes and validators detect and handle **expired** +transactions. A solid understanding of how transaction expiration works should +help you diagnose the bulk of your transaction confirmation issues. + +### How does transaction expiration work? + +Each transaction includes a “recent blockhash” which is used as a PoH clock +timestamp and expires when that blockhash is no longer “recent enough”. + +As each block is finalized (i.e. the maximum tick height +[is reached](https://github.com/anza-xyz/agave/blob/0588ecc6121ba026c65600d117066dbdfaf63444/runtime/src/bank.rs#L3269-L3271), +reaching the "block boundary"), the final hash of the block is added to the +`BlockhashQueue` which stores a maximum of the +[300 most recent blockhashes](https://github.com/anza-xyz/agave/blob/e0b0bcc80380da34bb63364cc393801af1e1057f/sdk/program/src/clock.rs#L123-L126). 
+During transaction processing, Solana Validators will check if each +transaction's recent blockhash is recorded within the most recent 151 stored +hashes (aka "max processing age"). If the transaction's recent blockhash is +[older than this](https://github.com/anza-xyz/agave/blob/cb2fd2b632f16a43eff0c27af7458e4e97512e31/runtime/src/bank.rs#L3570-L3571) +max processing age, the transaction is not processed. + +> Due to the current +> [max processing age of 150](https://github.com/anza-xyz/agave/blob/cb2fd2b632f16a43eff0c27af7458e4e97512e31/sdk/program/src/clock.rs#L129-L131) +> and the "age" of a blockhash in the queue being +> [0-indexed](https://github.com/anza-xyz/agave/blob/992a398fe8ea29ec4f04d081ceef7664960206f4/accounts-db/src/blockhash_queue.rs#L248-L274), +> there are actually 151 blockhashes that are considered "recent enough" and +> valid for processing. + +Since [slots](/docs/terminology#slot) (aka the time period a validator can +produce a block) are configured to last about +[400ms](https://github.com/anza-xyz/agave/blob/cb2fd2b632f16a43eff0c27af7458e4e97512e31/sdk/program/src/clock.rs#L107-L109), +but may fluctuate between 400ms and 600ms, a given blockhash can only be used by +transactions for about 60 to 90 seconds before it will be considered expired by +the runtime. + +### Example of transaction expiration + +Let's walk through a quick example: + +1. A validator is actively producing a new block for the current slot +2. The validator receives a transaction from a user with the recent blockhash + `abcd...` +3. The validator checks this blockhash `abcd...` against the list of recent + blockhashes in the `BlockhashQueue` and discovers that it was created 151 + blocks ago +4. Since it is exactly 151 blockhashes old, the transaction has not expired yet + and can still be processed! +5. But wait: before actually processing the transaction, the validator finished + creating the next block and added it to the `BlockhashQueue`. The validator + then starts producing the block for the next slot (validators get to produce + blocks for 4 consecutive slots) +6. The validator checks that same transaction again and finds it is now 152 + blockhashes old and rejects it because it's too old :( + +## Why do transactions expire? + +There's a very good reason for this actually, it's to help validators avoid +processing the same transaction twice. + +A naive brute force approach to prevent double processing could be to check +every new transaction against the blockchain's entire transaction history. But +by having transactions expire after a short amount of time, validators only need +to check if a new transaction is in a relatively small set of _recently_ +processed transactions. + +### Other blockchains + +Solana's approach to prevent double processing is quite different from other +blockchains. For example, Ethereum tracks a counter (nonce) for each transaction +sender and will only process transactions that use the next valid nonce. + +Ethereum's approach is simple for validators to implement, but it can be +problematic for users. Many people have encountered situations when their +Ethereum transactions got stuck in a _pending_ state for a long time and all the +later transactions, which used higher nonce values, were blocked from +processing. + +### Advantages on Solana + +There are a few advantages to Solana's approach: + +1. A single fee payer can submit multiple transactions at the same time that are + allowed to be processed in any order. 
This might happen if you're using + multiple applications at the same time. +2. If a transaction doesn't get committed to a block and expires, users can try + again knowing that their previous transaction will NOT ever be processed. + +By not using counters, the Solana wallet experience may be easier for users to +understand because they can get to success, failure, or expiration states +quickly and avoid annoying pending states. + +### Disadvantages on Solana + +Of course there are some disadvantages too: + +1. Validators have to actively track a set of all processed transaction id's to + prevent double processing. +2. If the expiration time period is too short, users might not be able to submit + their transaction before it expires. + +These disadvantages highlight a tradeoff in how transaction expiration is +configured. If the expiration time of a transaction is increased, validators +need to use more memory to track more transactions. If expiration time is +decreased, users don't have enough time to submit their transaction. + +Currently, Solana clusters require that transactions use blockhashes that are no +more than 151 blocks old. + +> This [GitHub issue](https://github.com/solana-labs/solana/issues/23582) +> contains some calculations that estimate that mainnet-beta validators need +> about 150MB of memory to track transactions. This could be slimmed down in the +> future if necessary without decreasing expiration time as are detailed in that +> issue. + +## Transaction confirmation tips + +As mentioned before, blockhashes expire after a time period of only 151 blocks +which can pass as quickly as **one minute** when slots are processed within the +target time of 400ms. + +One minute is not a lot of time considering that a client needs to fetch a +recent blockhash, wait for the user to sign, and finally hope that the +broadcasted transaction reaches a leader that is willing to accept it. Let's go +through some tips to help avoid confirmation failures due to transaction +expiration! + +### Fetch blockhashes with the appropriate commitment level + +Given the short expiration time frame, it's imperative that clients and +applications help users create transactions with a blockhash that is as recent +as possible. + +When fetching blockhashes, the current recommended RPC API is called +[`getLatestBlockhash`](/docs/rpc/http/getlatestblockhash). By default, this +API uses the `finalized` commitment level to return the most recently finalized +block's blockhash. However, you can override this behavior by +[setting the `commitment` parameter](/docs/rpc/#configuring-state-commitment) +to a different commitment level. + +**Recommendation** + +The `confirmed` commitment level should almost always be used for RPC requests +because it's usually only a few slots behind the `processed` commitment and has +a very low chance of belonging to a dropped +[fork](https://docs.anza.xyz/consensus/fork-generation). + +But feel free to consider the other options: + +- Choosing `processed` will let you fetch the most recent blockhash compared to + other commitment levels and therefore gives you the most time to prepare and + process a transaction. But due to the prevalence of forking in the Solana + blockchain, roughly 5% of blocks don't end up being finalized by the cluster + so there's a real chance that your transaction uses a blockhash that belongs + to a dropped fork. 
Transactions that use blockhashes for abandoned blocks + won't ever be considered recent by any blocks that are in the finalized + blockchain. +- Using the [default commitment](/docs/rpc#default-commitment) level `finalized` + will eliminate any risk that the blockhash you choose will belong to a dropped + fork. The tradeoff is that there is typically at least a 32 slot difference + between the most recent confirmed block and the most recent finalized block. + This tradeoff is pretty severe and effectively reduces the expiration of your + transactions by about 13 seconds but this could be even more during unstable + cluster conditions. + +### Use an appropriate preflight commitment level + +If your transaction uses a blockhash that was fetched from one RPC node then you +send, or simulate, that transaction with a different RPC node, you could run +into issues due to one node lagging behind the other. + +When RPC nodes receive a `sendTransaction` request, they will attempt to +determine the expiration block of your transaction using the most recent +finalized block or with the block selected by the `preflightCommitment` +parameter. A **VERY** common issue is that a received transaction's blockhash +was produced after the block used to calculate the expiration for that +transaction. If an RPC node can't determine when your transaction expires, it +will only forward your transaction **one time** and afterwards will then +**drop** the transaction. + +Similarly, when RPC nodes receive a `simulateTransaction` request, they will +simulate your transaction using the most recent finalized block or with the +block selected by the `preflightCommitment` parameter. If the block chosen for +simulation is older than the block used for your transaction's blockhash, the +simulation will fail with the dreaded “blockhash not found” error. + +**Recommendation** + +Even if you use `skipPreflight`, **ALWAYS** set the `preflightCommitment` +parameter to the same commitment level used to fetch your transaction's +blockhash for both `sendTransaction` and `simulateTransaction` requests. + +### Be wary of lagging RPC nodes when sending transactions + +When your application uses an RPC pool service or when the RPC endpoint differs +between creating a transaction and sending a transaction, you need to be wary of +situations where one RPC node is lagging behind the other. For example, if you +fetch a transaction blockhash from one RPC node then you send that transaction +to a second RPC node for forwarding or simulation, the second RPC node might be +lagging behind the first. + +**Recommendation** + +For `sendTransaction` requests, clients should keep resending a transaction to a +RPC node on a frequent interval so that if an RPC node is slightly lagging +behind the cluster, it will eventually catch up and detect your transaction's +expiration properly. + +For `simulateTransaction` requests, clients should use the +[`replaceRecentBlockhash`](/docs/rpc/http/simulatetransaction) parameter to +tell the RPC node to replace the simulated transaction's blockhash with a +blockhash that will always be valid for simulation. + +### Avoid reusing stale blockhashes + +Even if your application has fetched a very recent blockhash, be sure that +you're not reusing that blockhash in transactions for too long. The ideal +scenario is that a recent blockhash is fetched right before a user signs their +transaction. 
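
As a minimal sketch of that pattern using the legacy `@solana/web3.js`
`Connection` API (the `payer` keypair and transfer details below are
placeholders), an application can defer the blockhash fetch until the moment the
transaction is signed and sent:

```ts
import {
  Connection,
  Keypair,
  LAMPORTS_PER_SOL,
  SystemProgram,
  Transaction,
} from "@solana/web3.js";

// Assumptions: `connection` points at your RPC node and `payer` is the signer
// held by your application or wallet (placeholders here for illustration)
const connection = new Connection("https://api.devnet.solana.com", "confirmed");
const payer = Keypair.generate();

const transaction = new Transaction().add(
  SystemProgram.transfer({
    fromPubkey: payer.publicKey,
    toPubkey: Keypair.generate().publicKey,
    lamports: 0.01 * LAMPORTS_PER_SOL,
  }),
);

// fetch the blockhash as late as possible: right before signing and sending
const { blockhash, lastValidBlockHeight } =
  await connection.getLatestBlockhash("confirmed");
transaction.recentBlockhash = blockhash;
transaction.lastValidBlockHeight = lastValidBlockHeight;
transaction.feePayer = payer.publicKey;

transaction.sign(payer);
const signature = await connection.sendRawTransaction(transaction.serialize());
```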

**Recommendation for applications**

Poll for new recent blockhashes on a frequent basis to ensure that whenever a
user triggers an action that creates a transaction, your application already has
a fresh blockhash that's ready to go.

**Recommendation for wallets**

Poll for new recent blockhashes on a frequent basis and replace a transaction's
recent blockhash right before the user signs the transaction to ensure the
blockhash is as fresh as possible.

### Use healthy RPC nodes when fetching blockhashes

When you fetch the latest blockhash with the `confirmed` commitment level from
an RPC node, it will respond with the blockhash for the latest confirmed block
that it's aware of. Solana's block propagation protocol prioritizes sending
blocks to staked nodes, so RPC nodes naturally lag about a block behind the rest
of the cluster. They also have to do more work to handle application requests
and can lag a lot more under heavy user traffic.

Lagging RPC nodes can therefore respond to
[`getLatestBlockhash`](/docs/rpc/http/getlatestblockhash) requests with
blockhashes that were confirmed by the cluster quite a while ago. By default, a
lagging RPC node that detects it is more than 150 slots behind the cluster will
stop responding to requests, but just before hitting that threshold it can
still return a blockhash that is just about to expire.

**Recommendation**

Monitor the health of your RPC nodes to ensure that they have an up-to-date view
of the cluster state with one of the following methods:

1. Fetch your RPC node's highest processed slot by using the
   [`getSlot`](/docs/rpc/http/getslot) RPC API with the `processed`
   commitment level and then call the
   [`getMaxShredInsertSlot`](/docs/rpc/http/getmaxshredinsertslot) RPC API
   to get the highest slot that your RPC node has received a “shred” of a block
   for. If the difference between these responses is very large, the cluster is
   producing blocks far ahead of what the RPC node has processed.
2. Call the `getLatestBlockhash` RPC API with the `confirmed` commitment level
   on a few different RPC API nodes and use the blockhash from the node that
   returns the highest slot for its
   [context slot](/docs/rpc/#rpcresponse-structure).

### Wait long enough for expiration

**Recommendation**

When calling the [`getLatestBlockhash`](/docs/rpc/http/getlatestblockhash)
RPC API to get a recent blockhash for your transaction, take note of the
`lastValidBlockHeight` in the response.

Then, poll the [`getBlockHeight`](/docs/rpc/http/getblockheight) RPC API
with the `confirmed` commitment level until it returns a block height greater
than the previously returned last valid block height.

### Consider using “durable” transactions

Sometimes transaction expiration issues are really hard to avoid (e.g. offline
signing, cluster instability). If the previous tips are still not sufficient for
your use-case, you can switch to using durable transactions (they just require a
bit of setup).

To start using durable transactions, a user first needs to submit a transaction
that
[invokes instructions that create a special on-chain “nonce” account](https://docs.rs/solana-program/latest/solana_program/system_instruction/fn.create_nonce_account.html)
and stores a “durable blockhash” inside of it. At any point in the future (as
long as the nonce account hasn't been used yet), the user can create a durable
transaction by following these 2 rules:

1.
The instruction list must start with an + [“advance nonce” system instruction](https://docs.rs/solana-program/latest/solana_program/system_instruction/fn.advance_nonce_account.html) + which loads their on-chain nonce account +2. The transaction's blockhash must be equal to the durable blockhash stored by + the on-chain nonce account + +Here's how these durable transactions are processed by the Solana runtime: + +1. If the transaction's blockhash is no longer “recent”, the runtime checks if + the transaction's instruction list begins with an “advance nonce” system + instruction +2. If so, it then loads the nonce account specified by the “advance nonce” + instruction +3. Then it checks that the stored durable blockhash matches the transaction's + blockhash +4. Lastly it makes sure to advance the nonce account's stored blockhash to the + latest recent blockhash to ensure that the same transaction can never be + processed again + +For more details about how these durable transactions work, you can read the +[original proposal](https://docs.anza.xyz/implemented-proposals/durable-tx-nonces) +and +[check out an example](/developers/guides/advanced/introduction-to-durable-nonces) +in the Solana docs. diff --git a/content/docs/advanced/lookup-tables.mdx b/content/docs/advanced/lookup-tables.mdx new file mode 100644 index 000000000..2b8a67c18 --- /dev/null +++ b/content/docs/advanced/lookup-tables.mdx @@ -0,0 +1,191 @@ +--- +title: Address Lookup Tables +description: + Learn how to use Solana Address Lookup Tables (ALTs) to efficiently handle up + to 64 addresses per transaction. Create, extend, and utilize lookup tables + using web3.js. +--- + +Address Lookup Tables, commonly referred to as "_lookup tables_" or "_ALTs_" for +short, allow developers to create a collection of related addresses to +efficiently load more addresses in a single transaction. + +Since each transaction on the Solana blockchain requires a listing of every +address that is interacted with as part of the transaction, this listing would +effectively be capped at 32 addresses per transaction. With the help of +[Address Lookup Tables](/docs/advanced/lookup-tables), a transaction would +now be able to raise that limit to 64 addresses per transaction. + +## Compressing onchain addresses + +After all the desired addresses have been stored onchain in an Address Lookup +Table, each address can be referenced inside a transaction by its 1-byte index +within the table (instead of their full 32-byte address). This lookup method +effectively "_compresses_" a 32-byte address into a 1-byte index value. + +This "_compression_" enables storing up to 256 addresses in a single lookup +table for use inside any given transaction. + +## Versioned Transactions + +To utilize an Address Lookup Table inside a transaction, developers must use v0 +transactions that were introduced with the new +[Versioned Transaction format](/docs/advanced/versions). + +## How to create an address lookup table + +Creating a new lookup table with the `@solana/web3.js` library is similar to the +older `legacy` transactions, but with some differences. 
+ +Using the `@solana/web3.js` library, you can use the +[`createLookupTable`](https://solana-labs.github.io/solana-web3.js/v1.x/classes/AddressLookupTableProgram.html#createLookupTable) +function to construct the instruction needed to create a new lookup table, as +well as determine its address: + +```js +const web3 = require("@solana/web3.js"); + +// connect to a cluster and get the current `slot` +const connection = new web3.Connection(web3.clusterApiUrl("devnet")); +const slot = await connection.getSlot(); + +// Assumption: +// `payer` is a valid `Keypair` with enough SOL to pay for the execution + +const [lookupTableInst, lookupTableAddress] = + web3.AddressLookupTableProgram.createLookupTable({ + authority: payer.publicKey, + payer: payer.publicKey, + recentSlot: slot, + }); + +console.log("lookup table address:", lookupTableAddress.toBase58()); + +// To create the Address Lookup Table onchain: +// send the `lookupTableInst` instruction in a transaction +``` + +> NOTE: Address lookup tables can be **created** with either a `v0` transaction +> or a `legacy` transaction. But the Solana runtime can only retrieve and handle +> the additional addresses within a lookup table while using +> [v0 Versioned Transactions](/docs/advanced/versions#current-transaction-versions). + +## Add addresses to a lookup table + +Adding addresses to a lookup table is known as "_extending_". Using the +`@solana/web3.js` library, you can create a new _extend_ instruction using the +[`extendLookupTable`](https://solana-labs.github.io/solana-web3.js/v1.x/classes/AddressLookupTableProgram.html#extendLookupTable) +method: + +```js +// add addresses to the `lookupTableAddress` table via an `extend` instruction +const extendInstruction = web3.AddressLookupTableProgram.extendLookupTable({ + payer: payer.publicKey, + authority: payer.publicKey, + lookupTable: lookupTableAddress, + addresses: [ + payer.publicKey, + web3.SystemProgram.programId, + // list more `publicKey` addresses here + ], +}); + +// Send this `extendInstruction` in a transaction to the cluster +// to insert the listing of `addresses` into your lookup table with address `lookupTableAddress` +``` + +> NOTE: Due to the same memory limits of `legacy` transactions, any transaction +> used to _extend_ an Address Lookup Table is also limited in how many addresses +> can be added at a time. Because of this, you will need to use multiple +> transactions to _extend_ any table with more addresses (~20) that can fit +> within a single transaction's memory limits. + +Once these addresses have been inserted into the table, and stored onchain, you +will be able to utilize the Address Lookup Table in future transactions. +Enabling up to 64 addresses in those future transactions. 
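
For example, a minimal sketch (reusing the assumed `connection`, `payer`,
`lookupTableInst`, and `extendInstruction` values from the snippets above) that
sends both instructions in a single `v0` transaction and waits for confirmation
might look like this:

```js
// Assumption: `connection`, `payer`, `lookupTableInst`, and `extendInstruction`
// are the values created in the snippets above
const { blockhash, lastValidBlockHeight } =
  await connection.getLatestBlockhash();

// bundle the "create" and "extend" instructions into a single v0 transaction
const message = new web3.TransactionMessage({
  payerKey: payer.publicKey,
  recentBlockhash: blockhash,
  instructions: [lookupTableInst, extendInstruction],
}).compileToV0Message();

const transaction = new web3.VersionedTransaction(message);
transaction.sign([payer]);

// send the transaction and wait for confirmation before relying on
// the new lookup table in later transactions
const signature = await connection.sendTransaction(transaction);
await connection.confirmTransaction({
  signature,
  blockhash,
  lastValidBlockHeight,
});

console.log("Lookup table created and extended:", signature);
```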
+ +## Fetch an Address Lookup Table + +Similar to requesting another account (or PDA) from the cluster, you can fetch a +complete Address Lookup Table with the +[`getAddressLookupTable`](https://solana-labs.github.io/solana-web3.js/v1.x/classes/Connection.html#getAddressLookupTable) +method: + +```js +// define the `PublicKey` of the lookup table to fetch +const lookupTableAddress = new web3.PublicKey(""); + +// get the table from the cluster +const lookupTableAccount = ( + await connection.getAddressLookupTable(lookupTableAddress) +).value; + +// `lookupTableAccount` will now be a `AddressLookupTableAccount` object + +console.log("Table address from cluster:", lookupTableAccount.key.toBase58()); +``` + +Our `lookupTableAccount` variable will now be a `AddressLookupTableAccount` +object which we can parse to read the listing of all the addresses stored on +chain in the lookup table: + +```js +// loop through and parse all the addresses stored in the table +for (let i = 0; i < lookupTableAccount.state.addresses.length; i++) { + const address = lookupTableAccount.state.addresses[i]; + console.log(i, address.toBase58()); +} +``` + +## How to use an address lookup table in a transaction + +After you have created your lookup table, and stored your needed address on +chain (via extending the lookup table), you can create a `v0` transaction to +utilize the onchain lookup capabilities. + +Just like older `legacy` transactions, you can create all the +[instructions](/docs/terminology#instruction) your transaction will execute +onchain. You can then provide an array of these instructions to the +[Message](/docs/terminology#message) used in the `v0` transaction. + +> NOTE: The instructions used inside a `v0` transaction can be constructed using +> the same methods and functions used to create the instructions in the past. +> There is no required change to the instructions used involving an Address +> Lookup Table. + +```js +// Assumptions: +// - `arrayOfInstructions` has been created as an `array` of `TransactionInstruction` +// - we are using the `lookupTableAccount` obtained above + +// construct a v0 compatible transaction `Message` +const messageV0 = new web3.TransactionMessage({ + payerKey: payer.publicKey, + recentBlockhash: blockhash, + instructions: arrayOfInstructions, // note this is an array of instructions +}).compileToV0Message([lookupTableAccount]); + +// create a v0 transaction from the v0 message +const transactionV0 = new web3.VersionedTransaction(messageV0); + +// sign the v0 transaction using the file system wallet we created named `payer` +transactionV0.sign([payer]); + +// send and confirm the transaction +// (NOTE: There is NOT an array of Signers here; see the note below...) +const txid = await web3.sendAndConfirmTransaction(connection, transactionV0); + +console.log( + `Transaction: https://explorer.solana.com/tx/${txid}?cluster=devnet`, +); +``` + +> NOTE: When sending a `VersionedTransaction` to the cluster, it must be signed +> BEFORE calling the `sendAndConfirmTransaction` method. If you pass an array of +> `Signer` (like with `legacy` transactions) the method will trigger an error! 
+ +## More Resources + +- Read the [proposal](https://docs.anza.xyz/proposals/versioned-transactions) + for Address Lookup Tables and Versioned transactions +- [Example Rust program using Address Lookup Tables](https://github.com/TeamRaccoons/address-lookup-table-multi-swap) diff --git a/content/docs/advanced/meta.json b/content/docs/advanced/meta.json new file mode 100644 index 000000000..697759c87 --- /dev/null +++ b/content/docs/advanced/meta.json @@ -0,0 +1,12 @@ +{ + "title": "Advanced Topics", + "pages": [ + "confirmation", + "retry", + "versions", + "lookup-tables", + "state-compression", + "actions" + ], + "defaultOpen": true +} diff --git a/content/docs/advanced/retry.mdx b/content/docs/advanced/retry.mdx new file mode 100644 index 000000000..a88a8351b --- /dev/null +++ b/content/docs/advanced/retry.mdx @@ -0,0 +1,328 @@ +--- +title: Retrying Transactions +altRoutes: + - /docs/core/transactions/retry +description: + Learn how to handle dropped transactions and implement custom retry logic on + Solana. This guide covers transaction rebroadcasting, preflight checks, and + best practices for managing transaction retries to ensure reliable transaction + processing on the Solana blockchain. +--- + +# Retrying Transactions + +On some occasions, a seemingly valid transaction may be dropped before it is +included in a block. This most often occurs during periods of network +congestion, when an RPC node fails to rebroadcast the transaction to the +[leader](/docs/terminology#leader). To an end-user, it may appear as if their +transaction disappears entirely. While RPC nodes are equipped with a generic +rebroadcasting algorithm, application developers are also capable of developing +their own custom rebroadcasting logic. + +## TLDR; + +- RPC nodes will attempt to rebroadcast transactions using a generic algorithm +- Application developers can implement their own custom rebroadcasting logic +- Developers should take advantage of the `maxRetries` parameter on the + `sendTransaction` JSON-RPC method +- Developers should enable preflight checks to raise errors before transactions + are submitted +- Before re-signing any transaction, it is **very important** to ensure that the + initial transaction's blockhash has expired + +## The Journey of a Transaction + +### How Clients Submit Transactions + +In Solana, there is no concept of a mempool. All transactions, whether they are +initiated programmatically or by an end-user, are efficiently routed to leaders +so that they can be processed into a block. There are two main ways in which a +transaction can be sent to leaders: + +1. By proxy via an RPC server and the + [sendTransaction](/docs/rpc/http/sendtransaction) JSON-RPC method +2. Directly to leaders via a + [TPU Client](https://docs.rs/solana-client/latest/solana_client/tpu_client/index.html) + +The vast majority of end-users will submit transactions via an RPC server. When +a client submits a transaction, the receiving RPC node will in turn attempt to +broadcast the transaction to both the current and next leaders. Until the +transaction is processed by a leader, there is no record of the transaction +outside of what the client and the relaying RPC nodes are aware of. In the case +of a TPU client, rebroadcast and leader forwarding is handled entirely by the +client software. 
+ +![Overview of a transactions journey, from client to leader](/assets/docs/rt-tx-journey.png) + +### How RPC Nodes Broadcast Transactions + +After an RPC node receives a transaction via `sendTransaction`, it will convert +the transaction into a +[UDP](https://en.wikipedia.org/wiki/User_Datagram_Protocol) packet before +forwarding it to the relevant leaders. UDP allows validators to quickly +communicate with one another, but does not provide any guarantees regarding +transaction delivery. + +Because Solana's leader schedule is known in advance of every +[epoch](/docs/terminology#epoch) (~2 days), an RPC node will broadcast its +transaction directly to the current and next leaders. This is in contrast to +other gossip protocols such as Ethereum that propagate transactions randomly and +broadly across the entire network. By default, RPC nodes will try to forward +transactions to leaders every two seconds until either the transaction is +finalized or the transaction's blockhash expires (150 blocks or ~1 minute 19 +seconds as of the time of this writing). If the outstanding rebroadcast queue +size is greater than +[10,000 transactions](https://github.com/solana-labs/solana/blob/bfbbc53dac93b3a5c6be9b4b65f679fdb13e41d9/send-transaction-service/src/send_transaction_service.rs#L20), +newly submitted transactions are dropped. There are command-line +[arguments](https://github.com/solana-labs/solana/blob/bfbbc53dac93b3a5c6be9b4b65f679fdb13e41d9/validator/src/main.rs#L1172) +that RPC operators can adjust to change the default behavior of this retry +logic. + +When an RPC node broadcasts a transaction, it will attempt to forward the +transaction to a leader's +[Transaction Processing Unit (TPU)](https://github.com/solana-labs/solana/blob/cd6f931223181d5a1d47cba64e857785a175a760/core/src/validator.rs#L867). +The TPU processes transactions in five distinct phases: + +- [Fetch Stage](https://github.com/solana-labs/solana/blob/cd6f931223181d5a1d47cba64e857785a175a760/core/src/fetch_stage.rs#L21) +- [SigVerify Stage](https://github.com/solana-labs/solana/blob/cd6f931223181d5a1d47cba64e857785a175a760/core/src/tpu.rs#L91) +- [Banking Stage](https://github.com/solana-labs/solana/blob/cd6f931223181d5a1d47cba64e857785a175a760/core/src/banking_stage.rs#L249) +- [Proof of History Service](https://github.com/solana-labs/solana/blob/cd6f931223181d5a1d47cba64e857785a175a760/poh/src/poh_service.rs) +- [Broadcast Stage](https://github.com/solana-labs/solana/blob/cd6f931223181d5a1d47cba64e857785a175a760/core/src/tpu.rs#L136) + +![Overview of the Transaction Processing Unit (TPU)](/assets/docs/rt-tpu-jito-labs.png) + +Of these five phases, the Fetch Stage is responsible for receiving transactions. 
+Within the Fetch Stage, validators will categorize incoming transactions +according to three ports: + +- [tpu](https://github.com/solana-labs/solana/blob/cd6f931223181d5a1d47cba64e857785a175a760/gossip/src/contact_info.rs#L27) + handles regular transactions such as token transfers, NFT mints, and program + instructions +- [tpu_vote](https://github.com/solana-labs/solana/blob/cd6f931223181d5a1d47cba64e857785a175a760/gossip/src/contact_info.rs#L31) + focuses exclusively on voting transactions +- [tpu_forwards](https://github.com/solana-labs/solana/blob/cd6f931223181d5a1d47cba64e857785a175a760/gossip/src/contact_info.rs#L29) + forwards unprocessed packets to the next leader if the current leader is + unable to process all transactions + +For more information on the TPU, please refer to +[this excellent writeup by Jito Labs](https://jito-labs.medium.com/solana-validator-101-transaction-processing-90bcdc271143). + +## How Transactions Get Dropped + +Throughout a transaction's journey, there are a few scenarios in which the +transaction can be unintentionally dropped from the network. + +### Before a transaction is processed + +If the network drops a transaction, it will most likely do so before the +transaction is processed by a leader. UDP +[packet loss](https://en.wikipedia.org/wiki/Packet_loss) is the simplest reason +why this might occur. During times of intense network load, it's also possible +for validators to become overwhelmed by the sheer number of transactions +required for processing. While validators are equipped to forward surplus +transactions via `tpu_forwards`, there is a limit to the amount of data that can +be +[forwarded](https://github.com/solana-labs/solana/blob/master/core/src/banking_stage.rs#L389). +Furthermore, each forward is limited to a single hop between validators. That +is, transactions received on the `tpu_forwards` port are not forwarded on to +other validators. + +There are also two lesser known reasons why a transaction may be dropped before +it is processed. The first scenario involves transactions that are submitted via +an RPC pool. Occasionally, part of the RPC pool can be sufficiently ahead of the +rest of the pool. This can cause issues when nodes within the pool are required +to work together. In this example, the transaction's +[recentBlockhash](/docs/core/transactions#recent-blockhash) is queried from +the advanced part of the pool (Backend A). When the transaction is submitted to +the lagging part of the pool (Backend B), the nodes will not recognize the +advanced blockhash and will drop the transaction. This can be detected upon +transaction submission if developers enable +[preflight checks](/docs/rpc/http/sendtransaction) on `sendTransaction`. + +![Transaction dropped via an RPC Pool](/assets/docs/rt-dropped-via-rpc-pool.png) + +Temporary network forks can also result in dropped transactions. If a validator +is slow to replay its blocks within the Banking Stage, it may end up creating a +minority fork. When a client builds a transaction, it's possible for the +transaction to reference a `recentBlockhash` that only exists on the minority +fork. After the transaction is submitted, the cluster can then switch away from +its minority fork before the transaction is processed. In this scenario, the +transaction is dropped due to the blockhash not being found. 
+ +![Transaction dropped due to minority fork (before processed)](/assets/docs/rt-dropped-minority-fork-pre-process.png) + +### After a transaction is processed and before it is finalized + +In the event a transaction references a `recentBlockhash` from a minority fork, +it's still possible for the transaction to be processed. In this case, however, +it would be processed by the leader on the minority fork. When this leader +attempts to share its processed transactions with the rest of the network, it +would fail to reach consensus with the majority of validators that do not +recognize the minority fork. At this time, the transaction would be dropped +before it could be finalized. + +![Transaction dropped due to minority fork (after processed)](/assets/docs/rt-dropped-minority-fork-post-process.png) + +## Handling Dropped Transactions + +While RPC nodes will attempt to rebroadcast transactions, the algorithm they +employ is generic and often ill-suited for the needs of specific applications. +To prepare for times of network congestion, application developers should +customize their own rebroadcasting logic. + +### An In-Depth Look at sendTransaction + +When it comes to submitting transactions, the `sendTransaction` RPC method is +the primary tool available to developers. `sendTransaction` is only responsible +for relaying a transaction from a client to an RPC node. If the node receives +the transaction, `sendTransaction` will return the transaction id that can be +used to track the transaction. A successful response does not indicate whether +the transaction will be processed or finalized by the cluster. + +### Request Parameters + +- `transaction`: `string` - fully-signed Transaction, as encoded string +- (optional) `configuration object`: `object` + - `skipPreflight`: `boolean` - if true, skip the preflight transaction checks + (default: false) + - (optional) `preflightCommitment`: `string` - + [Commitment](/docs/rpc/#configuring-state-commitment) level to use + for preflight simulations against the bank slot (default: "finalized"). + - (optional) `encoding`: `string` - Encoding used for the transaction data. + Either "base58" (slow), or "base64". (default: "base58"). + - (optional) `maxRetries`: `usize` - Maximum number of times for the RPC node + to retry sending the transaction to the leader. If this parameter is not + provided, the RPC node will retry the transaction until it is finalized or + until the blockhash expires. + +**Response:** + +- `transaction id`: `string` - First transaction signature embedded in the + transaction, as base-58 encoded string. This transaction id can be used with + [`getSignatureStatuses`](/docs/rpc/http/getsignaturestatuses) to poll for + status updates. + +## Customizing Rebroadcast Logic + +In order to develop their own rebroadcasting logic, developers should take +advantage of `sendTransaction`'s `maxRetries` parameter. If provided, +`maxRetries` will override an RPC node's default retry logic, allowing +developers to manually control the retry process +[within reasonable bounds](https://github.com/solana-labs/solana/blob/98707baec2385a4f7114d2167ef6dfb1406f954f/validator/src/main.rs#L1258-L1274). + +A common pattern for manually retrying transactions involves temporarily storing +the `lastValidBlockHeight` that comes from +[getLatestBlockhash](/docs/rpc/http/getlatestblockhash). Once stashed, an +application can then +[poll the cluster's blockheight](/docs/rpc/http/getblockheight) and manually +retry the transaction at an appropriate interval. 
In times of network +congestion, it's advantageous to set `maxRetries` to 0 and manually rebroadcast +via a custom algorithm. While some applications may employ an +[exponential backoff](https://en.wikipedia.org/wiki/Exponential_backoff) +algorithm, others such as [Mango](https://www.mango.markets/) opt to +[continuously resubmit](https://github.com/blockworks-foundation/mango-ui/blob/b6abfc6c13b71fc17ebbe766f50b8215fa1ec54f/src/utils/send.tsx#L713) +transactions at a constant interval until some timeout has occurred. + +```ts +import { + Keypair, + Connection, + LAMPORTS_PER_SOL, + SystemProgram, + Transaction, +} from "@solana/web3.js"; +import * as nacl from "tweetnacl"; + +const sleep = async (ms: number) => { + return new Promise(r => setTimeout(r, ms)); +}; + +(async () => { + const payer = Keypair.generate(); + const toAccount = Keypair.generate().publicKey; + + const connection = new Connection("http://127.0.0.1:8899", "confirmed"); + + const airdropSignature = await connection.requestAirdrop( + payer.publicKey, + LAMPORTS_PER_SOL, + ); + + await connection.confirmTransaction({ signature: airdropSignature }); + + const blockhashResponse = await connection.getLatestBlockhash(); + const lastValidBlockHeight = blockhashResponse.lastValidBlockHeight - 150; + + const transaction = new Transaction({ + feePayer: payer.publicKey, + blockhash: blockhashResponse.blockhash, + lastValidBlockHeight: lastValidBlockHeight, + }).add( + SystemProgram.transfer({ + fromPubkey: payer.publicKey, + toPubkey: toAccount, + lamports: 1000000, + }), + ); + const message = transaction.serializeMessage(); + const signature = nacl.sign.detached(message, payer.secretKey); + transaction.addSignature(payer.publicKey, Buffer.from(signature)); + const rawTransaction = transaction.serialize(); + let blockheight = await connection.getBlockHeight(); + + while (blockheight < lastValidBlockHeight) { + connection.sendRawTransaction(rawTransaction, { + skipPreflight: true, + }); + await sleep(500); + blockheight = await connection.getBlockHeight(); + } +})(); +``` + +When polling via `getLatestBlockhash`, applications should specify their +intended [commitment](/docs/rpc/#configuring-state-commitment) level. +By setting its commitment to `confirmed` (voted on) or `finalized` (~30 blocks +after `confirmed`), an application can avoid polling a blockhash from a minority +fork. + +If an application has access to RPC nodes behind a load balancer, it can also +choose to divide its workload amongst specific nodes. RPC nodes that serve +data-intensive requests such as +[getProgramAccounts](/developers/guides/javascript/get-program-accounts) may be +prone to falling behind and can be ill-suited for also forwarding transactions. +For applications that handle time-sensitive transactions, it may be prudent to +have dedicated nodes that only handle `sendTransaction`. + +### The Cost of Skipping Preflight + +By default, `sendTransaction` will perform three preflight checks prior to +submitting a transaction. Specifically, `sendTransaction` will: + +- Verify that all signatures are valid +- Check that the referenced blockhash is within the last 150 blocks +- Simulate the transaction against the bank slot specified by the + `preflightCommitment` + +In the event that any of these three preflight checks fail, `sendTransaction` +will raise an error prior to submitting the transaction. Preflight checks can +often be the difference between losing a transaction and allowing a client to +gracefully handle an error. 
To ensure that these common errors are accounted
for, it is recommended that developers keep `skipPreflight` set to `false`.

### When to Re-Sign Transactions

Despite all attempts to rebroadcast, there may be times in which a client is
required to re-sign a transaction. Before re-signing any transaction, it is
**very important** to ensure that the initial transaction's blockhash has
expired. If the initial blockhash is still valid, it is possible for both
transactions to be accepted by the network. To an end-user, this would appear as
if they unintentionally sent the same transaction twice.

In Solana, a dropped transaction can be safely discarded once the blockhash it
references is older than the `lastValidBlockHeight` received from
`getLatestBlockhash`. Developers should keep track of this
`lastValidBlockHeight` by querying
[`getEpochInfo`](/docs/rpc/http/getepochinfo) and comparing with
`blockHeight` in the response. Once a blockhash is invalidated, clients may
re-sign with a newly-queried blockhash.
diff --git a/content/docs/advanced/state-compression.mdx b/content/docs/advanced/state-compression.mdx new file mode 100644 index 000000000..a09953b6c --- /dev/null +++ b/content/docs/advanced/state-compression.mdx @@ -0,0 +1,338 @@ +--- +title: State Compression +description: + 'State Compression is the method of cheaply and securely storing + "fingerprints" of offchain data in the Solana ledger, instead of expensive + accounts.' +---

On Solana, [State Compression](/docs/advanced/state-compression) is the
method of creating a "fingerprint" (or hash) of offchain data and storing this
fingerprint on-chain for secure verification. This effectively uses the security
of the Solana ledger to validate offchain data, verifying that it has not been
tampered with.

This method of "compression" allows Solana programs and dApps to use cheap
blockchain [ledger](/docs/terminology#ledger) space, instead of the more
expensive [account](/docs/terminology#account) space, to securely store data.

This is accomplished by using a special binary tree structure, known as a
[concurrent merkle tree](#what-is-a-concurrent-merkle-tree), to create a hash of
each piece of data (called a `leaf`), hashing those together, and only storing
this final hash on-chain.

## What is State Compression?

In simple terms, state compression uses "**_tree_**" structures to
cryptographically hash offchain data together, in a deterministic way, to
compute a single final hash that gets stored on-chain.

These _trees_ are created in this "_deterministic_" process by:

- taking any piece of data
- creating a hash of this data
- storing this hash as a `leaf` at the bottom of the tree
- each `leaf` pair is then hashed together, creating a `branch`
- each `branch` is then hashed together
- continually climbing the tree and hashing adjacent branches together
- once at the top of the tree, a final `root hash` is produced

This `root hash` is then stored onchain as a verifiable **_proof_** of all of
the data within every leaf, allowing anyone to cryptographically verify all the
offchain data within the tree while only actually storing a **minimal** amount
of data on-chain. This significantly reduces the cost to store/prove large
amounts of data, hence the name "state compression".
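
To make this deterministic process concrete, below is a minimal sketch of
computing a `root hash` from a list of data pieces. It is illustrative only: the
`hash` helper and `computeRootHash` function are hypothetical, and SHA-256 with
a simple pairing rule stands in for the exact hashing scheme used by the
on-chain compression programs.

```ts
import { createHash } from "crypto";

// hash a single buffer (SHA-256 here, purely for illustration)
const hash = (data: Buffer): Buffer =>
  createHash("sha256").update(data).digest();

// build the tree bottom-up: hash each piece of data into a leaf,
// then hash adjacent pairs together until one root hash remains
function computeRootHash(dataPieces: Buffer[]): Buffer {
  let level = dataPieces.map(hash);
  while (level.length > 1) {
    const next: Buffer[] = [];
    for (let i = 0; i < level.length; i += 2) {
      const left = level[i];
      const right = level[i + 1] ?? left; // re-use the last node if unpaired
      next.push(hash(Buffer.concat([left, right])));
    }
    level = next;
  }
  return level[0];
}

const root = computeRootHash([
  Buffer.from("leaf data 1"),
  Buffer.from("leaf data 2"),
  Buffer.from("leaf data 3"),
]);
console.log("root hash:", root.toString("hex"));
```

Changing any single piece of data and re-running `computeRootHash` produces a
different root, which is what makes the stored root hash a tamper-evident
fingerprint of the offchain data.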

## Merkle trees and concurrent merkle trees

Solana's state compression uses a special type of
[merkle tree](#what-is-a-merkle-tree) that allows multiple changes to be made to
any given tree while still maintaining the integrity and validity of the tree.

This special tree, known as a
"[concurrent merkle tree](#what-is-a-concurrent-merkle-tree)", effectively
retains a "changelog" of the tree on-chain, allowing for multiple rapid changes
to the same tree (i.e. all in the same block) before a proof is invalidated.

### What is a merkle tree?

A [merkle tree](https://en.wikipedia.org/wiki/merkle_tree), sometimes called a
"hash tree", is a hash-based binary tree structure where each `leaf` node is
represented as a cryptographic hash of its inner data, and every node that is
**not** a leaf, called a `branch`, is represented as a hash of its child
hashes.

Each branch is then also hashed together, climbing the tree, until eventually
only a single hash remains. This final hash, called the `root hash` or "root",
can then be used in combination with a "proof path" to verify any piece of data
stored within a leaf node.

Once a final `root hash` has been computed, any piece of data stored within a
`leaf` node can be verified by rehashing the specific leaf's data and the hash
label of each adjacent branch climbing the tree (known as the `proof` or "proof
path"). Comparing this "rehash" to the `root hash` verifies the underlying leaf
data: if they match, the data is verified accurate; if they do not match, the
leaf data was changed.

Whenever desired, the original leaf data can be changed by simply hashing the
**new leaf** data and recomputing the root hash in the same manner as the
original root. This **new root hash** is then used to verify any of the data,
and effectively invalidates the previous root hash and previous proof.
Therefore, each change to these _traditional merkle trees_ is required to be
performed in series.

> This process of changing leaf data and computing a new root hash can be a
> **very common** thing when using merkle trees! While it is one of the design
> points of the tree, it also results in one of the most notable drawbacks:
> handling rapid changes.

### What is a Concurrent merkle tree?

In high throughput applications, like within the
[Solana runtime](/docs/core/fees), requests to change an on-chain
_traditional merkle tree_ could be received by validators in relatively rapid
succession (e.g. within the same slot). Each leaf data change would still be
required to be performed in series, causing each subsequent request for change
to fail because the root hash and proof were invalidated by the previous change
request in the slot.

Enter concurrent merkle trees.

A **concurrent merkle tree** stores a **secure changelog** of the most recent
changes, their root hash, and the proof to derive it. This changelog "buffer" is
stored on-chain in an account specific to each tree, with a maximum number of
changelog "records" (aka `maxBufferSize`).

When multiple leaf data change requests are received by validators in the same
slot, the on-chain _concurrent merkle tree_ can use this "changelog buffer" as a
source of truth for more acceptable proofs, effectively allowing for up to
`maxBufferSize` changes to the same tree in the same slot and significantly
boosting throughput.
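
Building on the "proof path" described above, the following sketch shows how a
client could verify one leaf against a known root hash. As before, this is a
hypothetical helper using SHA-256, and it assumes the proof lists the sibling
hash at each level from the leaf up to (but not including) the root.

```ts
import { createHash } from "crypto";

const hash = (data: Buffer): Buffer =>
  createHash("sha256").update(data).digest();

// `proof` holds the sibling hash at each level, from leaf to root;
// `leafIndex` tells us whether the current node sits on the left or right
function verifyLeaf(
  leafData: Buffer,
  leafIndex: number,
  proof: Buffer[],
  rootHash: Buffer,
): boolean {
  let node = hash(leafData);
  let index = leafIndex;
  for (const sibling of proof) {
    node =
      index % 2 === 0
        ? hash(Buffer.concat([node, sibling])) // node is a left child
        : hash(Buffer.concat([sibling, node])); // node is a right child
    index = Math.floor(index / 2);
  }
  // the recomputed root must match the stored root hash exactly
  return node.equals(rootHash);
}
```

The number of sibling hashes in the proof equals the depth of the tree, which is
why (as covered in the next section) larger trees require more proof nodes for
each update.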
+ +## Sizing a concurrent merkle tree + +When creating one of these on-chain trees, there are 3 values that will +determine the size of your tree, the cost to create your tree, and the number of +concurrent changes to your tree: + +1. max depth +2. max buffer size +3. canopy depth + +### Max depth + +The "max depth" of a tree is the **maximum number** of hops to get from any data +`leaf` to the `root` of the tree. + +Since merkle trees are binary trees, every leaf is connected to **only one** +other leaf; existing as a `leaf pair`. + +Therefore, the `maxDepth` of a tree is used to determine the maximum number of +nodes (aka pieces of data or `leafs`) to store within the tree using a simple +calculation: + +```text +nodes_count = 2 ^ maxDepth +``` + +Since a trees depth must be set at tree creation, you must decide how many +pieces of data you want your tree to store. Then using the simple calculation +above, you can determine the lowest `maxDepth` to store your data. + +#### Example 1: minting 100 nfts + +If you wanted to create a tree to store 100 compressed nfts, we will need a +minimum of "100 leafs" or "100 nodes". + +```text +// maxDepth=6 -> 64 nodes +2^6 = 64 + +// maxDepth=7 -> 128 nodes +2^7 = 128 +``` + +We must use a `maxDepth` of `7` to ensure we can store all of our data. + +#### Example 2: minting 15000 nfts + +If you wanted to create a tree to store 15000 compressed nfts, we will need a +minimum of "15000 leafs" or "15000 nodes". + +```text +// maxDepth=13 -> 8192 nodes +2^13 = 8192 + +// maxDepth=14 -> 16384 nodes +2^14 = 16384 +``` + +We must use a `maxDepth` of `14` to ensure we can store all of our data. + +#### The higher the max depth, the higher the cost + +The `maxDepth` value will be one of the primary drivers of cost when creating a +tree since you will pay this cost upfront at tree creation. The higher the max +tree depth, the more data fingerprints (aka hashes) you can store, the higher +the cost. + +### Max buffer size + +The "max buffer size" is effectively the maximum number of changes that can +occur on a tree, with the `root hash` still being valid. + +Due to the root hash effectively being a single hash of all leaf data, changing +any single leaf would invalidate the proof needed for all subsequent attempts to +change any leaf of a regular tree. + +But with a [concurrent tree](#what-is-a-concurrent-merkle-tree), there is +effectively a changelog of updates for these proofs. This changelog buffer is +sized and set at tree creation via this `maxBufferSize` value. + +### Canopy depth + +The "canopy depth," also known as the canopy size, refers to the number of proof +node levels that are cached or stored onchain for a given proof path. + +When performing an update action on a `leaf`, like transferring ownership (e.g. +selling a compressed NFT), the **complete** proof path must be used to verify +original ownership of the leaf and therefore allow for the update action. This +verification is performed using the **complete** proof path to correctly compute +the current `root hash` (or any cached `root hash` via the onchain "concurrent +buffer"). + +The larger a tree's max depth is, the more proof nodes are required to perform +this verification. For example, if your max depth is `14`, there are `14` total +proof nodes required to be used to verify. As a tree gets larger, the complete +proof path gets larger. + +Normally, each of these proof nodes would be required to be included within each +tree update transaction. 
Since each proof node value takes up `32 bytes` in a +transaction (similar to providing a Public Key), larger trees would very quickly +exceed the maximum transaction size limit. + +Enter the canopy. The canopy enables storing a set number of proof nodes on +chain (for any given proof path). Allowing for less proof nodes to be included +within each update transactions, therefore keeping the overall transaction size +below the limit. + +For example, a tree with a max depth of `14` would require `14` total proof +nodes. With a canopy of `10`, only `4` proof nodes are required to be submitted +per update transaction. + +![Canopy depth of 1 for a Concurrent Merkle Tree of max depth of 3](/assets/docs/compression/canopy-depth-1.png) + +Consider another example, this time with a tree of max depth `3`. If we want to +apply an action to one of the tree’s leaves—such as updating `R4`—we need to +provide proofs for `L4` and `R2`. However, we can omit `R1` since it is already +cached/stored onchain due to our canopy depth of `1`, which ensures that all +nodes at level 1 (`L1` and `R1`) are stored onchain. This results in a total of +2 required proofs. + +Therefore, the number of proofs required to update a leaf is equal to the max +depth minus the canopy depth. In this example, `3 - 1 = 2`. + +#### The larger the canopy depth value, the higher the cost + +The `canopyDepth` value is also a primary factor of cost when creating a tree +since you will pay this cost upfront at tree creation. The higher the canopy +depth, the more data proof nodes are stored onchain, the higher the cost. + +#### Smaller canopy limits composability + +While a tree's creation costs are higher with a higher canopy, having a lower +`canopyDepth` will require more proof nodes to be included within each update +transaction. The more nodes required to be submitted, the larger the transaction +size, and therefore the easier it is to exceed the transaction size limits. + +This will also be the case for any other Solana program or dApp that attempts to +interact with your tree/leafs. If your tree requires too many proof nodes +(because of a low canopy depth), then any other additional actions another +on-chain program **could** offer will be **limited** by their specific +instruction size plus your proof node list size. Limiting composability, and +potential additional utility for your specific tree. + +For example, if your tree is being used for compressed NFTs and has a very low +canopy depth, an NFT marketplace may only be able to support simple NFTs +transfers. And not be able to support an on-chain bidding system. + +## Cost of creating a tree + +The cost of creating a concurrent merkle tree is based on the tree's size +parameters: `maxDepth`, `maxBufferSize`, and `canopyDepth`. These values are all +used to calculate the on-chain storage (in bytes) required for a tree to exist +onchain. + +Once the required space (in bytes) has been calculated, and using the +[`getMinimumBalanceForRentExemption`](/docs/rpc/http/getminimumbalanceforrentexemption) +RPC method, request the cost (in lamports) to allocate this amount of bytes +on-chain. 

### Calculate tree cost in JavaScript

Within the
[`@solana/spl-account-compression`](https://www.npmjs.com/package/@solana/spl-account-compression)
package, developers can use the
[`getConcurrentMerkleTreeAccountSize`](https://solana-labs.github.io/solana-program-library/account-compression/sdk/docs/modules/index.html#getConcurrentMerkleTreeAccountSize)
function to calculate the required space for a given set of tree size
parameters.

Then, use the
[`getMinimumBalanceForRentExemption`](https://solana-labs.github.io/solana-web3.js/v1.x/classes/Connection.html#getMinimumBalanceForRentExemption)
function to get the final cost (in lamports) to allocate the required space for
the tree on-chain. This determines the cost (in lamports) to make an account of
this size rent exempt, the same as any other account creation.

```ts
// calculate the space required for the tree
const requiredSpace = getConcurrentMerkleTreeAccountSize(
  maxDepth,
  maxBufferSize,
  canopyDepth,
);

// get the cost (in lamports) to store the tree on-chain
const storageCost =
  await connection.getMinimumBalanceForRentExemption(requiredSpace);
```

### Example costs

Listed below are several example costs for different tree sizes, including how
many leaf nodes are possible for each:

**Example #1: 16,384 nodes costing 0.222 SOL**

- max depth of `14` and max buffer size of `64`
- maximum number of leaf nodes: `16,384`
- canopy depth of `0` costs approximately `0.222 SOL` to create

**Example #2: 16,384 nodes costing 1.134 SOL**

- max depth of `14` and max buffer size of `64`
- maximum number of leaf nodes: `16,384`
- canopy depth of `11` costs approximately `1.134 SOL` to create

**Example #3: 1,048,576 nodes costing 1.673 SOL**

- max depth of `20` and max buffer size of `256`
- maximum number of leaf nodes: `1,048,576`
- canopy depth of `10` costs approximately `1.673 SOL` to create

**Example #4: 1,048,576 nodes costing 15.814 SOL**

- max depth of `20` and max buffer size of `256`
- maximum number of leaf nodes: `1,048,576`
- canopy depth of `15` costs approximately `15.814 SOL` to create

## Compressed NFTs

Compressed NFTs are one of the most popular use cases for State Compression on
Solana. With compression, a one million NFT collection could be minted for
`~50 SOL`, versus `~12,000 SOL` for its uncompressed equivalent collection.

If you are interested in creating compressed NFTs yourself, read our developer
guide for
[minting and transferring compressed NFTs](/developers/guides/javascript/compressed-nfts).
diff --git a/content/docs/advanced/versions.mdx b/content/docs/advanced/versions.mdx new file mode 100644 index 000000000..8b8ce8024 --- /dev/null +++ b/content/docs/advanced/versions.mdx @@ -0,0 +1,190 @@ +--- +title: Versioned Transactions +description: + "Explore the core Solana concepts: transactions, versioned transactions, + enabling additional functionality in the Solana runtime, address lookup + tables, and more." +altRoutes: + - /docs/core/transactions/versions +---

Versioned Transactions are the new transaction format that allows for
additional functionality in the Solana runtime, including
[Address Lookup Tables](/docs/advanced/lookup-tables).

While changes to onchain programs are **NOT** required to support the new
functionality of versioned transactions (or for backwards compatibility),
developers **WILL** need to update their client side code to prevent
[errors due to different transaction versions](#max-supported-transaction-version).
+ +## Current Transaction Versions + +The Solana runtime supports two transaction versions: + +- `legacy` - older transaction format with no additional benefit +- `0` - added support for + [Address Lookup Tables](/docs/advanced/lookup-tables) + +## Max supported transaction version + +All RPC requests that return a transaction **_should_** specify the highest +version of transactions they will support in their application using the +`maxSupportedTransactionVersion` option, including +[`getBlock`](/docs/rpc/http/getblock) and +[`getTransaction`](/docs/rpc/http/gettransaction). + +An RPC request will fail if a Versioned Transaction is returned that is higher +than the set `maxSupportedTransactionVersion`. (i.e. if a version `0` +transaction is returned when `legacy` is selected) + +> WARNING: If no `maxSupportedTransactionVersion` value is set, then only +> `legacy` transactions will be allowed in the RPC response. Therefore, your RPC +> requests **WILL** fail if any version `0` transactions are returned. + +## How to set max supported version + +You can set the `maxSupportedTransactionVersion` using both the +[`@solana/web3.js`](https://solana-labs.github.io/solana-web3.js/v1.x/) library +and JSON formatted requests directly to an RPC endpoint. + +### Using web3.js + +Using the +[`@solana/web3.js`](https://solana-labs.github.io/solana-web3.js/v1.x/) library, +you can retrieve the most recent block or get a specific transaction: + +```js +// connect to the `devnet` cluster and get the current `slot` +const connection = new web3.Connection(web3.clusterApiUrl("devnet")); +const slot = await connection.getSlot(); + +// get the latest block (allowing for v0 transactions) +const block = await connection.getBlock(slot, { + maxSupportedTransactionVersion: 0, +}); + +// get a specific transaction (allowing for v0 transactions) +const getTx = await connection.getTransaction( + "3jpoANiFeVGisWRY5UP648xRXs3iQasCHABPWRWnoEjeA93nc79WrnGgpgazjq4K9m8g2NJoyKoWBV1Kx5VmtwHQ", + { + maxSupportedTransactionVersion: 0, + }, +); +``` + +### JSON requests to the RPC + +Using a standard JSON formatted POST request, you can set the +`maxSupportedTransactionVersion` when retrieving a specific block: + +```shell +curl https://api.devnet.solana.com -X POST -H "Content-Type: application/json" -d \ +'{"jsonrpc": "2.0", "id":1, "method": "getBlock", "params": [430, { + "encoding":"json", + "maxSupportedTransactionVersion":0, + "transactionDetails":"full", + "rewards":false +}]}' +``` + +## How to create a Versioned Transaction + +Versioned transactions can be created similar to the older method of creating +transactions. There are differences in using certain libraries that should be +noted. + +Below is an example of how to create a Versioned Transaction, using the +`@solana/web3.js` library, to send perform a SOL transfer between two accounts. + +#### Notes: + +- `payer` is a valid `Keypair` wallet, funded with SOL +- `toAccount` a valid `Keypair` + +Firstly, import the web3.js library and create a `connection` to your desired +cluster. 
+ +We then define the recent `blockhash` and `minRent` we will need for our +transaction and the account: + +```js +const web3 = require("@solana/web3.js"); + +// connect to the cluster and get the minimum rent for rent exempt status +const connection = new web3.Connection(web3.clusterApiUrl("devnet")); +let minRent = await connection.getMinimumBalanceForRentExemption(0); +let blockhash = await connection + .getLatestBlockhash() + .then(res => res.blockhash); +``` + +Create an `array` of all the `instructions` you desire to send in your +transaction. In this example below, we are creating a simple SOL transfer +instruction: + +```js +// create an array with your desired `instructions` +const instructions = [ + web3.SystemProgram.transfer({ + fromPubkey: payer.publicKey, + toPubkey: toAccount.publicKey, + lamports: minRent, + }), +]; +``` + +Next, construct a `MessageV0` formatted transaction message with your desired +`instructions`: + +```js +// create v0 compatible message +const messageV0 = new web3.TransactionMessage({ + payerKey: payer.publicKey, + recentBlockhash: blockhash, + instructions, +}).compileToV0Message(); +``` + +Then, create a new `VersionedTransaction`, passing in our v0 compatible message: + +```js +const transaction = new web3.VersionedTransaction(messageV0); + +// sign your transaction with the required `Signers` +transaction.sign([payer]); +``` + +You can sign the transaction by either: + +- passing an array of `signatures` into the `VersionedTransaction` method, or +- call the `transaction.sign()` method, passing an array of the required + `Signers` + +> NOTE: After calling the `transaction.sign()` method, all the previous +> transaction `signatures` will be fully replaced by new signatures created from +> the provided in `Signers`. + +After your `VersionedTransaction` has been signed by all required accounts, you +can send it to the cluster and `await` the response: + +```js +// send our v0 transaction to the cluster +const txId = await connection.sendTransaction(transaction); +console.log(`https://explorer.solana.com/tx/${txId}?cluster=devnet`); +``` + +> NOTE: Unlike `legacy` transactions, sending a `VersionedTransaction` via +> `sendTransaction` does **NOT** support transaction signing via passing in an +> array of `Signers` as the second parameter. You will need to sign the +> transaction before calling `connection.sendTransaction()`. + +## More Resources + +- using + [Versioned Transactions for Address Lookup Tables](/docs/advanced/lookup-tables#how-to-create-an-address-lookup-table) +- view an + [example of a v0 transaction](https://explorer.solana.com/tx/h9WQsqSUYhFvrbJWKFPaXximJpLf6Z568NW1j6PBn3f7GPzQXe9PYMYbmWSUFHwgnUmycDNbEX9cr6WjUWkUFKx/?cluster=devnet) + on Solana Explorer +- read the + [accepted proposal](https://docs.anza.xyz/proposals/versioned-transactions) + for Versioned Transaction and Address Lookup Tables diff --git a/content/docs/clients/javascript-reference.mdx b/content/docs/clients/javascript-reference.mdx new file mode 100644 index 000000000..1066e6fa1 --- /dev/null +++ b/content/docs/clients/javascript-reference.mdx @@ -0,0 +1,861 @@ +--- +title: Web3.js API Examples +description: + Learn how to interact with the Solana blockchain using the @solana/web3.js + library through practical code samples and explanations. +--- + +## Web3 API Reference Guide + +The `@solana/web3.js` library is a package that has coverage over the +[Solana JSON RPC API](/docs/rpc). 
+ +You can find the full documentation for the `@solana/web3.js` library +[here](https://solana-labs.github.io/solana-web3.js/v1.x/). + +## General + +### Connection + +[Source Documentation](https://solana-labs.github.io/solana-web3.js/v1.x/classes/Connection.html) + +Connection is used to interact with the [Solana JSON RPC](/docs/rpc). You can +use Connection to confirm transactions, get account info, and more. + +You create a connection by defining the JSON RPC cluster endpoint and the +desired commitment. Once this is complete, you can use this connection object to +interact with any of the Solana JSON RPC API. + +#### Example Usage + +```javascript +const web3 = require("@solana/web3.js"); + +let connection = new web3.Connection(web3.clusterApiUrl("devnet"), "confirmed"); + +let slot = await connection.getSlot(); +console.log(slot); +// 93186439 + +let blockTime = await connection.getBlockTime(slot); +console.log(blockTime); +// 1630747045 + +let block = await connection.getBlock(slot); +console.log(block); + +/* +{ + blockHeight: null, + blockTime: 1630747045, + blockhash: 'AsFv1aV5DGip9YJHHqVjrGg6EKk55xuyxn2HeiN9xQyn', + parentSlot: 93186438, + previousBlockhash: '11111111111111111111111111111111', + rewards: [], + transactions: [] +} +*/ + +let slotLeader = await connection.getSlotLeader(); +console.log(slotLeader); +//49AqLYbpJYc2DrzGUAH1fhWJy62yxBxpLEkfJwjKy2jr +``` + +The above example shows only a few of the methods on Connection. Please see the +[source generated docs](https://solana-labs.github.io/solana-web3.js/v1.x/classes/Connection.html) +for the full list. + +### Transaction + +[Source Documentation](https://solana-labs.github.io/solana-web3.js/v1.x/classes/Transaction.html) + +A transaction is used to interact with programs on the Solana blockchain. These +transactions are constructed with TransactionInstructions, containing all the +accounts possible to interact with, as well as any needed data or program +addresses. Each TransactionInstruction consists of keys, data, and a programId. +You can do multiple instructions in a single transaction, interacting with +multiple programs at once. 
+ +#### Example Usage + +```javascript +const web3 = require("@solana/web3.js"); +const nacl = require("tweetnacl"); + +// Airdrop SOL for paying transactions +let payer = web3.Keypair.generate(); +let connection = new web3.Connection(web3.clusterApiUrl("devnet"), "confirmed"); + +let airdropSignature = await connection.requestAirdrop( + payer.publicKey, + web3.LAMPORTS_PER_SOL, +); + +await connection.confirmTransaction({ signature: airdropSignature }); + +let toAccount = web3.Keypair.generate(); + +// Create Simple Transaction +let transaction = new web3.Transaction(); + +// Add an instruction to execute +transaction.add( + web3.SystemProgram.transfer({ + fromPubkey: payer.publicKey, + toPubkey: toAccount.publicKey, + lamports: 1000, + }), +); + +// Send and confirm transaction +// Note: feePayer is by default the first signer, or payer, if the parameter is not set +await web3.sendAndConfirmTransaction(connection, transaction, [payer]); + +// Alternatively, manually construct the transaction +let recentBlockhash = await connection.getLatestBlockhash(); +let manualTransaction = new web3.Transaction({ + recentBlockhash: recentBlockhash.blockhash, + feePayer: payer.publicKey, +}); +manualTransaction.add( + web3.SystemProgram.transfer({ + fromPubkey: payer.publicKey, + toPubkey: toAccount.publicKey, + lamports: 1000, + }), +); + +let transactionBuffer = manualTransaction.serializeMessage(); +let signature = nacl.sign.detached(transactionBuffer, payer.secretKey); + +manualTransaction.addSignature(payer.publicKey, signature); + +let isVerifiedSignature = manualTransaction.verifySignatures(); +console.log(`The signatures were verified: ${isVerifiedSignature}`); + +// The signatures were verified: true + +let rawTransaction = manualTransaction.serialize(); + +await web3.sendAndConfirmRawTransaction(connection, rawTransaction); +``` + +### Keypair + +[Source Documentation](https://solana-labs.github.io/solana-web3.js/v1.x/classes/Keypair.html) + +The keypair is used to create an account with a public key and secret key within +Solana. You can either generate, generate from a seed, or create from a secret +key. 
+ +#### Example Usage + +```javascript +const { Keypair } = require("@solana/web3.js"); + +let account = Keypair.generate(); + +console.log(account.publicKey.toBase58()); +console.log(account.secretKey); + +// 2DVaHtcdTf7cm18Zm9VV8rKK4oSnjmTkKE6MiXe18Qsb +// Uint8Array(64) [ +// 152, 43, 116, 211, 207, 41, 220, 33, 193, 168, 118, +// 24, 176, 83, 206, 132, 47, 194, 2, 203, 186, 131, +// 197, 228, 156, 170, 154, 41, 56, 76, 159, 124, 18, +// 14, 247, 32, 210, 51, 102, 41, 43, 21, 12, 170, +// 166, 210, 195, 188, 60, 220, 210, 96, 136, 158, 6, +// 205, 189, 165, 112, 32, 200, 116, 164, 234 +// ] + +let seed = Uint8Array.from([ + 70, 60, 102, 100, 70, 60, 102, 100, 70, 60, 102, 100, 70, 60, 102, 100, 70, + 60, 102, 100, 70, 60, 102, 100, 70, 60, 102, 100, 70, 60, 102, 100, +]); +let accountFromSeed = Keypair.fromSeed(seed); + +console.log(accountFromSeed.publicKey.toBase58()); +console.log(accountFromSeed.secretKey); + +// 3LDverZtSC9Duw2wyGC1C38atMG49toPNW9jtGJiw9Ar +// Uint8Array(64) [ +// 70, 60, 102, 100, 70, 60, 102, 100, 70, 60, 102, +// 100, 70, 60, 102, 100, 70, 60, 102, 100, 70, 60, +// 102, 100, 70, 60, 102, 100, 70, 60, 102, 100, 34, +// 164, 6, 12, 9, 193, 196, 30, 148, 122, 175, 11, +// 28, 243, 209, 82, 240, 184, 30, 31, 56, 223, 236, +// 227, 60, 72, 215, 47, 208, 209, 162, 59 +// ] + +let accountFromSecret = Keypair.fromSecretKey(account.secretKey); + +console.log(accountFromSecret.publicKey.toBase58()); +console.log(accountFromSecret.secretKey); + +// 2DVaHtcdTf7cm18Zm9VV8rKK4oSnjmTkKE6MiXe18Qsb +// Uint8Array(64) [ +// 152, 43, 116, 211, 207, 41, 220, 33, 193, 168, 118, +// 24, 176, 83, 206, 132, 47, 194, 2, 203, 186, 131, +// 197, 228, 156, 170, 154, 41, 56, 76, 159, 124, 18, +// 14, 247, 32, 210, 51, 102, 41, 43, 21, 12, 170, +// 166, 210, 195, 188, 60, 220, 210, 96, 136, 158, 6, +// 205, 189, 165, 112, 32, 200, 116, 164, 234 +// ] +``` + +Using `generate` generates a random Keypair for use as an account on Solana. +Using `fromSeed`, you can generate a Keypair using a deterministic constructor. +`fromSecret` creates a Keypair from a secret Uint8array. You can see that the +publicKey for the `generate` Keypair and `fromSecret` Keypair are the same +because the secret from the `generate` Keypair is used in `fromSecret`. + +**Warning**: Do not use `fromSeed` unless you are creating a seed with high +entropy. Do not share your seed. Treat the seed like you would a private key. + +### PublicKey + +[Source Documentation](https://solana-labs.github.io/solana-web3.js/v1.x/classes/PublicKey.html) + +PublicKey is used throughout `@solana/web3.js` in transactions, keypairs, and +programs. You require publickey when listing each account in a transaction and +as a general identifier on Solana. + +A PublicKey can be created with a base58 encoded string, buffer, Uint8Array, +number, and an array of numbers. 
+ +#### Example Usage + +```javascript +const { Buffer } = require("buffer"); +const web3 = require("@solana/web3.js"); +const crypto = require("crypto"); + +// Create a PublicKey with a base58 encoded string +let base58publicKey = new web3.PublicKey( + "5xot9PVkphiX2adznghwrAuxGs2zeWisNSxMW6hU6Hkj", +); +console.log(base58publicKey.toBase58()); + +// 5xot9PVkphiX2adznghwrAuxGs2zeWisNSxMW6hU6Hkj + +// Create a Program Address +let highEntropyBuffer = crypto.randomBytes(31); +let programAddressFromKey = await web3.PublicKey.createProgramAddress( + [highEntropyBuffer.slice(0, 31)], + base58publicKey, +); +console.log(`Generated Program Address: ${programAddressFromKey.toBase58()}`); + +// Generated Program Address: 3thxPEEz4EDWHNxo1LpEpsAxZryPAHyvNVXJEJWgBgwJ + +// Find Program address given a PublicKey +let validProgramAddress = await web3.PublicKey.findProgramAddress( + [Buffer.from("", "utf8")], + programAddressFromKey, +); +console.log(`Valid Program Address: ${validProgramAddress}`); + +// Valid Program Address: C14Gs3oyeXbASzwUpqSymCKpEyccfEuSe8VRar9vJQRE,253 +``` + +### SystemProgram + +[Source Documentation](https://solana-labs.github.io/solana-web3.js/v1.x/classes/SystemProgram.html) + +The SystemProgram grants the ability to create accounts, allocate account data, +assign an account to programs, work with nonce accounts, and transfer lamports. +You can use the SystemInstruction class to help with decoding and reading +individual instructions + +#### Example Usage + +```javascript +const web3 = require("@solana/web3.js"); + +// Airdrop SOL for paying transactions +let payer = web3.Keypair.generate(); +let connection = new web3.Connection(web3.clusterApiUrl("devnet"), "confirmed"); + +let airdropSignature = await connection.requestAirdrop( + payer.publicKey, + web3.LAMPORTS_PER_SOL, +); + +await connection.confirmTransaction({ signature: airdropSignature }); + +// Allocate Account Data +let allocatedAccount = web3.Keypair.generate(); +let allocateInstruction = web3.SystemProgram.allocate({ + accountPubkey: allocatedAccount.publicKey, + space: 100, +}); +let transaction = new web3.Transaction().add(allocateInstruction); + +await web3.sendAndConfirmTransaction(connection, transaction, [ + payer, + allocatedAccount, +]); + +// Create Nonce Account +let nonceAccount = web3.Keypair.generate(); +let minimumAmountForNonceAccount = + await connection.getMinimumBalanceForRentExemption(web3.NONCE_ACCOUNT_LENGTH); +let createNonceAccountTransaction = new web3.Transaction().add( + web3.SystemProgram.createNonceAccount({ + fromPubkey: payer.publicKey, + noncePubkey: nonceAccount.publicKey, + authorizedPubkey: payer.publicKey, + lamports: minimumAmountForNonceAccount, + }), +); + +await web3.sendAndConfirmTransaction( + connection, + createNonceAccountTransaction, + [payer, nonceAccount], +); + +// Advance nonce - Used to create transactions as an account custodian +let advanceNonceTransaction = new web3.Transaction().add( + web3.SystemProgram.nonceAdvance({ + noncePubkey: nonceAccount.publicKey, + authorizedPubkey: payer.publicKey, + }), +); + +await web3.sendAndConfirmTransaction(connection, advanceNonceTransaction, [ + payer, +]); + +// Transfer lamports between accounts +let toAccount = web3.Keypair.generate(); + +let transferTransaction = new web3.Transaction().add( + web3.SystemProgram.transfer({ + fromPubkey: payer.publicKey, + toPubkey: toAccount.publicKey, + lamports: 1000, + }), +); +await web3.sendAndConfirmTransaction(connection, transferTransaction, [payer]); + +// Assign a new 
account to a program +let programId = web3.Keypair.generate(); +let assignedAccount = web3.Keypair.generate(); + +let assignTransaction = new web3.Transaction().add( + web3.SystemProgram.assign({ + accountPubkey: assignedAccount.publicKey, + programId: programId.publicKey, + }), +); + +await web3.sendAndConfirmTransaction(connection, assignTransaction, [ + payer, + assignedAccount, +]); +``` + +### Secp256k1Program + +[Source Documentation](https://solana-labs.github.io/solana-web3.js/v1.x/classes/Secp256k1Program.html) + +The Secp256k1Program is used to verify Secp256k1 signatures, which are used by +both Bitcoin and Ethereum. + +#### Example Usage + +```javascript +const { keccak_256 } = require("js-sha3"); +const web3 = require("@solana/web3.js"); +const secp256k1 = require("secp256k1"); + +// Create a Ethereum Address from secp256k1 +let secp256k1PrivateKey; +do { + secp256k1PrivateKey = web3.Keypair.generate().secretKey.slice(0, 32); +} while (!secp256k1.privateKeyVerify(secp256k1PrivateKey)); + +let secp256k1PublicKey = secp256k1 + .publicKeyCreate(secp256k1PrivateKey, false) + .slice(1); + +let ethAddress = + web3.Secp256k1Program.publicKeyToEthAddress(secp256k1PublicKey); +console.log(`Ethereum Address: 0x${ethAddress.toString("hex")}`); + +// Ethereum Address: 0xadbf43eec40694eacf36e34bb5337fba6a2aa8ee + +// Fund a keypair to create instructions +let fromPublicKey = web3.Keypair.generate(); +let connection = new web3.Connection(web3.clusterApiUrl("devnet"), "confirmed"); + +let airdropSignature = await connection.requestAirdrop( + fromPublicKey.publicKey, + web3.LAMPORTS_PER_SOL, +); + +await connection.confirmTransaction({ signature: airdropSignature }); + +// Sign Message with Ethereum Key +let plaintext = Buffer.from("string address"); +let plaintextHash = Buffer.from(keccak_256.update(plaintext).digest()); +let { signature, recid: recoveryId } = secp256k1.ecdsaSign( + plaintextHash, + secp256k1PrivateKey, +); + +// Create transaction to verify the signature +let transaction = new Transaction().add( + web3.Secp256k1Program.createInstructionWithEthAddress({ + ethAddress: ethAddress.toString("hex"), + plaintext, + signature, + recoveryId, + }), +); + +// Transaction will succeed if the message is verified to be signed by the address +await web3.sendAndConfirmTransaction(connection, transaction, [fromPublicKey]); +``` + +### Message + +[Source Documentation](https://solana-labs.github.io/solana-web3.js/v1.x/classes/Message.html) + +Message is used as another way to construct transactions. You can construct a +message using the accounts, header, instructions, and recentBlockhash that are a +part of a transaction. A [Transaction](/docs/clients/javascript#transaction) +is a Message plus the list of required signatures required to execute the +transaction. 
+ +#### Example Usage + +```javascript +const { Buffer } = require("buffer"); +const bs58 = require("bs58"); +const web3 = require("@solana/web3.js"); + +let toPublicKey = web3.Keypair.generate().publicKey; +let fromPublicKey = web3.Keypair.generate(); + +let connection = new web3.Connection(web3.clusterApiUrl("devnet"), "confirmed"); + +let airdropSignature = await connection.requestAirdrop( + fromPublicKey.publicKey, + web3.LAMPORTS_PER_SOL, +); + +await connection.confirmTransaction({ signature: airdropSignature }); + +let type = web3.SYSTEM_INSTRUCTION_LAYOUTS.Transfer; +let data = Buffer.alloc(type.layout.span); +let layoutFields = Object.assign({ instruction: type.index }); +type.layout.encode(layoutFields, data); + +let recentBlockhash = await connection.getRecentBlockhash(); + +let messageParams = { + accountKeys: [ + fromPublicKey.publicKey.toString(), + toPublicKey.toString(), + web3.SystemProgram.programId.toString(), + ], + header: { + numReadonlySignedAccounts: 0, + numReadonlyUnsignedAccounts: 1, + numRequiredSignatures: 1, + }, + instructions: [ + { + accounts: [0, 1], + data: bs58.encode(data), + programIdIndex: 2, + }, + ], + recentBlockhash, +}; + +let message = new web3.Message(messageParams); + +let transaction = web3.Transaction.populate(message, [ + fromPublicKey.publicKey.toString(), +]); + +await web3.sendAndConfirmTransaction(connection, transaction, [fromPublicKey]); +``` + +### Struct + +[Source Documentation](https://solana-labs.github.io/solana-web3.js/v1.x/classes/Struct.html) + +The struct class is used to create Rust compatible structs in javascript. This +class is only compatible with Borsh encoded Rust structs. + +#### Example Usage + +Struct in Rust: + +```rust +pub struct Fee { + pub denominator: u64, + pub numerator: u64, +} +``` + +Using web3: + +```javascript +import BN from "bn.js"; +import { Struct } from "@solana/web3.js"; + +export class Fee extends Struct { + denominator: BN; + numerator: BN; +} +``` + +### Enum + +[Source Documentation](https://solana-labs.github.io/solana-web3.js/v1.x/classes/Enum.html) + +The Enum class is used to represent a Rust compatible Enum in javascript. The +enum will just be a string representation if logged but can be properly +encoded/decoded when used in conjunction with +[Struct](/docs/clients/javascript#struct). This class is only compatible with +Borsh encoded Rust enumerations. + +#### Example Usage + +Rust: + +```rust +pub enum AccountType { + Uninitialized, + StakePool, + ValidatorList, +} +``` + +Web3: + +```javascript +import { Enum } from "@solana/web3.js"; + +export class AccountType extends Enum {} +``` + +### NonceAccount + +[Source Documentation](https://solana-labs.github.io/solana-web3.js/v1.x/classes/NonceAccount.html) + +Normally a transaction is rejected if a transaction's `recentBlockhash` field is +too old. To provide for certain custodial services, Nonce Accounts are used. +Transactions which use a `recentBlockhash` captured on-chain by a Nonce Account +do not expire as long at the Nonce Account is not advanced. + +You can create a nonce account by first creating a normal account, then using +`SystemProgram` to make the account a Nonce Account. 
+ +#### Example Usage + +```javascript +const web3 = require("@solana/web3.js"); + +// Create connection +let connection = new web3.Connection(web3.clusterApiUrl("devnet"), "confirmed"); + +// Generate accounts +let account = web3.Keypair.generate(); +let nonceAccount = web3.Keypair.generate(); + +// Fund account +let airdropSignature = await connection.requestAirdrop( + account.publicKey, + web3.LAMPORTS_PER_SOL, +); + +await connection.confirmTransaction({ signature: airdropSignature }); + +// Get Minimum amount for rent exemption +let minimumAmount = await connection.getMinimumBalanceForRentExemption( + web3.NONCE_ACCOUNT_LENGTH, +); + +// Form CreateNonceAccount transaction +let transaction = new web3.Transaction().add( + web3.SystemProgram.createNonceAccount({ + fromPubkey: account.publicKey, + noncePubkey: nonceAccount.publicKey, + authorizedPubkey: account.publicKey, + lamports: minimumAmount, + }), +); +// Create Nonce Account +await web3.sendAndConfirmTransaction(connection, transaction, [ + account, + nonceAccount, +]); + +let nonceAccountData = await connection.getNonce( + nonceAccount.publicKey, + "confirmed", +); + +console.log(nonceAccountData); +// NonceAccount { +// authorizedPubkey: PublicKey { +// _bn: +// }, +// nonce: '93zGZbhMmReyz4YHXjt2gHsvu5tjARsyukxD4xnaWaBq', +// feeCalculator: { lamportsPerSignature: 5000 } +// } + +let nonceAccountInfo = await connection.getAccountInfo( + nonceAccount.publicKey, + "confirmed", +); + +let nonceAccountFromInfo = web3.NonceAccount.fromAccountData( + nonceAccountInfo.data, +); + +console.log(nonceAccountFromInfo); +// NonceAccount { +// authorizedPubkey: PublicKey { +// _bn: +// }, +// nonce: '93zGZbhMmReyz4YHXjt2gHsvu5tjARsyukxD4xnaWaBq', +// feeCalculator: { lamportsPerSignature: 5000 } +// } +``` + +The above example shows both how to create a `NonceAccount` using +`SystemProgram.createNonceAccount`, as well as how to retrieve the +`NonceAccount` from accountInfo. Using the nonce, you can create transactions +offline with the nonce in place of the `recentBlockhash`. + +### VoteAccount + +[Source Documentation](https://solana-labs.github.io/solana-web3.js/v1.x/classes/VoteAccount.html) + +Vote account is an object that grants the capability of decoding vote accounts +from the native vote account program on the network. + +#### Example Usage + +```javascript +const web3 = require("@solana/web3.js"); + +let voteAccountInfo = await connection.getProgramAccounts(web3.VOTE_PROGRAM_ID); +let voteAccountFromData = web3.VoteAccount.fromAccountData( + voteAccountInfo[0].account.data, +); +console.log(voteAccountFromData); +/* +VoteAccount { + nodePubkey: PublicKey { + _bn: + }, + authorizedWithdrawer: PublicKey { + _bn: + }, + commission: 10, + rootSlot: 104570885, + votes: [ + { slot: 104570886, confirmationCount: 31 }, + { slot: 104570887, confirmationCount: 30 }, + { slot: 104570888, confirmationCount: 29 }, + { slot: 104570889, confirmationCount: 28 }, + { slot: 104570890, confirmationCount: 27 }, + { slot: 104570891, confirmationCount: 26 }, + { slot: 104570892, confirmationCount: 25 }, + { slot: 104570893, confirmationCount: 24 }, + { slot: 104570894, confirmationCount: 23 }, + ... 
+ ], + authorizedVoters: [ { epoch: 242, authorizedVoter: [PublicKey] } ], + priorVoters: [ + [Object], [Object], [Object], + [Object], [Object], [Object], + [Object], [Object], [Object], + [Object], [Object], [Object], + [Object], [Object], [Object], + [Object], [Object], [Object], + [Object], [Object], [Object], + [Object], [Object], [Object], + [Object], [Object], [Object], + [Object], [Object], [Object], + [Object], [Object] + ], + epochCredits: [ + { epoch: 179, credits: 33723163, prevCredits: 33431259 }, + { epoch: 180, credits: 34022643, prevCredits: 33723163 }, + { epoch: 181, credits: 34331103, prevCredits: 34022643 }, + { epoch: 182, credits: 34619348, prevCredits: 34331103 }, + { epoch: 183, credits: 34880375, prevCredits: 34619348 }, + { epoch: 184, credits: 35074055, prevCredits: 34880375 }, + { epoch: 185, credits: 35254965, prevCredits: 35074055 }, + { epoch: 186, credits: 35437863, prevCredits: 35254965 }, + { epoch: 187, credits: 35672671, prevCredits: 35437863 }, + { epoch: 188, credits: 35950286, prevCredits: 35672671 }, + { epoch: 189, credits: 36228439, prevCredits: 35950286 }, + ... + ], + lastTimestamp: { slot: 104570916, timestamp: 1635730116 } +} +*/ +``` + +## Staking + +### StakeProgram + +[Source Documentation](https://solana-labs.github.io/solana-web3.js/v1.x/classes/StakeProgram.html) + +The StakeProgram facilitates staking SOL and delegating them to any validators +on the network. You can use StakeProgram to create a stake account, stake some +SOL, authorize accounts for withdrawal of your stake, deactivate your stake, and +withdraw your funds. The StakeInstruction class is used to decode and read more +instructions from transactions calling the StakeProgram + +#### Example Usage + +```javascript +const web3 = require("@solana/web3.js"); + +// Fund a key to create transactions +let fromPublicKey = web3.Keypair.generate(); +let connection = new web3.Connection(web3.clusterApiUrl("devnet"), "confirmed"); + +let airdropSignature = await connection.requestAirdrop( + fromPublicKey.publicKey, + web3.LAMPORTS_PER_SOL, +); +await connection.confirmTransaction({ signature: airdropSignature }); + +// Create Account +let stakeAccount = web3.Keypair.generate(); +let authorizedAccount = web3.Keypair.generate(); +/* Note: This is the minimum amount for a stake account -- Add additional Lamports for staking + For example, we add 50 lamports as part of the stake */ +let lamportsForStakeAccount = + (await connection.getMinimumBalanceForRentExemption( + web3.StakeProgram.space, + )) + 50; + +let createAccountTransaction = web3.StakeProgram.createAccount({ + fromPubkey: fromPublicKey.publicKey, + authorized: new web3.Authorized( + authorizedAccount.publicKey, + authorizedAccount.publicKey, + ), + lamports: lamportsForStakeAccount, + lockup: new web3.Lockup(0, 0, fromPublicKey.publicKey), + stakePubkey: stakeAccount.publicKey, +}); +await web3.sendAndConfirmTransaction(connection, createAccountTransaction, [ + fromPublicKey, + stakeAccount, +]); + +// Check that stake is available +let stakeBalance = await connection.getBalance(stakeAccount.publicKey); +console.log(`Stake balance: ${stakeBalance}`); +// Stake balance: 2282930 + +// We can verify the state of our stake. 
This may take some time to become active +let stakeState = await connection.getStakeActivation(stakeAccount.publicKey); +console.log(`Stake state: ${stakeState.state}`); +// Stake state: inactive + +// To delegate our stake, we get the current vote accounts and choose the first +let voteAccounts = await connection.getVoteAccounts(); +let voteAccount = voteAccounts.current.concat(voteAccounts.delinquent)[0]; +let votePubkey = new web3.PublicKey(voteAccount.votePubkey); + +// We can then delegate our stake to the voteAccount +let delegateTransaction = web3.StakeProgram.delegate({ + stakePubkey: stakeAccount.publicKey, + authorizedPubkey: authorizedAccount.publicKey, + votePubkey: votePubkey, +}); +await web3.sendAndConfirmTransaction(connection, delegateTransaction, [ + fromPublicKey, + authorizedAccount, +]); + +// To withdraw our funds, we first have to deactivate the stake +let deactivateTransaction = web3.StakeProgram.deactivate({ + stakePubkey: stakeAccount.publicKey, + authorizedPubkey: authorizedAccount.publicKey, +}); +await web3.sendAndConfirmTransaction(connection, deactivateTransaction, [ + fromPublicKey, + authorizedAccount, +]); + +// Once deactivated, we can withdraw our funds +let withdrawTransaction = web3.StakeProgram.withdraw({ + stakePubkey: stakeAccount.publicKey, + authorizedPubkey: authorizedAccount.publicKey, + toPubkey: fromPublicKey.publicKey, + lamports: stakeBalance, +}); + +await web3.sendAndConfirmTransaction(connection, withdrawTransaction, [ + fromPublicKey, + authorizedAccount, +]); +``` + +### Authorized + +[Source Documentation](https://solana-labs.github.io/solana-web3.js/v1.x/classes/Authorized.html) + +Authorized is an object used when creating an authorized account for staking +within Solana. You can designate a `staker` and `withdrawer` separately, +allowing for a different account to withdraw other than the staker. + +You can find more usage of the `Authorized` object under +[`StakeProgram`](/docs/clients/javascript#stakeprogram) + +### Lockup + +[Source Documentation](https://solana-labs.github.io/solana-web3.js/v1.x/classes/Lockup.html) + +Lockup is used in conjunction with the +[StakeProgram](/docs/clients/javascript#stakeprogram) to create an account. +The Lockup is used to determine how long the stake will be locked, or unable to +be retrieved. If the Lockup is set to 0 for both epoch and the Unix timestamp, +the lockup will be disabled for the stake account. + +#### Example Usage + +```javascript +const { + Authorized, + Keypair, + Lockup, + StakeProgram, +} = require("@solana/web3.js"); + +let account = Keypair.generate(); +let stakeAccount = Keypair.generate(); +let authorized = new Authorized(account.publicKey, account.publicKey); +let lockup = new Lockup(0, 0, account.publicKey); + +let createStakeAccountInstruction = StakeProgram.createAccount({ + fromPubkey: account.publicKey, + authorized: authorized, + lamports: 1000, + lockup: lockup, + stakePubkey: stakeAccount.publicKey, +}); +``` + +The above code creates a `createStakeAccountInstruction` to be used when +creating an account with the `StakeProgram`. The Lockup is set to 0 for both the +epoch and Unix timestamp, disabling lockup for the account. + +See [StakeProgram](/docs/clients/javascript#stakeprogram) for more. 
diff --git a/content/docs/clients/javascript.mdx b/content/docs/clients/javascript.mdx new file mode 100644 index 000000000..def3769b1 --- /dev/null +++ b/content/docs/clients/javascript.mdx @@ -0,0 +1,408 @@ +--- +title: JavaScript / TypeScript +description: + Learn how to interact with Solana using the JavaScript/TypeScript client + library (@solana/web3.js). This guide covers wallet connections, transactions, + and custom program interactions with code examples. +h1: JavaScript Client for Solana +--- + +## What is Solana-Web3.js? + +The Solana-Web3.js library aims to provide complete coverage of Solana. The +library was built on top of the [Solana JSON RPC API](/docs/rpc). + +You can find the full documentation for the `@solana/web3.js` library +[here](https://solana-labs.github.io/solana-web3.js/v1.x/). + +## Common Terminology + +| Term | Definition | +| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| Program | Stateless executable code written to interpret instructions. Programs are capable of performing actions based on the instructions provided. | +| Instruction | The smallest unit of a program that a client can include in a transaction. Within its processing code, an instruction may contain one or more cross-program invocations. | +| Transaction | One or more instructions signed by the client using one or more Keypairs and executed atomically with only two possible outcomes: success or failure. | + +For the full list of terms, see +[Solana terminology](/docs/terminology#cross-program-invocation-cpi) + +## Getting Started + +### Installation + +#### yarn + +```shell +yarn add @solana/web3.js@1 +``` + +#### npm + +```shell +npm install --save @solana/web3.js@1 +``` + +#### Bundle + +```html + + + + + +``` + +### Usage + +#### Javascript + +```javascript +const solanaWeb3 = require("@solana/web3.js"); +console.log(solanaWeb3); +``` + +#### ES6 + +```javascript +import * as solanaWeb3 from "@solana/web3.js"; +console.log(solanaWeb3); +``` + +#### Browser Bundle + +```javascript +// solanaWeb3 is provided in the global namespace by the bundle script +console.log(solanaWeb3); +``` + +## Quickstart + +### Connecting to a Wallet + +To allow users to use your dApp or application on Solana, they will need to get +access to their Keypair. A Keypair is a private key with a matching public key, +used to sign transactions. + +There are two ways to obtain a Keypair: + +1. Generate a new Keypair +2. Obtain a Keypair using the secret key + +You can obtain a new Keypair with the following: + +```javascript +const { Keypair } = require("@solana/web3.js"); + +let keypair = Keypair.generate(); +``` + +This will generate a brand new Keypair for a user to fund and use within your +application. + +You can allow entry of the secretKey using a textbox, and obtain the Keypair +with `Keypair.fromSecretKey(secretKey)`. + +```javascript +const { Keypair } = require("@solana/web3.js"); + +let secretKey = Uint8Array.from([ + 202, 171, 192, 129, 150, 189, 204, 241, 142, 71, 205, 2, 81, 97, 2, 176, 48, + 81, 45, 1, 96, 138, 220, 132, 231, 131, 120, 77, 66, 40, 97, 172, 91, 245, 84, + 221, 157, 190, 9, 145, 176, 130, 25, 43, 72, 107, 190, 229, 75, 88, 191, 136, + 7, 167, 109, 91, 170, 164, 186, 15, 142, 36, 12, 23, +]); + +let keypair = Keypair.fromSecretKey(secretKey); +``` + +Many wallets today allow users to bring their Keypair using a variety of +extensions or web wallets. 
The general recommendation is to use wallets, not +Keypairs, to sign transactions. The wallet creates a layer of separation between +the dApp and the Keypair, ensuring that the dApp never has access to the secret +key. You can find ways to connect to external wallets with the +[wallet-adapter](https://github.com/solana-labs/wallet-adapter) library. + +### Creating and Sending Transactions + +To interact with programs on Solana, you create, sign, and send transactions to +the network. Transactions are collections of instructions with signatures. The +order that instructions exist in a transaction determines the order they are +executed. + +A transaction in Solana-Web3.js is created using the +[`Transaction`](/docs/clients/javascript#transaction) object and adding +desired messages, addresses, or instructions. + +Take the example of a transfer transaction: + +```javascript +const { + Keypair, + Transaction, + SystemProgram, + LAMPORTS_PER_SOL, +} = require("@solana/web3.js"); + +let fromKeypair = Keypair.generate(); +let toKeypair = Keypair.generate(); +let transaction = new Transaction(); + +transaction.add( + SystemProgram.transfer({ + fromPubkey: fromKeypair.publicKey, + toPubkey: toKeypair.publicKey, + lamports: LAMPORTS_PER_SOL, + }), +); +``` + +The above code achieves creating a transaction ready to be signed and +broadcasted to the network. The `SystemProgram.transfer` instruction was added +to the transaction, containing the amount of lamports to send, and the `to` and +`from` public keys. + +All that is left is to sign the transaction with keypair and send it over the +network. You can accomplish sending a transaction by using +`sendAndConfirmTransaction` if you wish to alert the user or do something after +a transaction is finished, or use `sendTransaction` if you don't need to wait +for the transaction to be confirmed. + +```javascript +const { + sendAndConfirmTransaction, + clusterApiUrl, + Connection, +} = require("@solana/web3.js"); + +let keypair = Keypair.generate(); +let connection = new Connection(clusterApiUrl("testnet")); + +sendAndConfirmTransaction(connection, transaction, [keypair]); +``` + +The above code takes in a `TransactionInstruction` using `SystemProgram`, +creates a `Transaction`, and sends it over the network. You use `Connection` in +order to define which Solana network you are connecting to, namely +`mainnet-beta`, `testnet`, or `devnet`. + +### Interacting with Custom Programs + +The previous section visits sending basic transactions. In Solana everything you +do interacts with different programs, including the previous section's transfer +transaction. At the time of writing programs on Solana are either written in +Rust or C. + +Let's look at the `SystemProgram`. The method signature for allocating space in +your account on Solana in Rust looks like this: + +```rust +pub fn allocate( + pubkey: &Pubkey, + space: u64 +) -> Instruction +``` + +In Solana when you want to interact with a program you must first know all the +accounts you will be interacting with. + +You must always provide every account that the program will be interacting +within the instruction. Not only that, but you must provide whether or not the +account is `isSigner` or `isWritable`. + +In the `allocate` method above, a single account `pubkey` is required, as well +as an amount of `space` for allocation. We know that the `allocate` method +writes to the account by allocating space within it, making the `pubkey` +required to be `isWritable`. 
`isSigner` is required when you are designating the +account that is running the instruction. In this case, the signer is the account +calling to allocate space within itself. + +Let's look at how to call this instruction using solana-web3.js: + +```javascript +let keypair = web3.Keypair.generate(); +let payer = web3.Keypair.generate(); +let connection = new web3.Connection(web3.clusterApiUrl("testnet")); + +let airdropSignature = await connection.requestAirdrop( + payer.publicKey, + web3.LAMPORTS_PER_SOL, +); + +await connection.confirmTransaction({ signature: airdropSignature }); +``` + +First, we set up the account Keypair and connection so that we have an account +to make allocate on the testnet. We also create a payer Keypair and airdrop some +SOL so we can pay for the allocate transaction. + +```javascript +let allocateTransaction = new web3.Transaction({ + feePayer: payer.publicKey, +}); +let keys = [{ pubkey: keypair.publicKey, isSigner: true, isWritable: true }]; +let params = { space: 100 }; +``` + +We create the transaction `allocateTransaction`, keys, and params objects. +`feePayer` is an optional field when creating a transaction that specifies who +is paying for the transaction, defaulting to the pubkey of the first signer in +the transaction. `keys` represents all accounts that the program's `allocate` +function will interact with. Since the `allocate` function also required space, +we created `params` to be used later when invoking the `allocate` function. + +```javascript +let allocateStruct = { + index: 8, + layout: struct([u32("instruction"), ns64("space")]), +}; +``` + +The above is created using `u32` and `ns64` from `@solana/buffer-layout` to +facilitate the payload creation. The `allocate` function takes in the parameter +`space`. To interact with the function we must provide the data as a Buffer +format. The `buffer-layout` library helps with allocating the buffer and +encoding it correctly for Rust programs on Solana to interpret. + +Let's break down this struct. + +```javascript +{ + index: 8, /* <-- */ + layout: struct([ + u32('instruction'), + ns64('space'), + ]) +} +``` + +`index` is set to 8 because the function `allocate` is in the 8th position in +the instruction enum for `SystemProgram`. + +```rust +/* https://github.com/solana-labs/solana/blob/21bc43ed58c63c827ba4db30426965ef3e807180/sdk/program/src/system_instruction.rs#L142-L305 */ +pub enum SystemInstruction { + /** 0 **/CreateAccount {/**/}, + /** 1 **/Assign {/**/}, + /** 2 **/Transfer {/**/}, + /** 3 **/CreateAccountWithSeed {/**/}, + /** 4 **/AdvanceNonceAccount, + /** 5 **/WithdrawNonceAccount(u64), + /** 6 **/InitializeNonceAccount(Pubkey), + /** 7 **/AuthorizeNonceAccount(Pubkey), + /** 8 **/Allocate {/**/}, + /** 9 **/AllocateWithSeed {/**/}, + /** 10 **/AssignWithSeed {/**/}, + /** 11 **/TransferWithSeed {/**/}, + /** 12 **/UpgradeNonceAccount, +} +``` + +Next up is `u32('instruction')`. + +```javascript +{ + index: 8, + layout: struct([ + u32('instruction'), /* <-- */ + ns64('space'), + ]) +} +``` + +The `layout` in the allocate struct must always have `u32('instruction')` first +when you are using it to call an instruction. + +```javascript +{ + index: 8, + layout: struct([ + u32('instruction'), + ns64('space'), /* <-- */ + ]) +} +``` + +`ns64('space')` is the argument for the `allocate` function. You can see in the +original `allocate` function in Rust that space was of the type `u64`. `u64` is +an unsigned 64bit integer. Javascript by default only provides up to 53bit +integers. 
`ns64` comes from `@solana/buffer-layout` to help with type +conversions between Rust and Javascript. You can find more type conversions +between Rust and Javascript at +[solana-labs/buffer-layout](https://github.com/solana-labs/buffer-layout). + +```javascript +let data = Buffer.alloc(allocateStruct.layout.span); +let layoutFields = Object.assign({ instruction: allocateStruct.index }, params); +allocateStruct.layout.encode(layoutFields, data); +``` + +Using the previously created bufferLayout, we can allocate a data buffer. We +then assign our params `{ space: 100 }` so that it maps correctly to the layout, +and encode it to the data buffer. Now the data is ready to be sent to the +program. + +```javascript +allocateTransaction.add( + new web3.TransactionInstruction({ + keys, + programId: web3.SystemProgram.programId, + data, + }), +); + +await web3.sendAndConfirmTransaction(connection, allocateTransaction, [ + payer, + keypair, +]); +``` + +Finally, we add the transaction instruction with all the account keys, payer, +data, and programId and broadcast the transaction to the network. + +The full code can be found below. + +```javascript +const { struct, u32, ns64 } = require("@solana/buffer-layout"); +const { Buffer } = require("buffer"); +const web3 = require("@solana/web3.js"); + +let keypair = web3.Keypair.generate(); +let payer = web3.Keypair.generate(); + +let connection = new web3.Connection(web3.clusterApiUrl("testnet")); + +let airdropSignature = await connection.requestAirdrop( + payer.publicKey, + web3.LAMPORTS_PER_SOL, +); + +await connection.confirmTransaction({ signature: airdropSignature }); + +let allocateTransaction = new web3.Transaction({ + feePayer: payer.publicKey, +}); +let keys = [{ pubkey: keypair.publicKey, isSigner: true, isWritable: true }]; +let params = { space: 100 }; + +let allocateStruct = { + index: 8, + layout: struct([u32("instruction"), ns64("space")]), +}; + +let data = Buffer.alloc(allocateStruct.layout.span); +let layoutFields = Object.assign({ instruction: allocateStruct.index }, params); +allocateStruct.layout.encode(layoutFields, data); + +allocateTransaction.add( + new web3.TransactionInstruction({ + keys, + programId: web3.SystemProgram.programId, + data, + }), +); + +await web3.sendAndConfirmTransaction(connection, allocateTransaction, [ + payer, + keypair, +]); +``` diff --git a/content/docs/clients/meta.json b/content/docs/clients/meta.json new file mode 100644 index 000000000..0e32e8ffa --- /dev/null +++ b/content/docs/clients/meta.json @@ -0,0 +1,5 @@ +{ + "title": "Solana Clients", + "pages": ["rust", "javascript", "javascript-reference"], + "defaultOpen": true +} diff --git a/content/docs/clients/rust.mdx b/content/docs/clients/rust.mdx new file mode 100644 index 000000000..47617bfaa --- /dev/null +++ b/content/docs/clients/rust.mdx @@ -0,0 +1,52 @@ +--- +title: Rust +description: Learn how to use Solana's Rust crates for development. +h1: Rust Client for Solana +--- + +Solana's Rust crates are +[published to crates.io](https://crates.io/search?q=solana-) and can be found +[on docs.rs](https://docs.rs/releases/search?query=solana-) with the `solana-` +prefix. + + + +To quickly get started with Solana development and build your first Rust +program, take a look at these detailed quick start guides: + +- [Build and deploy your first Solana program using only your browser](/developers/guides/getstarted/hello-world-in-your-browser). + No installation needed. 
+- [Setup your local environment](/docs/intro/installation) and use the local + test validator. + + + +## Rust Crates + +The following are the most important and commonly used Rust crates for Solana +development: + +- [`solana-program`] — Imported by programs running on Solana, compiled to + SBF. This crate contains many fundamental data types and is re-exported from + [`solana-sdk`], which cannot be imported from a Solana program. + +- [`solana-sdk`] — The basic offchain SDK, it re-exports + [`solana-program`] and adds more APIs on top of that. Most Solana programs + that do not run on-chain will import this. + +- [`solana-client`] — For interacting with a Solana node via the + [JSON RPC API](/docs/rpc). + +- [`solana-cli-config`] — Loading and saving the Solana CLI configuration + file. + +- [`solana-clap-utils`] — Routines for setting up a CLI, using [`clap`], + as used by the main Solana CLI. Includes functions for loading all types of + signers supported by the CLI. + +[`solana-program`]: https://docs.rs/solana-program +[`solana-sdk`]: https://docs.rs/solana-sdk +[`solana-client`]: https://docs.rs/solana-client +[`solana-cli-config`]: https://docs.rs/solana-cli-config +[`solana-clap-utils`]: https://docs.rs/solana-clap-utils +[`clap`]: https://docs.rs/clap diff --git a/content/docs/core/accounts.mdx b/content/docs/core/accounts.mdx new file mode 100644 index 000000000..f11421de4 --- /dev/null +++ b/content/docs/core/accounts.mdx @@ -0,0 +1,195 @@ +--- +title: Solana Account Model +description: + Learn about Solana's account model, including how accounts store data and + programs, rent mechanics, account ownership, and the relationship between + programs and data accounts. Understand the core concepts of Solana's key-value + storage system. +--- + +On Solana, all data is stored in what are referred to as "accounts”. The way +data is organized on Solana resembles a +[key-value store](https://en.wikipedia.org/wiki/Key%E2%80%93value_database), +where each entry in the database is called an "account". + +![Accounts](/assets/docs/core/accounts/accounts.svg) + +## Key Points + +- Accounts can store up to 10MB of data, which can consist of either executable + program code or program state. + +- Accounts require a rent deposit in SOL, proportional to the amount of data + stored, which is fully refundable when the account is closed. + +- Every account has a program "owner". Only the program that owns an account can + modify its data or deduct its lamport balance. However, anyone can increase + the balance. + +- Programs (smart contracts) are stateless accounts that store executable code. + +- Data accounts are created by programs to store and manage program state. + +- Native programs are built-in programs included with the Solana runtime. + +- Sysvar accounts are special accounts that store network cluster state. + +## Account + +Each account is identifiable by its unique address, represented as 32 bytes in +the format of an [Ed25519](https://ed25519.cr.yp.to/) `PublicKey`. You can think +of the address as the unique identifier for the account. + +![Account Address](/assets/docs/core/accounts/account-address.svg) + +This relationship between the account and its address can be thought of as a +key-value pair, where the address serves as the key to locate the corresponding +on-chain data of the account. 
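To make the key-value analogy concrete, here is a minimal sketch that looks up the "value" stored at an address using the `Connection.getAccountInfo` method from `@solana/web3.js`. The System Program's address is used below purely as a convenient, always-present placeholder key; any account address works the same way, and the structure it returns is described in the next section.

```javascript
import { Connection, PublicKey, clusterApiUrl } from "@solana/web3.js";

const connection = new Connection(clusterApiUrl("devnet"), "confirmed");

// the address acts as the key; the System Program address is used here
// only as an always-present placeholder
const address = new PublicKey("11111111111111111111111111111111");

// the returned AccountInfo (data, lamports, owner, executable) is the value
const accountInfo = await connection.getAccountInfo(address);
console.log(accountInfo);
```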
+ +### AccountInfo + +Accounts have a +[max size of 10MB](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/sdk/program/src/system_instruction.rs#L85) +(10 Mega Bytes) and the data stored on every account on Solana has the following +structure known as the +[AccountInfo](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/sdk/program/src/account_info.rs#L19). + +![AccountInfo](/assets/docs/core/accounts/accountinfo.svg) + +The `AccountInfo` for each account includes the following fields: + +- `data`: A byte array that stores the state of an account. If the account is a + program (smart contract), this stores executable program code. This field is + often referred to as the "account data". +- `executable`: A boolean flag that indicates if the account is a program. +- `lamports`: A numeric representation of the account's balance in + [lamports](/docs/terminology#lamport), the smallest unit of SOL (1 SOL = 1 + billion lamports). +- `owner`: Specifies the public key (program ID) of the program that owns the + account. + +As a key part of the Solana Account Model, every account on Solana has a +designated "owner", specifically a program. Only the program designated as the +owner of an account can modify the data stored on the account or deduct the +lamport balance. It's important to note that while only the owner may deduct the +balance, anyone can increase the balance. + +> To store data on-chain, a certain amount of SOL must be transferred to an +> account. The amount transferred is proportional to the size of the data stored +> on the account. This concept is commonly referred to as “rent”. However, you +> can think of "rent" more like a "deposit" because the SOL allocated to an +> account can be fully recovered when the account is closed. + +## Native Programs + +Solana contains a small handful of native programs that are part of the +validator implementation and provide various core functionalities for the +network. You can find the full list of native programs +[here](https://docs.anza.xyz/runtime/programs). + +When developing custom programs on Solana, you will commonly interact with two +native programs, the System Program and the BPF Loader. + +### System Program + +By default, all new accounts are owned by the +[System Program](https://github.com/solana-labs/solana/tree/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/programs/system/src). +The System Program performs several key tasks such as: + +- [New Account Creation](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/programs/system/src/system_processor.rs#L145): + Only the System Program can create new accounts. +- [Space Allocation](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/programs/system/src/system_processor.rs#L70): + Sets the byte capacity for the data field of each account. +- [Assign Program Ownership](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/programs/system/src/system_processor.rs#L112): + Once the System Program creates an account, it can reassign the designated + program owner to a different program account. This is how custom programs take + ownership of new accounts created by the System Program. + +On Solana, a "wallet" is simply an account owned by the System Program. The +lamport balance of the wallet is the amount of SOL owned by the account. 
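As a quick check of the ownership rule above, the following sketch airdrops some devnet SOL to a freshly generated keypair and then reads the resulting wallet account; the generated keypair and the airdrop amount are purely illustrative.

```javascript
import {
  Connection,
  Keypair,
  LAMPORTS_PER_SOL,
  SystemProgram,
  clusterApiUrl,
} from "@solana/web3.js";

const connection = new Connection(clusterApiUrl("devnet"), "confirmed");

// a brand new keypair; the airdrop below funds it, creating its on-chain account
const wallet = Keypair.generate();
const signature = await connection.requestAirdrop(
  wallet.publicKey,
  LAMPORTS_PER_SOL,
);
await connection.confirmTransaction(signature);

const accountInfo = await connection.getAccountInfo(wallet.publicKey);

// a wallet account is owned by the System Program...
console.log(
  "Owned by System Program:",
  accountInfo.owner.equals(SystemProgram.programId),
);
// ...and its lamport balance is the SOL it holds (1 SOL = 1 billion lamports)
console.log("Balance (SOL):", accountInfo.lamports / LAMPORTS_PER_SOL);
```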
+ +![System Account](/assets/docs/core/accounts/system-account.svg) + +> Only accounts owned by the System Program can be used as transaction fee +> payers. + +### BPFLoader Program + +The +[BPF Loader](https://github.com/solana-labs/solana/tree/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/programs/bpf_loader/src) +is the program designated as the "owner" of all other programs on the network, +excluding Native Programs. It is responsible for deploying, upgrading, and +executing custom programs. + +## Sysvar Accounts + +Sysvar accounts are special accounts located at predefined addresses that +provide access to cluster state data. These accounts are dynamically updated +with data about the network cluster. You can find the full list of Sysvar +Accounts [here](https://docs.anza.xyz/runtime/sysvars). + +## Custom Programs + +On Solana, “smart contracts” are referred to as +[programs](/docs/core/programs). A program is an account that contains +executable code and is indicated by an “executable” flag that is set to true. + +For a more detailed explanation of the program deployment process, refer to the +[Deploying Programs](/docs/programs/deploying) page of this documentation. + +### Program Account + +When new programs are +[deployed](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/programs/bpf_loader/src/lib.rs#L498) +on Solana, technically three separate accounts are created: + +- **Program Account**: The main account representing an on-chain program. This + account stores the address of an executable data account (which stores the + compiled program code) and the update authority for the program (address + authorized to make changes to the program). +- **Program Executable Data Account**: An account that contains the executable + byte code of the program. +- **Buffer Account**: A temporary account that stores byte code while a program + is being actively deployed or upgraded. Once the process is complete, the data + is transferred to the Program Executable Data Account and the buffer account + is closed. + +For example, here are links to the Solana Explorer for the Token Extensions +[Program Account](https://explorer.solana.com/address/TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb) +and its corresponding +[Program Executable Data Account](https://explorer.solana.com/address/DoU57AYuPFu2QU514RktNPG22QhApEjnKxnBcu4BHDTY). + +![Program and Executable Data Accounts](/assets/docs/core/accounts/program-account-expanded.svg) + +For simplicity, you can think of the "Program Account" as the program itself. + +![Program Account](/assets/docs/core/accounts/program-account-simple.svg) + +> The address of the "Program Account" is commonly referred to as the “Program +> ID”, which is used to invoke the program. + +### Data Account + +Solana programs are "stateless", meaning that program accounts only contain the +program's executable byte code. To store and modify additional data, new +accounts must be created. These accounts are commonly referred to as “data +accounts”. + +Data accounts can store any arbitrary data as defined in the owner program's +code. + +![Data Account](/assets/docs/core/accounts/data-account.svg) + +Note that only the [System Program](/docs/core/accounts#system-program) can +create new accounts. Once the System Program creates an account, it can then +transfer ownership of the new account to another program. + +In other words, creating a data account for a custom program requires two steps: + +1. 
Invoke the System Program to create an account, which then transfers + ownership to a custom program +2. Invoke the custom program, which now owns the account, to then initialize the + account data as defined in the program code + +This data account creation process is often abstracted as a single step, but +it's helpful to understand the underlying process. diff --git a/content/docs/core/clusters.mdx b/content/docs/core/clusters.mdx new file mode 100644 index 000000000..30d140635 --- /dev/null +++ b/content/docs/core/clusters.mdx @@ -0,0 +1,168 @@ +--- +title: Clusters & Endpoints +description: + Learn about Solana's network clusters (Devnet, Testnet, and Mainnet Beta), + their public RPC endpoints, rate limits, and use cases. Learn how to connect + to different Solana networks for development, testing, and production. +h1: Clusters and Public RPC Endpoints +--- + +The Solana blockchain has several different groups of validators, known as +[Clusters](/docs/core/clusters). Each serving different purposes within the +overall ecosystem and containing dedicated API nodes to fulfill +[JSON-RPC](/docs/rpc/) requests for their respective Cluster. + +The individual nodes within a Cluster are owned and operated by third parties, +with a public endpoint available for each. + +## Solana public RPC endpoints + +The Solana Labs organization operates a public RPC endpoint for each Cluster. +Each of these public endpoints are subject to rate limits, but are available for +users and developers to interact with the Solana blockchain. + +> Public endpoint rate limits are subject to change. The specific rate limits +> listed on this document are not guaranteed to be the most up-to-date. + +### Using explorers with different Clusters + +Many of the popular Solana blockchain explorers support selecting any of the +Clusters, often allowing advanced users to add a custom/private RPC endpoint as +well. + +An example of some of these Solana blockchain explorers include: + +- [http://explorer.solana.com/](https://explorer.solana.com/). +- [http://solana.fm/](https://solana.fm/). +- [http://solscan.io/](https://solscan.io/). +- [http://solanabeach.io/](http://solanabeach.io/). +- [http://validators.app/](http://validators.app/). + +## On a high level + +- Mainnet: Live production environment for deployed applications. +- Devnet: Testing with public accessibility for developers experimenting with + their applications. +- Testnet: Stress-testing for network upgrades and validator performance. + +**Example use cases**: You may want to debug a new program on Devnet or verify +performance metrics on Testnet before Mainnet deployment. + +| **Cluster** | **Endpoint** | **Purpose** | **Notes** | +| ----------- | ------------------------------------- | ------------------------------ | ------------------------------ | +| Mainnet | `https://api.mainnet-beta.solana.com` | Live production environment | Requires SOL for transactions | +| Devnet | `https://api.devnet.solana.com` | Public testing and development | Free SOL airdrop for testing | +| Testnet | `https://api.testnet.solana.com` | Validator and stress testing | May have intermittent downtime | + +## Devnet + +Devnet serves as a playground for anyone who wants to take Solana for a test +drive, as a user, token holder, app developer, or validator. + +- Application developers should target Devnet. +- Potential validators should first target Devnet. 
+- Key differences between Devnet and Mainnet Beta: + - Devnet tokens are **not real** + - Devnet includes a token faucet for airdrops for application testing + - Devnet may be subject to ledger resets + - Devnet typically runs the same software release branch version as Mainnet + Beta, but may run a newer minor release version than Mainnet Beta. +- Gossip entrypoint for Devnet: `entrypoint.devnet.solana.com:8001` + +### Devnet endpoint + +- `https://api.devnet.solana.com` - single Solana Labs hosted API node; + rate-limited + +#### Example `solana` command-line configuration + +To connect to the `devnet` Cluster using the Solana CLI: + +```shell +solana config set --url https://api.devnet.solana.com +``` + +### Devnet rate limits + +- Maximum number of requests per 10 seconds per IP: 100 +- Maximum number of requests per 10 seconds per IP for a single RPC: 40 +- Maximum concurrent connections per IP: 40 +- Maximum connection rate per 10 seconds per IP: 40 +- Maximum amount of data per 30 second: 100 MB + +## Testnet + +Testnet is where the Solana core contributors stress test recent release +features on a live cluster, particularly focused on network performance, +stability and validator behavior. + +- Testnet tokens are **not real** +- Testnet may be subject to ledger resets. +- Testnet includes a token faucet for airdrops for application testing +- Testnet typically runs a newer software release branch than both Devnet and + Mainnet Beta +- Gossip entrypoint for Testnet: `entrypoint.testnet.solana.com:8001` + +### Testnet endpoint + +- `https://api.testnet.solana.com` - single Solana Labs API node; rate-limited + +#### Example `solana` command-line configuration + +To connect to the `testnet` Cluster using the Solana CLI: + +```shell +solana config set --url https://api.testnet.solana.com +``` + +### Testnet rate limits + +- Maximum number of requests per 10 seconds per IP: 100 +- Maximum number of requests per 10 seconds per IP for a single RPC: 40 +- Maximum concurrent connections per IP: 40 +- Maximum connection rate per 10 seconds per IP: 40 +- Maximum amount of data per 30 second: 100 MB + +## Mainnet beta + +A permissionless, persistent cluster for Solana users, builders, validators and +token holders. + +- Tokens that are issued on Mainnet Beta are **real** SOL +- Gossip entrypoint for Mainnet Beta: `entrypoint.mainnet-beta.solana.com:8001` + +### Mainnet beta endpoint + +- `https://api.mainnet-beta.solana.com` - Solana Labs hosted API node cluster, + backed by a load balancer; rate-limited + +#### Example `solana` command-line configuration + +To connect to the `mainnet-beta` Cluster using the Solana CLI: + +```shell +solana config set --url https://api.mainnet-beta.solana.com +``` + +### Mainnet beta rate limits + +- Maximum number of requests per 10 seconds per IP: 100 +- Maximum number of requests per 10 seconds per IP for a single RPC: 40 +- Maximum concurrent connections per IP: 40 +- Maximum connection rate per 10 seconds per IP: 40 +- Maximum amount of data per 30 seconds: 100 MB + +> The public RPC endpoints are not intended for production applications. Please +> use dedicated/private RPC servers when you launch your application, drop NFTs, +> etc. The public services are subject to abuse and rate limits may change +> without prior notice. Likewise, high-traffic websites may be blocked without +> prior notice. + +## Common HTTP Error Codes + +- 403 -- Your IP address or website has been blocked. It is time to run your own + RPC server(s) or find a private service. 
+- 429 -- Your IP address is exceeding the rate limits. Slow down! Use the + [Retry-After](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After) + HTTP response header to determine how long to wait before making another + request. diff --git a/content/docs/core/cpi.mdx b/content/docs/core/cpi.mdx new file mode 100644 index 000000000..eef396ee2 --- /dev/null +++ b/content/docs/core/cpi.mdx @@ -0,0 +1,133 @@ +--- +title: Cross Program Invocation +description: + Learn about Cross Program Invocation (CPI) on Solana - how programs can call + instructions on other programs, handle PDA signers, and compose functionality + across the Solana network. +h1: Cross Program Invocation (CPI) +--- + +A Cross Program Invocation (CPI) refers to when one program invokes the +instructions of another program. This mechanism allows for the composability of +Solana programs. + +You can think of instructions as API endpoints that a program exposes to the +network and a CPI as one API internally invoking another API. + +![Cross Program Invocation](/assets/docs/core/cpi/cpi.svg) + +When a program initiates a Cross Program Invocation (CPI) to another program: + +- The signer privileges from the initial transaction invoking the caller program + (A) extend to the callee (B) program +- The callee (B) program can make further CPIs to other programs, up to a + maximum depth of 4 (ex. B->C, C->D) +- The programs can "sign" on behalf of the [PDAs](/docs/core/pda) derived + from its program ID + +> The Solana program runtime defines a constant called +> [`max_invoke_stack_height`](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/program-runtime/src/compute_budget.rs#L31-L35), +> which is set to a +> [value of 5](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/program-runtime/src/compute_budget.rs#L138). +> This represents the maximum height of the program instruction invocation +> stack. The stack height begins at 1 for transaction instructions, increases by +> 1 each time a program invokes another instruction. This setting effectively +> limits invocation depth for CPIs to 4. + +## Key Points + +- CPIs enable Solana program instructions to directly invoke instructions on + another program. + +- Signer privileges from a caller program are extended to the callee program. + +- When making a CPI, programs can "sign" on behalf of PDAs derived from their + own program ID. + +- The callee program can make additional CPIs to other programs, up to a maximum + depth of 4. + +## How to write a CPI + +Writing an instruction for a CPI follows the same pattern as building an +[instruction](/docs/core/transactions#instruction) to add to a transaction. +Under the hood, each CPI instruction must specify the following information: + +- **Program address**: Specifies the program being invoked +- **Accounts**: Lists every account the instruction reads from or writes to, + including other programs +- **Instruction Data**: Specifies which instruction on the program to invoke, + plus any additional data required by the instruction (function arguments) + +Depending on the program you are making the call to, there may be crates +available with helper functions for building the instruction. 
Programs then +execute CPIs using either one of the following functions from the +`solana_program` crate: + +- `invoke` - used when there are no PDA signers +- `invoke_signed` - used when the caller program needs to sign with a PDA + derived from its program ID + +### Basic CPI + +The +[`invoke`](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/sdk/program/src/program.rs#L132) +function is used when making a CPI that does not require PDA signers. When +making CPIs, signers provided to the caller program automatically extend to the +callee program. + +```rust +pub fn invoke( + instruction: &Instruction, + account_infos: &[AccountInfo<'_>] +) -> Result<(), ProgramError> +``` + +Here is an example program on +[Solana Playground](https://beta.solpg.io/github.com/ZYJLiu/doc-examples/tree/main/cpi-invoke) +that makes a CPI using the `invoke` function to call the transfer instruction on +the System Program. You can also reference the +[Basic CPI guide](/developers/guides/getstarted/how-to-cpi) for further details. + +### CPI with PDA Signer + +The +[`invoke_signed`](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/sdk/program/src/program.rs#L247) +function is used when making a CPI that requires PDA signers. The seeds used to +derive the signer PDAs are passed into the `invoke_signed` function as +`signer_seeds`. + +You can reference the [Program Derived Address](/docs/core/pda) page for +details on how PDAs are derived. + +```rust +pub fn invoke_signed( + instruction: &Instruction, + account_infos: &[AccountInfo<'_>], + signers_seeds: &[&[&[u8]]] +) -> Result<(), ProgramError> +``` + +The runtime uses the privileges granted to the caller program to determine what +privileges can be extended to the callee. Privileges in this context refer to +signers and writable accounts. For example, if the instruction the caller is +processing contains a signer or writable account, then the caller can invoke an +instruction that also contains that signer and/or writable account. + +While PDAs have [no private keys](/docs/core/pda#what-is-a-pda), they can +still act as a signer in an instruction via a CPI. To verify that a PDA is +derived from the calling program, the seeds used to generate the PDA must be +included as `signers_seeds`. + +When the CPI is processed, the Solana runtime +[internally calls `create_program_address`](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/programs/bpf_loader/src/syscalls/cpi.rs#L550) +using the `signers_seeds` and the `program_id` of the calling program. If a +valid PDA is found, the address is +[added as a valid signer](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/programs/bpf_loader/src/syscalls/cpi.rs#L552). + +Here is an example program on +[Solana Playground](https://beta.solpg.io/github.com/ZYJLiu/doc-examples/tree/main/cpi-invoke-signed) +that makes a CPI using the `invoke_signed` function to call the transfer +instruction on the System Program with a PDA signer. You can reference the +[CPI with PDA Signer guide](/developers/guides/getstarted/how-to-cpi-with-signer) +for further details. diff --git a/content/docs/core/fees.mdx b/content/docs/core/fees.mdx new file mode 100644 index 000000000..2601ddfc1 --- /dev/null +++ b/content/docs/core/fees.mdx @@ -0,0 +1,495 @@ +--- +title: Fees on Solana +description: + Learn about Solana's fee structure including transaction fees, prioritization + fees, and rent costs. 
Understand how fees are calculated, collected and + distributed across the network. +keywords: + - instruction fee + - processing fee + - storage fee + - rent + - gas + - gwei +altRoutes: + - /docs/core/rent + - /docs/intro/rent + - /docs/intro/transaction_fees + - /docs/intro/transaction-fees + - /docs/core/runtime +--- + +The Solana blockchain has a few different types of fees and costs that are +incurred to use the permissionless network. These can be segmented into a few +specific types: + +- Transaction Fees - A fee to have validators process transactions/instructions +- Prioritization Fees - An optional fee to boost transactions processing order +- Rent - A withheld balance to keep data stored on-chain + +## Transaction Fees + +The small fee paid to process logic (instruction) within an on-chain program on +the Solana blockchain is known as a "_transaction fee_". + +As each [transaction](/docs/core/transactions#transaction) (which contains +one or more [instructions](/docs/core/transactions#instruction)) is sent +through the network, it gets processed by the current validator leader. Once +confirmed as a global state transaction, this _transaction fee_ is paid to the +network to help support the economic design of the Solana blockchain. + +> Transaction fees are different from account data storage deposit fee of +> [rent](#rent). While transaction fees are paid to process instructions on the +> Solana network, a rent deposit is withheld in an account to store its data on +> the blockchain and reclaimable. + +Currently, the base Solana transaction fee is set at a static value of 5k +lamports per signature. On top of this base fee, any additional +[prioritization fees](#prioritization-fee) can be added. + +### Why pay transaction fees? + +Transaction fees offer many benefits in the Solana +[economic design](#basic-economic-design), mainly they: + +- provide compensation to the validator network for the expended CPU/GPU compute + resources necessary to process transactions +- reduce network spam by introducing a real cost to transactions +- provide long-term economic stability to the network through a + protocol-captured minimum fee amount per transaction + +### Basic economic design + +Many blockchain networks (including Bitcoin and Ethereum), rely on inflationary +_protocol-based rewards_ to secure the network in the short-term. Over the +long-term, these networks will increasingly rely on _transaction fees_ to +sustain security. + +The same is true on Solana. Specifically: + +- A fixed proportion (initially 50%) of each transaction fee is _burned_ + (destroyed), with the remaining going to the current + [leader](/docs/terminology#leader) processing the transaction. +- A scheduled global inflation rate provides a source for + [rewards](https://docs.anza.xyz/implemented-proposals/staking-rewards) + distributed to [Solana Validators](https://docs.anza.xyz/operations). + +### Fee collection + +Transactions are required to have at least one account which has signed the +transaction and is writable. These _writable signer accounts_ are serialized +first in the list of accounts and the first of these is always used as the "_fee +payer_". + +Before any transaction instructions are processed, the fee payer account +[balance will be deducted](https://github.com/anza-xyz/agave/blob/b7bbe36918f23d98e2e73502e3c4cba78d395ba9/runtime/src/bank.rs#L4045-L4064) +to pay for transaction fees. 
If the fee payer balance is not sufficient to cover +transaction fees, the transaction processing will halt and result in a failed +transaction. + +If the balance was sufficient, the fees will be deducted and the transaction's +instructions will begin execution. Should any of the instructions result in an +error, transaction processing will halt and ultimately be recorded as a failed +transaction in the Solana ledger. The fee is still collected by the runtime for +these failed transactions. + +Should any of the instructions return an error or violate runtime restrictions, +all account changes **_except_** the transaction fee deduction will be rolled +back. This is because the validator network has already expended computational +resources to collect transactions and begin the initial processing. + +### Fee distribution + +Transaction fees are +[partially burned](https://github.com/anza-xyz/agave/blob/b7bbe36918f23d98e2e73502e3c4cba78d395ba9/runtime/src/bank/fee_distribution.rs#L55-L64) +and the remaining fees are collected by the validator that produced the block +that the corresponding transactions were included in. Specifically, +[50% are burned](https://github.com/anza-xyz/agave/blob/b7bbe36918f23d98e2e73502e3c4cba78d395ba9/sdk/program/src/fee_calculator.rs#L79) +and +[50% percent are distributed](https://github.com/anza-xyz/agave/blob/e621336acad4f5d6e5b860eaa1b074b01c99253c/runtime/src/bank/fee_distribution.rs#L58-L62) +to the validator that produced the block. + +### Why burn some fees? + +As mentioned above, a fixed proportion of each transaction fee is _burned_ +(destroyed). This is intended to cement the economic value of SOL and thus +sustain the network's security. Unlike a scheme where transactions fees are +completely burned, leaders are still incentivized to include as many +transactions as possible in their slots (opportunity to create a block). + +Burnt fees can also help prevent malicious validators from censoring +transactions by being considered in [fork](/docs/terminology#fork) selection. + +#### Example of an attack: + +In the case of a +[Proof of History (PoH)](/docs/terminology#proof-of-history-poh) fork with a +malicious or censoring leader: + +- due to the fees lost from censoring, we would expect the total fees burned to + be **_less than_** a comparable honest fork +- if the censoring leader is to compensate for these lost protocol fees, they + would have to replace the burnt fees on their fork themselves +- thus potentially reducing the incentive to censor in the first place + +### Calculating transaction fees + +The complete fee for a given transaction is calculated based on two main parts: + +- a statically set base fee per signature, and +- the computational resources used during the transaction, measured in + "[_compute units_](/docs/terminology#compute-units)" + +Since each transaction may require a different amount of computational +resources, each is allotted a maximum number of _compute units_ per transaction +as part of the _compute budget_. + +## Compute Budget + +To prevent abuse of computational resources, each transaction is allocated a +"_compute budget_". 
This budget specifies details about +[compute units](#compute-units) and includes: + +- the compute costs associated with different types of operations the + transaction may perform (compute units consumed per operation), +- the maximum number of compute units that a transaction can consume (compute + unit limit), +- and the operational bounds the transaction must adhere to (like account data + size limits) + +When the transaction consumes its entire compute budget (compute budget +exhaustion), or exceeds a bound such as attempting to exceed the +[max call stack depth](https://github.com/anza-xyz/agave/blob/b7bbe36918f23d98e2e73502e3c4cba78d395ba9/program-runtime/src/compute_budget.rs#L138) +or [max loaded account](#accounts-data-size-limit) data size limit, the runtime +halts the transaction processing and returns an error. Resulting in a failed +transaction and no state changes (aside from the transaction fee being +[collected](#fee-collection)). + +### Accounts data size limit + +A transaction may specify the maximum bytes of account data it is allowed to +load by including a `SetLoadedAccountsDataSizeLimit` instruction (not to exceed +the runtime's absolute max). If no `SetLoadedAccountsDataSizeLimit` is provided, +the transaction defaults to use the runtime's +[`MAX_LOADED_ACCOUNTS_DATA_SIZE_BYTES`](https://github.com/anza-xyz/agave/blob/b7bbe36918f23d98e2e73502e3c4cba78d395ba9/program-runtime/src/compute_budget_processor.rs#L137-L139) +value. + +The `ComputeBudgetInstruction::set_loaded_accounts_data_size_limit` function can +be used to create this instruction: + +```rust +let instruction = ComputeBudgetInstruction::set_loaded_accounts_data_size_limit(100_000); +``` + +### Compute units + +All the operations performed on-chain within a transaction require different +amounts of computation resources be expended by validators when processing +(compute cost). The smallest unit of measure for the consumption of these +resources is called a _"compute unit"_. + +As a transaction is processed, compute units are incrementally consumed by each +of its instructions being executed on-chain (consuming the budget). Since each +instruction is executing different logic (writing to accounts, cpi, performing +syscalls, etc), each may consume a +[different amount](https://github.com/anza-xyz/agave/blob/b7bbe36918f23d98e2e73502e3c4cba78d395ba9/program-runtime/src/compute_budget.rs#L133-L178) +of compute units. + +> A program can log details about its compute usage, including how much remains +> in its allotted compute budget. You can also find more information in this +> guide for +> [optimizing your compute usage](/developers/guides/advanced/how-to-optimize-compute). + +Each transaction is allotted a [compute unit limit](#compute-unit-limit), either +with the default limit set by the runtime or by explicitly requesting a higher +limit. After a transaction exceeds its compute unit limit, its processing is +halted resulting in a transaction failure. + +The following are some common operations that incur a compute cost: + +- executing instructions +- passing data between programs +- performing syscalls +- using sysvars +- logging with the `msg!` macro +- logging pubkeys +- creating program addresses (PDAs) +- cross-program invocations (CPI) +- cryptographic operations + +> For [cross-program invocations](/docs/core/cpi), the instruction invoked +> inherits the compute budget and limits of their parent. 
If an invoked +> instruction consumes the transaction's remaining budget, or exceeds a bound, +> the entire invocation chain and the top level transaction processing are +> halted. + +You can find more details about all the operations that consume compute units +within the Solana runtime's +[`ComputeBudget`](https://github.com/anza-xyz/agave/blob/b7bbe36918f23d98e2e73502e3c4cba78d395ba9/program-runtime/src/compute_budget.rs#L19-L123). + +### Compute unit limit + +Each transaction has a maximum number of compute units (CU) it can consume +called the _"compute unit limit"_. Per transaction, the Solana runtime has an +absolute max compute unit limit of +[1.4 million CU](https://github.com/anza-xyz/agave/blob/b7bbe36918f23d98e2e73502e3c4cba78d395ba9/program-runtime/src/compute_budget_processor.rs#L19) +and sets a default requested max limit of +[200k CU per instruction](https://github.com/anza-xyz/agave/blob/b7bbe36918f23d98e2e73502e3c4cba78d395ba9/program-runtime/src/compute_budget_processor.rs#L18). + +A transaction can request a more specific and optimal compute unit limit by +including a single `SetComputeUnitLimit` instruction. Either a higher or lower +limit. But it may never request higher than the absolute max limit per +transaction. + +While a transaction's default compute unit limit will work in most cases for +simple transactions, they are often less than optimal (both for the runtime and +the user). For more complex transactions, like invoking programs that perform +multiple CPIs, you may need to request a higher compute unit limit for the +transaction. + +Requesting the optimal compute unit limits for your transaction is essential to +help you pay less for your transaction and to help schedule your transaction +better on the network. Wallets, dApps, and other services should ensure their +compute unit requests are optimal to provide the best experience possible for +their users. + +> For more details and best practices, read this guide on +> [requesting optimal compute limits](/developers/guides/advanced/how-to-request-optimal-compute). + +### Compute unit price + +When a transaction desires to pay a higher fee to boost its processing +prioritization, it can set a _"compute unit price"_. This price, used in +combination with [compute unit limit](#compute-unit-limit), will be used to +determine a transaction's prioritization fee. + +By default, there is +[no compute unit price set](https://github.com/anza-xyz/agave/blob/b7bbe36918f23d98e2e73502e3c4cba78d395ba9/program-runtime/src/compute_budget_processor.rs#L38) +resulting in no additional prioritization fee. + +## Prioritization Fees + +As part of the [Compute Budget](#compute-budget), the runtime supports +transactions paying an **optional** fee known as a _"prioritization fee"_. +Paying this additional fee helps boost how a transaction is prioritized against +others when processing, resulting in faster execution times. + +### How the prioritization fee is calculated + +A transaction's prioritization fee is calculated by multiplying its **_compute +unit limit_** by the **_compute unit price_** (measured in _micro-lamports_). 
+These values can be set once per transaction by including the following Compute +Budget instructions: + +- [`SetComputeUnitLimit`](https://github.com/anza-xyz/agave/blob/b7bbe36918f23d98e2e73502e3c4cba78d395ba9/sdk/src/compute_budget.rs#L47-L50) - + setting the maximum number of compute units the transaction can consume +- [`SetComputeUnitPrice`](https://github.com/anza-xyz/agave/blob/b7bbe36918f23d98e2e73502e3c4cba78d395ba9/sdk/src/compute_budget.rs#L52-L55) - + setting the desired additional fee the transaction is willing to pay to boost + its prioritization + +If no `SetComputeUnitLimit` instruction is provided, the +[default compute unit limit](#compute-unit-limit) will be used. + +If no `SetComputeUnitPrice` instruction is provided, the transaction will +default to no additional elevated fee and the lowest priority (i.e. no +prioritization fee). + +### How to set the prioritization fee + +A transaction's prioritization fee is set by including a `SetComputeUnitPrice` +instruction, and optionally a `SetComputeUnitLimit` instruction. The runtime +will use these values to calculate the prioritization fee, which will be used to +prioritize the given transaction within the block. + +You can craft each of these instructions via their Rust or `@solana/web3.js` +functions. Each instruction can then be included in the transaction and sent to +the cluster like normal. See also the +[best practices](#prioritization-fee-best-practices) below. + +Unlike other instructions inside a Solana transaction, Compute Budget +instructions do **NOT** require any accounts. A transaction with multiple of +either of the instructions will fail. + + + +Transactions can only contain **one of each type** of compute budget +instruction. Duplicate instruction types will result in an +[`TransactionError::DuplicateInstruction`](https://github.com/anza-xyz/agave/blob/b7bbe36918f23d98e2e73502e3c4cba78d395ba9/sdk/src/transaction/error.rs#L143-L145) +error, and ultimately transaction failure. + + + +#### Rust + +The rust `solana-sdk` crate includes functions within +[`ComputeBudgetInstruction`](https://docs.rs/solana-sdk/latest/solana_sdk/compute_budget/enum.ComputeBudgetInstruction.html) +to craft instructions for setting the _compute unit limit_ and _compute unit +price_: + +```rust +let instruction = ComputeBudgetInstruction::set_compute_unit_limit(300_000); +``` + +```rust +let instruction = ComputeBudgetInstruction::set_compute_unit_price(1); +``` + +#### Javascript + +The `@solana/web3.js` library includes functions within the +[`ComputeBudgetProgram`](https://solana-labs.github.io/solana-web3.js/v1.x/classes/ComputeBudgetProgram.html) +class to craft instructions for setting the _compute unit limit_ and _compute +unit price_: + +```js +const instruction = ComputeBudgetProgram.setComputeUnitLimit({ + units: 300_000, +}); +``` + +```js +const instruction = ComputeBudgetProgram.setComputeUnitPrice({ + microLamports: 1, +}); +``` + +### Prioritization fee best practices + +Below you can find general information on the best practices for prioritization +fees. You can also find more detailed information in this guide on +[how to request optimal compute](/developers/guides/advanced/how-to-request-optimal-compute), +including how to simulate a transaction to determine its approximate compute +usage. + +#### Request the minimum compute units + +Transactions should request the minimum amount of compute units required for +execution to minimize fees. 
Also note that fees are not adjusted when the number +of requested compute units exceeds the number of compute units actually consumed +by an executed transaction. + +#### Get recent prioritization fees + +Prior to sending a transaction to the cluster, you can use the +[`getRecentPrioritizationFees`](/docs/rpc/http/getrecentprioritizationfees) +RPC method to get a list of the recent paid prioritization fees within the +recent blocks processed by the node. + +You could then use this data to estimate an appropriate prioritization fee for +your transaction to both (a) better ensure it gets processed by the cluster and +(b) minimize the fees paid. + +## Rent + +The fee deposited into every [Solana Account](/docs/core/accounts) to keep +its associated data available on-chain is called "_rent_". This fee is withheld +in the normal lamport balance on every account and reclaimable when the account +is closed. + +> Rent is different from [transaction fees](#transaction-fees). Rent is "paid" +> (withheld in an Account) to keep data stored on the Solana blockchain and can +> be reclaimed. Whereas transaction fees are paid to process +> [instructions](/docs/core/transactions#instructions) on the network. + +All accounts are required to maintain a high enough lamport balance (relative to +its allocated space) to become [rent exempt](#rent-exempt) and remain on the +Solana blockchain. Any transaction that attempts to reduce an account's balance +below its respective minimum balance for rent exemption will fail (unless the +balance is reduced to exactly zero). + +When an account's owner no longer desires to keep this data on-chain and +available in the global state, the owner can close the account and reclaim the +rent deposit. + +This is accomplished by withdrawing (transferring) the account's entire lamport +balance to another account (i.e. your wallet). By reducing the account's balance +to exactly `0`, the runtime will remove the account and its associated data from +the network in the process of _"[garbage collection](#garbage-collection)"_. + +### Rent rate + +The Solana rent rate is set on a network wide basis, primarily based on a +runtime set +"[lamports _per_ byte _per_ year](https://github.com/anza-xyz/agave/blob/b7bbe36918f23d98e2e73502e3c4cba78d395ba9/sdk/program/src/rent.rs#L27-L34)". +Currently, the rent rate is a static amount and stored in the +[Rent sysvar](https://docs.anza.xyz/runtime/sysvars#rent). + +This rent rate is used to calculate the exact amount of rent required to be +withheld inside an account for the space allocated to the account (i.e. the +amount of data that can be stored in the account). The more space an account +allocates, the higher the withheld rent deposit will be. + +### Rent exempt + +Accounts must maintain a lamport balance greater than the minimum required to +store its respective data on-chain. This is called "_rent exempt_" and that +balance is called the "_minimum balance for rent exemption_". + +> New accounts (and programs) on Solana are **REQUIRED** to be initialized with +> enough lamports to become _rent exempt_. This was not always the case. +> Previously, the runtime would periodically and automatically collect a fee +> from each account below its _minimum balance for rent exemption_. Eventually +> reducing those accounts to a balance of zero and garbage collecting them from +> the global state (unless manually topped up). + +In the process of creating a new account, you must ensure you deposit enough +lamports to be above this minimum balance. 
Anything lower that this minimum +threshold will result in a failed transaction. + +Every time an account's balance is reduced, the runtime performs a check to see +if the account will still be above this minimum balance for rent exemption. +Unless they reduce the final balance to exactly `0` (closing the account), +transactions that would cause an account's balance to drop below the rent exempt +threshold will fail. + +The specific minimum balance for an account to become rent exempt is dependant +on the blockchain's current [rent rate](#rent-rate) and the desired amount of +storage space an account wants to allocate (account size). Therefore, it is +recommended to use the +[`getMinimumBalanceForRentExemption`](/docs/rpc/http/getminimumbalanceforrentexemption) +RPC endpoint to calculate the specific balance for a given account size. + +The required rent deposit amount can also be estimated via the +[`solana rent` CLI subcommand](https://docs.anza.xyz/cli/usage#solana-rent): + +```shell +solana rent 15000 + +# output +Rent per byte-year: 0.00000348 SOL +Rent per epoch: 0.000288276 SOL +Rent-exempt minimum: 0.10529088 SOL +``` + +### Garbage collection + +Accounts that do not maintain a lamport balance greater than zero are removed +from the network in a process known as _garbage collection_. This process is +done to help reduce the network wide storage of no longer used/maintained data. + +After a transaction successfully reduces an accounts balance to exactly `0`, +garbage collection happens automatically by the runtime. Any transaction that +attempts to reduce an accounts balance lower that its minimum balance for rent +exemption (that is not exactly zero) will fail. + + + +It's important to note that garbage collection happens **after** the transaction +execution is completed. If there is an instruction to "close" an account by +reducing the account balance to zero, the account can be "reopened" within the +same transaction via a later instruction. If the account state was not cleared +in the "close" instruction, the later "reopen" instruction will have the same +account state. It's a security concern, so it's good to know the exact timing +garbage collection takes effect. + + + +Even after an account has been removed from the network (via garbage +collection), it may still have transactions associated with it's address (either +past history or in the future). Even though a Solana block explorer may display +an "account not found" type of message, you may still be able to view +transaction history associated with that account. + +You can read the validator +[implemented proposal](https://docs.anza.xyz/implemented-proposals/persistent-account-storage#garbage-collection) +for garbage collection to learn more. diff --git a/content/docs/core/index.mdx b/content/docs/core/index.mdx new file mode 100644 index 000000000..c1a0f166a --- /dev/null +++ b/content/docs/core/index.mdx @@ -0,0 +1,116 @@ +--- +title: Core Concepts +description: + Learn essential Solana blockchain concepts including accounts, transactions, + programs, program derived addresses, cross program invocations, and how tokens + work on Solana. +--- + +Build a strong understanding of the core concepts that make Solana different +from other blockchains. Understanding the "Solana programming model" through +these core concepts is very important to maximize your success as a Solana +blockchain developer. + +## Solana Account Model + +On Solana, all data is stored in what are referred to as "accounts”. 
The way +data is organized on the Solana blockchain resembles a +[key-value store](https://en.wikipedia.org/wiki/Key%E2%80%93value_database), +where each entry in the database is called an "account". + +Learn more about [Accounts](/docs/core/accounts) here. + +## Transactions and Instructions + +On Solana, we send [transactions](/docs/core/transactions#transaction) to +interact with the network. Transactions include one or more +[instructions](/docs/core/transactions#instruction), each representing a +specific operation to be processed. The execution logic for instructions is +stored on [programs](/docs/core/programs) deployed to the Solana network, where +each program stores its own set of instructions. + +Learn more about [Transactions](/docs/core/transactions) and +[Instructions](/docs/core/transactions#instruction) here. + +## Fees on Solana + +The Solana blockchain has a few different types of fees and costs that are +incurred to use the permissionless network. These can be segmented into a few +specific types: + +- [Transaction Fees](/docs/core/fees#transaction-fees) - A fee to have + validators process transactions/instructions +- [Prioritization Fees](/docs/core/fees#prioritization-fees) - An optional + fee to boost transactions processing order +- [Rent](/docs/core/fees#rent) - A withheld balance to keep data stored + on-chain + +Learn more about [Fees on Solana](/docs/core/fees) here. + +## Programs on Solana + +In the Solana ecosystem, "smart contracts" are called programs. Each program is +an on-chain account that stores executable logic, organized into specific +functions referred to as _instructions_ and called via _instruction handler_ +functions within the respective deployed program. + +Learn more about [Programs on Solana](/docs/core/programs) here. + +## Program Derived Address + +Program Derived Addresses (PDAs) provide developers on Solana with two main use +cases: + +- **Deterministic Account Addresses**: PDAs provide a mechanism to + deterministically derive an address using a combination of optional "seeds" + (predefined inputs) and a specific program ID. +- **Enable Program Signing**: The Solana runtime enables programs to "sign" for + PDAs which are derived from its program ID. + +You can think of PDAs as a way to create hashmap-like structures on-chain from a +predefined set of inputs (e.g. strings, numbers, and other account addresses). + +Learn more about [Program Derived Address](/docs/core/pda) here. + +## Cross Program Invocation + +A Cross Program Invocation (CPI) refers to when one program invokes the +instructions of another program. This mechanism allows for the composability of +Solana programs. + +You can think of instructions as API endpoints that a program exposes to the +network and a CPI as one API internally invoking another API. + +Learn more about [Cross Program Invocation](/docs/core/cpi) here. + +## Tokens on Solana + +Tokens are digital assets that represent ownership over diverse categories of +assets. Tokenization enables the digitalization of property rights, serving as a +fundamental component for managing both fungible and non-fungible assets. + +- Fungible Tokens represent interchangeable and divisible assets of the same + type and value (ex. USDC). +- Non-fungible Tokens (NFT) represent ownership of indivisible assets (e.g. + artwork). + +Learn more about [Tokens on Solana](/docs/core/tokens) here. 
## Clusters and Endpoints

The Solana blockchain has several different groups of validators, known as
[Clusters](/docs/core/clusters). Each cluster serves a different purpose within
the overall ecosystem and contains dedicated API nodes to fulfill
[JSON-RPC](/docs/rpc/) requests for its respective cluster.

The individual nodes within a Cluster are owned and operated by third parties,
with a public endpoint available for each.

There are three primary clusters on the Solana network, each with a different
public endpoint:

- Mainnet - `https://api.mainnet-beta.solana.com`
- Devnet - `https://api.devnet.solana.com`
- Testnet - `https://api.testnet.solana.com`

Learn more about [Clusters and Endpoints](/docs/core/clusters) here.

diff --git a/content/docs/core/meta.es.json b/content/docs/core/meta.es.json new file mode 100644 index 000000000..483b65f92 --- /dev/null +++ b/content/docs/core/meta.es.json @@ -0,0 +1,14 @@

{
  "title": "Conceptos Principales",
  "pages": [
    "accounts",
    "transactions",
    "fees",
    "programs",
    "pda",
    "cpi",
    "tokens",
    "clusters"
  ],
  "defaultOpen": true
}

diff --git a/content/docs/core/meta.json b/content/docs/core/meta.json new file mode 100644 index 000000000..eb8b61b94 --- /dev/null +++ b/content/docs/core/meta.json @@ -0,0 +1,14 @@

{
  "title": "Core Concepts",
  "pages": [
    "accounts",
    "transactions",
    "fees",
    "programs",
    "pda",
    "cpi",
    "tokens",
    "clusters"
  ],
  "defaultOpen": true
}

diff --git a/content/docs/core/pda.mdx b/content/docs/core/pda.mdx new file mode 100644 index 000000000..88748c41d --- /dev/null +++ b/content/docs/core/pda.mdx @@ -0,0 +1,393 @@

---
title: Program Derived Address
description:
  Learn about Program Derived Addresses (PDAs) on Solana - deterministic account
  addresses that enable secure program signing. Understand PDA derivation,
  canonical bumps, and how to create PDA accounts.
h1: Program Derived Address (PDA)
---

Program Derived Addresses (PDAs) provide developers on Solana with two main use
cases:

- **Deterministic Account Addresses**: PDAs provide a mechanism to
  deterministically derive an address using a combination of optional "seeds"
  (predefined inputs) and a specific program ID.
- **Enable Program Signing**: The Solana runtime enables programs to "sign" for
  PDAs that are derived from their program ID.

You can think of PDAs as a way to create hashmap-like structures on-chain from a
predefined set of inputs (e.g. strings, numbers, and other account addresses).

The benefit of this approach is that it eliminates the need to keep track of an
exact address. Instead, you simply need to recall the specific inputs used for
its derivation.

![Program Derived Address](/assets/docs/core/pda/pda.svg)

It's important to understand that simply deriving a Program Derived Address
(PDA) does not automatically create an on-chain account at that address.
Accounts with a PDA as the on-chain address must be explicitly created through
the program used to derive the address. You can think of deriving a PDA as
finding an address on a map. Just having an address does not mean there is
anything built at that location.

> This section will cover the details of deriving PDAs. The details on how
> programs use PDAs for signing will be addressed in the section on
> [Cross Program Invocations (CPIs)](/docs/core/cpi) as it requires context
> for both concepts.
+ +## Key Points + +- PDAs are addresses derived deterministically using a combination of + user-defined seeds, a bump seed, and a program's ID. + +- PDAs are addresses that fall off the Ed25519 curve and have no corresponding + private key. + +- Solana programs can programmatically "sign" on behalf of PDAs that are derived + using its program ID. + +- Deriving a PDA does not automatically create an on-chain account. + +- An account using a PDA as its address must be explicitly created through a + dedicated instruction within a Solana program. + +## What is a PDA + +PDAs are addresses that are deterministically derived and look like standard +public keys, but have no associated private keys. This means that no external +user can generate a valid signature for the address. However, the Solana runtime +enables programs to programmatically "sign" for PDAs without needing a private +key. + +For context, Solana +[Keypairs](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/sdk/src/signer/keypair.rs#L25) +are points on the Ed25519 curve (elliptic-curve cryptography) which have a +public key and corresponding private key. We often use public keys as the unique +IDs for new on-chain accounts and private keys for signing. + +![On Curve Address](/assets/docs/core/pda/address-on-curve.svg) + +A PDA is a point that is intentionally derived to fall off the Ed25519 curve +using a predefined set of inputs. A point that is not on the Ed25519 curve does +not have a valid corresponding private key and cannot be used for cryptographic +operations (signing). + +A PDA can then be used as the address (unique identifier) for an on-chain +account, providing a method to easily store, map, and fetch program state. + +![Off Curve Address](/assets/docs/core/pda/address-off-curve.svg) + +## How to derive a PDA + +The derivation of a PDA requires 3 inputs. + +- **Optional seeds**: Predefined inputs (e.g. string, number, other account + addresses) used to derive a PDA. These inputs are converted to a buffer of + bytes. +- **Bump seed**: An additional input (with a value between 255-0) that is used + to guarantee that a valid PDA (off curve) is generated. This bump seed + (starting with 255) is appended to the optional seeds when generating a PDA to + "bump" the point off the Ed25519 curve. The bump seed is sometimes referred to + as a "nonce". +- **Program ID**: The address of the program the PDA is derived from. This is + also the program that can "sign" on behalf of the PDA + +![PDA Derivation](/assets/docs/core/pda/pda-derivation.svg) + +The examples below include links to Solana Playground, where you can run the +examples in an in-browser editor. + +### FindProgramAddress + +To derive a PDA, we can use the +[`findProgramAddressSync`](https://github.com/solana-labs/solana-web3.js/blob/ca9da583a39cdf8fd874a2e03fccdc849e29de34/packages/library-legacy/src/publickey.ts#L212) +method from [`@solana/web3.js`](https://www.npmjs.com/package/@solana/web3.js). +There are equivalents of this function in other programming languages (e.g. +[Rust](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/sdk/program/src/pubkey.rs#L484)), +but in this section, we will walk through examples using Javascript. 
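As a quick sanity check, the short sketch below (a minimal example, assuming the legacy `@solana/web3.js` `PublicKey` and `Keypair` classes) uses `PublicKey.isOnCurve` to confirm that a regular keypair's address is a point on the Ed25519 curve, while a derived PDA is not:

```ts
import { Keypair, PublicKey } from "@solana/web3.js";

// A regular keypair's public key is a point on the Ed25519 curve
const keypair = Keypair.generate();
console.log("Keypair on curve:", PublicKey.isOnCurve(keypair.publicKey.toBytes())); // true

// A PDA is intentionally derived to fall off the curve
const programId = new PublicKey("11111111111111111111111111111111");
const [pda] = PublicKey.findProgramAddressSync([Buffer.from("helloWorld")], programId);
console.log("PDA on curve:", PublicKey.isOnCurve(pda.toBytes())); // false
```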
+ +When using the `findProgramAddressSync` method, we pass in: + +- the predefined optional seeds converted to a buffer of bytes, and +- the program ID (address) used to derive the PDA + +Once a valid PDA is found, `findProgramAddressSync` returns both the address +(PDA) and bump seed used to derive the PDA. + +The example below derives a PDA without providing any optional seeds. + +```ts /[]/ +import { PublicKey } from "@solana/web3.js"; + +const programId = new PublicKey("11111111111111111111111111111111"); + +const [PDA, bump] = PublicKey.findProgramAddressSync([], programId); + +console.log(`PDA: ${PDA}`); +console.log(`Bump: ${bump}`); +``` + +You can run this example on +[Solana Playground](https://beta.solpg.io/66031e5acffcf4b13384cfef). The PDA and +bump seed output will always be the same: + +``` +PDA: Cu7NwqCXSmsR5vgGA3Vw9uYVViPi3kQvkbKByVQ8nPY9 +Bump: 255 +``` + +The next example below adds an optional seed "helloWorld". + +```ts /string/ +import { PublicKey } from "@solana/web3.js"; + +const programId = new PublicKey("11111111111111111111111111111111"); +const string = "helloWorld"; + +const [PDA, bump] = PublicKey.findProgramAddressSync( + [Buffer.from(string)], + programId, +); + +console.log(`PDA: ${PDA}`); +console.log(`Bump: ${bump}`); +``` + +You can also run this example on +[Solana Playground](https://beta.solpg.io/66031ee5cffcf4b13384cff0). The PDA and +bump seed output will always be the same: + +``` +PDA: 46GZzzetjCURsdFPb7rcnspbEMnCBXe9kpjrsZAkKb6X +Bump: 254 +``` + +Note that the bump seed is 254. This means that 255 derived a point on the +Ed25519 curve, and is not a valid PDA. + +The bump seed returned by `findProgramAddressSync` is the first value (between +255-0) for the given combination of optional seeds and program ID that derives a +valid PDA. + +> This first valid bump seed is referred to as the "canonical bump". For program +> security, it is recommended to only use the canonical bump when working with +> PDAs. + +### CreateProgramAddress + +Under the hood, `findProgramAddressSync` will iteratively append an additional +bump seed (nonce) to the seeds buffer and call the +[`createProgramAddressSync`](https://github.com/solana-labs/solana-web3.js/blob/ca9da583a39cdf8fd874a2e03fccdc849e29de34/packages/library-legacy/src/publickey.ts#L168) +method. The bump seed starts with a value of 255 and is decreased by 1 until a +valid PDA (off curve) is found. + +You can replicate the previous example by using `createProgramAddressSync` and +explicitly passing in the bump seed of 254. + +```ts /bump/ +import { PublicKey } from "@solana/web3.js"; + +const programId = new PublicKey("11111111111111111111111111111111"); +const string = "helloWorld"; +const bump = 254; + +const PDA = PublicKey.createProgramAddressSync( + [Buffer.from(string), Buffer.from([bump])], + programId, +); + +console.log(`PDA: ${PDA}`); +``` + +Run this example above on +[Solana Playground](https://beta.solpg.io/66031f8ecffcf4b13384cff1). Given the +same seeds and program ID, the PDA output will match the previous one: + +``` +PDA: 46GZzzetjCURsdFPb7rcnspbEMnCBXe9kpjrsZAkKb6X +``` + +### Canonical Bump + +The "canonical bump" refers to the first bump seed (starting from 255 and +decrementing by 1) that derives a valid PDA. For program security, it is +recommended to only use PDAs derived from a canonical bump. + +Using the previous example as a reference, the example below attempts to derive +a PDA using every bump seed from 255-0. 
```ts
import { PublicKey } from "@solana/web3.js";

const programId = new PublicKey("11111111111111111111111111111111");
const string = "helloWorld";

// Loop through all bump seeds for demonstration
for (let bump = 255; bump >= 0; bump--) {
  try {
    const PDA = PublicKey.createProgramAddressSync(
      [Buffer.from(string), Buffer.from([bump])],
      programId,
    );
    console.log("bump " + bump + ": " + PDA);
  } catch (error) {
    console.log("bump " + bump + ": " + error);
  }
}
```

Run the example on
[Solana Playground](https://beta.solpg.io/66032009cffcf4b13384cff2) and you
should see the following output:

```
bump 255: Error: Invalid seeds, address must fall off the curve
bump 254: 46GZzzetjCURsdFPb7rcnspbEMnCBXe9kpjrsZAkKb6X
bump 253: GBNWBGxKmdcd7JrMnBdZke9Fumj9sir4rpbruwEGmR4y
bump 252: THfBMgduMonjaNsCisKa7Qz2cBoG1VCUYHyso7UXYHH
bump 251: EuRrNqJAofo7y3Jy6MGvF7eZAYegqYTwH2dnLCwDDGdP
bump 250: Error: Invalid seeds, address must fall off the curve
...
// remaining bump outputs
```

As expected, the bump seed 255 throws an error and the first bump seed to derive
a valid PDA is 254.

However, note that bump seeds 253-251 all derive valid PDAs with different
addresses. This means that given the same optional seeds and `programId`, a bump
seed with a different value can still derive a valid PDA.

When building Solana programs, it is recommended to include security checks that
validate that a PDA passed to the program is derived using the canonical bump.
Failing to do so may introduce vulnerabilities that allow for unexpected
accounts to be provided to a program.

## Create PDA Accounts

This example program on
[Solana Playground](https://beta.solpg.io/github.com/ZYJLiu/doc-examples/tree/main/pda-account)
demonstrates how to create an account using a PDA as the address of the new
account. The example program is written using the Anchor framework.

In the `lib.rs` file, you will find the following program which includes a
single instruction to create a new account using a PDA as the address of the
account. The new account stores the address of the `user` and the `bump` seed
used to derive the PDA.

```rust title="lib.rs" {11-14,26-29}
use anchor_lang::prelude::*;

declare_id!("75GJVCJNhaukaa2vCCqhreY31gaphv7XTScBChmr1ueR");

#[program]
pub mod pda_account {
    use super::*;

    pub fn initialize(ctx: Context<Initialize>) -> Result<()> {
        let account_data = &mut ctx.accounts.pda_account;
        // store the address of the `user`
        account_data.user = *ctx.accounts.user.key;
        // store the canonical bump
        account_data.bump = ctx.bumps.pda_account;
        Ok(())
    }
}

#[derive(Accounts)]
pub struct Initialize<'info> {
    #[account(mut)]
    pub user: Signer<'info>,

    #[account(
        init,
        // set the seeds to derive the PDA
        seeds = [b"data", user.key().as_ref()],
        // use the canonical bump
        bump,
        payer = user,
        space = 8 + DataAccount::INIT_SPACE
    )]
    pub pda_account: Account<'info, DataAccount>,
    pub system_program: Program<'info, System>,
}

#[account]
#[derive(InitSpace)]
pub struct DataAccount {
    pub user: Pubkey,
    pub bump: u8,
}
```

The seeds used to derive the PDA include the hardcoded string `data` and the
address of the `user` account provided in the instruction. The Anchor framework
automatically derives the canonical `bump` seed.
+ +```rust /data/ /user.key()/ /bump/ +#[account( + init, + seeds = [b"data", user.key().as_ref()], + bump, + payer = user, + space = 8 + DataAccount::INIT_SPACE +)] +pub pda_account: Account<'info, DataAccount>, +``` + +The `init` constraint instructs Anchor to invoke the System Program to create a +new account using the PDA as the address. Under the hood, this is done through a +[CPI](/docs/core/cpi). + +```rust /init/ +#[account( + init, + seeds = [b"data", user.key().as_ref()], + bump, + payer = user, + space = 8 + DataAccount::INIT_SPACE +)] +pub pda_account: Account<'info, DataAccount>, +``` + +In the test file (`pda-account.test.ts`) located within the Solana Playground +link provided above, you will find the Javascript equivalent to derive the PDA. + +```ts /data/ /user.publicKey/ +const [PDA] = PublicKey.findProgramAddressSync( + [Buffer.from("data"), user.publicKey.toBuffer()], + program.programId, +); +``` + +A transaction is then sent to invoke the `initialize` instruction to create a +new on-chain account using the PDA as the address. Once the transaction is sent, +the PDA is used to fetch the on-chain account that was created at the address. + +```ts /initialize()/ /PDA/ {14} +it("Is initialized!", async () => { + const transactionSignature = await program.methods + .initialize() + .accounts({ + user: user.publicKey, + pdaAccount: PDA, + }) + .rpc(); + + console.log("Transaction Signature:", transactionSignature); +}); + +it("Fetch Account", async () => { + const pdaAccount = await program.account.dataAccount.fetch(PDA); + console.log(JSON.stringify(pdaAccount, null, 2)); +}); +``` + +Note that if you invoke the `initialize` instruction more than once using the +same `user` address as a seed, then the transaction will fail. This is because +an account will already exist at the derived address. diff --git a/content/docs/core/programs.es.mdx b/content/docs/core/programs.es.mdx new file mode 100644 index 000000000..b06e65f3b --- /dev/null +++ b/content/docs/core/programs.es.mdx @@ -0,0 +1,51 @@ +--- +title: Programas en Solana +description: Aprende sobre programas en Solana (smart contracts) y cómo desarrollarlos usando Rust o Anchor. +h1: Programas +--- + +En el ecosistema de Solana, los "contratos inteligentes" se llaman programas. Cada [programa](/docs/core/accounts#program-account) es una cuenta en la cadena de bloques que almacena lógica ejecutable, organizada en funciones específicas conocidas como [instrucciones](/docs/core/transactions#instruction). + +## Puntos clave + +- Los programas son cuentas en la cadena de bloques que contienen código ejecutable. Este código es organizado en distintas funciones conocidas como instrucciones. + +- Los programas no tienen estado, pero pueden incluir instrucciones para crear cuentas, las cuales son usadas para almacenar y manejar el estado del programa. + +- Los programas pueden ser actualizados por una autoridad de actualización. Un programa se vuelve inmutable cuando la autoridad de actualización está establecida en nula. + +- Los builds verificables permiten a los usuarios verificar que los programas en la cadena de bloques coincidan con el código fuente disponible públicamente. + +## Escribir Programas en Solana + +Los programas en Solana se escriben principalmente en el lenguaje de programación [Rust](https://doc.rust-lang.org/book/), con dos enfoques comunes para el desarrollo: + +- [Anchor](/docs/programs/anchor): Un framework diseñado para el desarrollo de programas en Solana. 
Proporciona una forma más rápida y sencilla de escribir programas, utilizando macros de Rust para reducir significativamente el código boilerplate. Para principiantes, se recomienda comenzar con el framework Anchor. + +- [Rust Nativo](/developers/guides/getstarted/intro-to-native-rust): Este enfoque implica escribir programas de Solana en Rust sin aprovechar los beneficios de ningún marco de trabajo. Ofrece más flexibilidad pero viene acompañado de una mayor complejidad. + +## Actualizar Programas en Solana + +Los programas en la cadena de bloques pueden ser [modificados directamente](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/programs/bpf_loader/src/lib.rs#L675) por una cuenta designada como la "autoridad de actualización", que es típicamente la cuenta que originalmente desplegó el programa. + +Si la [autoridad de actualización](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/programs/bpf_loader/src/lib.rs#L865) se revoca y se establece como `None`, el programa se vuelve inmutable y ya no se puede actualizar. + +## Programas verificables + +Asegurar la integridad y verificabilidad del código en la cadena de bloques es esencial. Una compilación verificable asegura que el código ejecutable desplegado en la cadena de bloques puede ser independientemente verificado para que coincida con su código fuente público por cualquier tercero. Este proceso aumenta la transparencia y la confianza, permitiendo detectar discrepancias entre el código fuente y el programa desplegado. + +The Solana developer community has introduced tools to support verifiable builds, enabling both developers and users to verify that onchain programs accurately reflect their publicly shared source code. + +- **Buscando programas verificados**: Para comprobar rápidamente los programas verificados, los usuarios pueden buscar un programa por su dirección en el explorador [SolanaFM](https://solana.fm/) e ir a la pestaña "Verification". Vea un ejemplo de un programa verificado [aquí](https://solana.fm/address/PhoeNiXZ8ByJGLkxNfZRnkUfjvmuYqLR89jjFHGqdXY). + +- **Herramientas de verificación**: + [Solana Verifiable Build CLI](https://github.com/Ellipsis-Labs/solana-verifiable-build) + de Ellipsis Labs permite a los usuarios verificar programas en la cadena de bloques contra el código fuente publicado. + +- **Soporte para compilaciones verificables en Anchor**: Anchor proporciona soporte integrado para compilaciones verificables. Los detalles pueden ser encontrados en la [documentación de Anchor](https://www.anchor-lang.com/docs/verifiable-builds). + +## Berkeley Packet Filter (BPF) + +Solana aprovecha la [infraestructura del compilador LLVM](https://llvm.org/) para compilar programas en archivos con [formato ejecutable y enlazable (ELF)](https://en.wikipedia.org/wiki/Executable_and_Linkable_Format). Estos archivos incluyen una versión modificada del bytecode [Berkeley Packet Filter (eBPF)](https://en.wikipedia.org/wiki/EBPF) para programas de Solana, conocido como "Solana Bytecode Format" (sBPF). + +El uso de LLVM permite a Solana soportar potencialmente cualquier lenguaje de programación que pueda compilar al backend BPF de LLVM. Esto mejora significativamente la flexibilidad de Solana como plataforma de desarrollo. 
diff --git a/content/docs/core/programs.mdx b/content/docs/core/programs.mdx new file mode 100644 index 000000000..00c9e58c7 --- /dev/null +++ b/content/docs/core/programs.mdx @@ -0,0 +1,94 @@ +--- +title: Programs on Solana +description: + Learn about Solana programs (smart contracts) and how to develop them using + Rust or the Anchor framework. Understand program deployment, upgrades, and + verification on the Solana network. +h1: Programs +--- + +In the Solana ecosystem, "smart contracts" are called programs. Each +[program](/docs/core/accounts#program-account) is an on-chain account that +stores executable logic, organized into specific functions referred to as +[instructions](/docs/core/transactions#instruction). + +## Key Points + +- Programs are on-chain accounts that contain executable code. This code is + organized into distinct functions known as instructions. + +- Programs are stateless but can include instructions to create new accounts, + which are used to store and manage program state. + +- Programs can be updated by an upgrade authority. A program becomes immutable + when the upgrade authority is set to null. + +- Verifiable builds enable users to verify that onchain programs match the + publicly available source code. + +## Writing Solana Programs + +Solana programs are predominantly written in the +[Rust](https://doc.rust-lang.org/book/) programming language, with two common +approaches for development: + +- [Anchor](/docs/programs/anchor): A framework designed for Solana program + development. It provides a faster and simpler way to write programs, using + Rust macros to significantly reduce boilerplate code. For beginners, it is + recommended to start with the Anchor framework. + +- [Native Rust](/developers/guides/getstarted/intro-to-native-rust): This + approach involves writing Solana programs in Rust without leveraging any + frameworks. It offers more flexibility but comes with increased complexity. + +## Updating Solana Programs + +On-chain programs can be +[directly modified](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/programs/bpf_loader/src/lib.rs#L675) +by an account designated as the "upgrade authority", which is typically the +account that originally deployed the program. + +If the +[upgrade authority](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/programs/bpf_loader/src/lib.rs#L865) +is revoked and set to `None`, the program becomes immutable and can no longer be +updated. + +## Verifiable Programs + +Ensuring the integrity and verifiability of on-chain code is essential. A +verifiable build ensures that the executable code deployed on-chain can be +independently verified to match its public source code by any third party. This +process enhances transparency and trust, making it possible to detect +discrepancies between the source code and the deployed program. + +The Solana developer community has introduced tools to support verifiable +builds, enabling both developers and users to verify that onchain programs +accurately reflect their publicly shared source code. + +- **Searching for Verified Programs**: To quickly check for verified programs, + users can search for a program address on the [SolanaFM](https://solana.fm/) + Explorer and navigate to the "Verification" tab. View an example of a verified + program + [here](https://solana.fm/address/PhoeNiXZ8ByJGLkxNfZRnkUfjvmuYqLR89jjFHGqdXY). 
+ +- **Verification Tools**: The + [Solana Verifiable Build CLI](https://github.com/Ellipsis-Labs/solana-verifiable-build) + by Ellipsis Labs enables users to independently verify onchain programs + against published source code. + +- **Support for Verifiable Builds in Anchor**: Anchor provides built-in support + for verifiable builds. Details can be found in the + [Anchor documentation](https://www.anchor-lang.com/docs/verifiable-builds). + +## Berkeley Packet Filter (BPF) + +Solana leverages the [LLVM compiler infrastructure](https://llvm.org/) to +compile programs into +[Executable and Linkable Format (ELF)](https://en.wikipedia.org/wiki/Executable_and_Linkable_Format) +files. These files include a modified version of +[Berkeley Packet Filter (eBPF)](https://en.wikipedia.org/wiki/EBPF) bytecode for +Solana programs, known as "Solana Bytecode Format" (sBPF). + +The use of LLVM enables Solana to potentially support any programming language +that can compile to LLVM's BPF backend. This significantly enhances the +flexibility of Solana as a development platform. diff --git a/content/docs/core/tokens.mdx b/content/docs/core/tokens.mdx new file mode 100644 index 000000000..15768bec3 --- /dev/null +++ b/content/docs/core/tokens.mdx @@ -0,0 +1,579 @@ +--- +title: "Tokens on Solana" +description: + Learn about Solana tokens (SPL Tokens) including fungible and non-fungible + tokens, Token Program, Token Extensions Program, mint accounts, token + accounts, and practical examples for creating and managing tokens on Solana. +--- + +Tokens are digital assets that represent ownership over diverse categories of +assets. Tokenization enables the digitalization of property rights, serving as a +fundamental component for managing both fungible and non-fungible assets. + +- Fungible Tokens represent interchangeable and divisible assets of the same + type and value (ex. USDC). +- Non-fungible Tokens (NFT) represent ownership of indivisible assets (e.g. + artwork). + +This section will cover the basics of how tokens are represented on Solana. +These are referred to as SPL +([Solana Program Library](https://github.com/solana-labs/solana-program-library)) +Tokens. + +- The [Token Program](#token-program) contains all the instruction logic for + interacting with tokens on the network (both fungible and non-fungible). + +- A [Mint Account](#mint-account) represents a specific type of token and stores + global metadata about the token such as the total supply and mint authority + (address authorized to create new units of a token). + +- A [Token Account](#token-account) keeps track of individual ownership of how + many units of a specific type of token (mint account) are owned by a specific + address. + +> There are currently two versions of the Token Program. The original +> [Token Program](https://github.com/solana-labs/solana-program-library/tree/b1c44c171bc95e6ee74af12365cb9cbab68be76c/token/program) +> and the +> [Token Extensions Program](https://github.com/solana-labs/solana-program-library/tree/b1c44c171bc95e6ee74af12365cb9cbab68be76c/token/program-2022) +> (Token2022). The Token Extensions Program functions the same as the original +> Token Program, but with additional features and improvements. The Token +> Extensions Program is the recommended version to use for creating new tokens +> (mint accounts). + +## Key Points + +- Tokens represent ownership over either fungible (interchangeable) or + non-fungible (unique) assets. 
- The Token Program contains all instructions for interacting with both fungible
  and non-fungible tokens on the network.

- The Token Extensions Program is a new version of the Token Program that
  includes additional features while maintaining the same core functionalities.

- A Mint Account represents a unique token on the network and stores global
  metadata such as total supply.

- A Token Account tracks individual ownership of tokens for a specific mint
  account.

- An Associated Token Account is a Token Account created with an address derived
  from the owner's and mint account's addresses.

## Token Program

The
[Token Program](https://github.com/solana-labs/solana-program-library/tree/b1c44c171bc95e6ee74af12365cb9cbab68be76c/token/program)
contains all the instruction logic for interacting with tokens on the network
(both fungible and non-fungible). All tokens on Solana are effectively
[data accounts](/docs/core/accounts#data-account) owned by the Token Program.

You can find the full list of Token Program instructions
[here](https://github.com/solana-labs/solana-program-library/blob/b1c44c171bc95e6ee74af12365cb9cbab68be76c/token/program/src/instruction.rs).

![Token Program](/assets/docs/core/tokens/token-program.svg)

A few commonly used instructions include:

- [`InitializeMint`](https://github.com/solana-labs/solana-program-library/blob/b1c44c171bc95e6ee74af12365cb9cbab68be76c/token/program/src/processor.rs#L29):
  Create a new mint account to represent a new type of token.
- [`InitializeAccount`](https://github.com/solana-labs/solana-program-library/blob/b1c44c171bc95e6ee74af12365cb9cbab68be76c/token/program/src/processor.rs#L84):
  Create a new token account to hold units of a specific type of token (mint).
- [`MintTo`](https://github.com/solana-labs/solana-program-library/blob/b1c44c171bc95e6ee74af12365cb9cbab68be76c/token/program/src/processor.rs#L522):
  Create new units of a specific type of token and add them to a token account.
  This increases the supply of the token and can only be done by the mint
  authority of the mint account.
- [`Transfer`](https://github.com/solana-labs/solana-program-library/blob/b1c44c171bc95e6ee74af12365cb9cbab68be76c/token/program/src/processor.rs#L228):
  Transfer units of a specific type of token from one token account to another.

### Mint Account

Tokens on Solana are uniquely identified by the address of a
[Mint Account](https://github.com/solana-labs/solana-program-library/blob/b1c44c171bc95e6ee74af12365cb9cbab68be76c/token/program/src/state.rs#L18-L32)
owned by the Token Program. This account is effectively a global counter for a
specific token, and stores data such as:

- Supply: Total supply of the token
- Decimals: Decimal precision of the token
- Mint authority: The account authorized to create new units of the token, thus
  increasing the supply
- Freeze authority: The account authorized to freeze tokens from being
  transferred from "token accounts"

![Mint Account](/assets/docs/core/tokens/mint-account.svg)

The full details stored on each Mint Account include the following:

```rust
pub struct Mint {
    /// Optional authority used to mint new tokens. The mint authority may only
    /// be provided during mint creation. If no mint authority is present
    /// then the mint has a fixed supply and no further tokens may be
    /// minted.
    pub mint_authority: COption<Pubkey>,
    /// Total supply of tokens.
    pub supply: u64,
    /// Number of base 10 digits to the right of the decimal place.
    pub decimals: u8,
    /// Is `true` if this structure has been initialized
    pub is_initialized: bool,
    /// Optional authority to freeze token accounts.
    pub freeze_authority: COption<Pubkey>,
}
```

For reference, here is a Solana Explorer link to the
[USDC Mint Account](https://explorer.solana.com/address/EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v).

### Token Account

To track the individual ownership of each unit of a specific token, another type
of data account owned by the Token Program must be created. This account is
referred to as a
[Token Account](https://github.com/solana-labs/solana-program-library/blob/b1c44c171bc95e6ee74af12365cb9cbab68be76c/token/program/src/state.rs#L89-L110).

The most commonly referenced data stored on the Token Account include the
following:

- Mint: The type of token the Token Account holds units of
- Owner: The account authorized to transfer tokens out of the Token Account
- Amount: Units of the token the Token Account currently holds

![Token Account](/assets/docs/core/tokens/token-account.svg)

The full details stored on each Token Account include the following:

```rust
pub struct Account {
    /// The mint associated with this account
    pub mint: Pubkey,
    /// The owner of this account.
    pub owner: Pubkey,
    /// The amount of tokens this account holds.
    pub amount: u64,
    /// If `delegate` is `Some` then `delegated_amount` represents
    /// the amount authorized by the delegate
    pub delegate: COption<Pubkey>,
    /// The account's state
    pub state: AccountState,
    /// If is_native.is_some, this is a native token, and the value logs the
    /// rent-exempt reserve. An Account is required to be rent-exempt, so
    /// the value is used by the Processor to ensure that wrapped SOL
    /// accounts do not drop below this threshold.
    pub is_native: COption<u64>,
    /// The amount delegated
    pub delegated_amount: u64,
    /// Optional authority to close the account.
    pub close_authority: COption<Pubkey>,
}
```

For a wallet to own units of a certain token, it needs to create a token account
for a specific type of token (mint) that designates the wallet as the owner of
the token account. A wallet can create multiple token accounts for the same type
of token, but each token account can only be owned by one wallet and hold units
of one type of token.

![Account Relationship](/assets/docs/core/tokens/token-account-relationship.svg)

> Note that each Token Account's data includes an `owner` field used to identify
> who has authority over that specific Token Account. This is separate from the
> program owner specified in the
> [AccountInfo](/docs/core/accounts#accountinfo), which is the Token Program
> for all Token Accounts.

### Associated Token Account

To simplify the process of locating a token account's address for a specific
mint and owner, we often use Associated Token Accounts.

An Associated Token Account is a token account whose address is
deterministically derived using the owner's address and the mint account's
address. You can think of the Associated Token Account as the "default" token
account for a specific mint and owner.

It's important to understand that an Associated Token Account isn't a different
type of token account. It's just a token account with a specific address.

![Associated Token Account](/assets/docs/core/tokens/associated-token-account.svg)

This introduces a key concept in Solana development:
[Program Derived Address (PDA)](/docs/core/pda).
Conceptually, a PDA provides
a deterministic way to generate an address using some predefined inputs. This
enables us to easily find the address of an account at a later time.

Here is a [Solana Playground](https://beta.solpg.io/656a2dd0fb53fa325bfd0c41)
example that derives the USDC Associated Token Account address and owner. It
will always generate the
[same address](https://explorer.solana.com/address/4kokFKCFMxpCpG41yLYkLEqXW8g1WPfCt2NC9KGivY6N)
for the same mint and owner.

```ts
import { getAssociatedTokenAddressSync } from "@solana/spl-token";

const associatedTokenAccountAddress = getAssociatedTokenAddressSync(
  USDC_MINT_ADDRESS,
  OWNER_ADDRESS,
);
```

Specifically, the address for an Associated Token Account is derived using the
following inputs. Here is a
[Solana Playground](https://beta.solpg.io/656a31d0fb53fa325bfd0c42) example that
generates the same address as the previous example.

```ts
import { PublicKey } from "@solana/web3.js";

const [PDA, bump] = PublicKey.findProgramAddressSync(
  [
    OWNER_ADDRESS.toBuffer(),
    TOKEN_PROGRAM_ID.toBuffer(),
    USDC_MINT_ADDRESS.toBuffer(),
  ],
  ASSOCIATED_TOKEN_PROGRAM_ID,
);
```

For two wallets to hold units of the same type of token, each wallet needs its
own token account for the specific mint account. The image below demonstrates
what this account relationship looks like.

![Accounts Relationship Expanded](/assets/docs/core/tokens/token-account-relationship-ata.svg)

## Token Examples

The [`spl-token` CLI](https://docs.anza.xyz/cli) can be used to experiment with
SPL tokens. In the examples below, we'll use the
[Solana Playground](https://beta.solpg.io/) terminal to run the CLI commands
directly in the browser without having to install the CLI locally.

Creating tokens and accounts requires SOL for account rent deposits and
transaction fees. If it is your first time using Solana Playground, create a
Playground wallet and run the `solana airdrop` command in the Playground
terminal. You can also get devnet SOL using the public
[web faucet](https://faucet.solana.com/).

```sh
solana airdrop 2
```

Run `spl-token --help` for a full description of available commands.

```sh
spl-token --help
```

Alternatively, you can install the `spl-token` CLI locally using
`cargo install spl-token-cli`. This requires first
[installing Rust](https://rustup.rs/).

> In the following sections, the account addresses displayed when you run the
> CLI command will differ from the example output shown below. Please use the
> address shown in your Playground terminal when following along. For example,
> the address output from the `create-token` command is the mint account where
> your Playground wallet is set as the mint authority.

### Create a New Token

To create a new token ([mint account](#mint-account)), run the following command
in the Solana Playground terminal.

```sh
spl-token create-token
```

You should see output similar to the following below. You can inspect both the
token and transaction details on
[Solana Explorer](https://explorer.solana.com/?cluster=devnet) using the
`Address` and `Signature`.

In the example output below, the unique identifier (address) of the new token is
`99zqUzQGohamfYxyo8ykTEbi91iom3CLmwCA75FK5zTg`.
```shell title="Terminal Output" /99zqUzQGohamfYxyo8ykTEbi91iom3CLmwCA75FK5zTg/
Creating token 99zqUzQGohamfYxyo8ykTEbi91iom3CLmwCA75FK5zTg

Address: 99zqUzQGohamfYxyo8ykTEbi91iom3CLmwCA75FK5zTg
Decimals: 9

Signature: 44fvKfT1ezBUwdzrCys3fvCdFxbLMnNvBstds76QZyE6cXag5NupBprSXwxPTzzjrC3cA6nvUZaLFTvmcKyzxrm1
```

New tokens initially have no supply. You can check the current supply of a token
using the following command:

```sh
spl-token supply <TOKEN_ADDRESS>
```

Running the `supply` command for a newly created token will return a value of
`0`:

```sh /99zqUzQGohamfYxyo8ykTEbi91iom3CLmwCA75FK5zTg/
spl-token supply 99zqUzQGohamfYxyo8ykTEbi91iom3CLmwCA75FK5zTg
```

Under the hood, creating a new Mint Account requires sending a transaction with
two instructions. Here is a Javascript example on
[Solana Playground](https://beta.solpg.io/660ce32ecffcf4b13384d00f).

1. Invoke the System Program to create a new account with enough space for the
   Mint Account data and then transfer ownership to the Token Program.

2. Invoke the Token Program to initialize the data of the new account as a Mint
   Account.

### Create Token Account

To hold units of a particular token, you must first create a
[token account](#token-account). To create a new token account, use the
following command:

```sh
spl-token create-account [OPTIONS] <TOKEN_ADDRESS>
```

For example, running the following command in the Solana Playground terminal:

```sh /99zqUzQGohamfYxyo8ykTEbi91iom3CLmwCA75FK5zTg/
spl-token create-account 99zqUzQGohamfYxyo8ykTEbi91iom3CLmwCA75FK5zTg
```

Returns the following output:

- `AfB7uwBEsGtrrBqPTVqEgzWed5XdYfM1psPNLmf7EeX9` is the address of the token
  account created to hold units of the token specified in the `create-account`
  command.

```shell title="Terminal Output" /AfB7uwBEsGtrrBqPTVqEgzWed5XdYfM1psPNLmf7EeX9/
Creating account AfB7uwBEsGtrrBqPTVqEgzWed5XdYfM1psPNLmf7EeX9

Signature: 2BtrynuCLX9CNofFiaw6Yzbx6hit66pup9Sk7aFjwU2NEbFz7NCHD9w9sWhrCfEd73XveAGK1DxFpJoQZPXU9tS1
```

By default, the `create-account` command creates an
[associated token account](#associated-token-account) with your wallet address
as the token account owner.

You can create a token account with a different owner using the following
command:

```sh
spl-token create-account --owner <OWNER_ADDRESS> <TOKEN_ADDRESS>
```

For example, running the following command:

```sh /2i3KvjDCZWxBsqcxBHpdEaZYQwQSYE6LXUMx5VjY5XrR/
spl-token create-account --owner 2i3KvjDCZWxBsqcxBHpdEaZYQwQSYE6LXUMx5VjY5XrR 99zqUzQGohamfYxyo8ykTEbi91iom3CLmwCA75FK5zTg
```

Returns the following output:

- `Hmyk3FSw4cfsuAes7sanp2oxSkE9ivaH6pMzDzbacqmt` is the address of the token
  account created to hold units of the token specified in the `create-account`
  command (`99zqUzQGohamfYxyo8ykTEbi91iom3CLmwCA75FK5zTg`) and owned by the
  address specified following the `--owner` flag
  (`2i3KvjDCZWxBsqcxBHpdEaZYQwQSYE6LXUMx5VjY5XrR`). This is useful when you need
  to create a token account for another user.

```shell title="Terminal Output" /Hmyk3FSw4cfsuAes7sanp2oxSkE9ivaH6pMzDzbacqmt/
Creating account Hmyk3FSw4cfsuAes7sanp2oxSkE9ivaH6pMzDzbacqmt

Signature: 44vqKdfzspT592REDPY4goaRJH3uJ3Ce13G4BCuUHg35dVUbHuGTHvqn4ZjYF9BGe9QrjMfe9GmuLkQhSZCBQuEt
```

Under the hood, creating an Associated Token Account requires a single
instruction that invokes the
[Associated Token Program](https://github.com/solana-labs/solana-program-library/tree/b1c44c171bc95e6ee74af12365cb9cbab68be76c/associated-token-account/program/src).
Here is a Javascript example on
[Solana Playground](https://beta.solpg.io/660ce868cffcf4b13384d011).

The Associated Token Program uses [Cross Program Invocations](/docs/core/cpi)
to handle:

- [Invoking the System Program](https://github.com/solana-labs/solana-program-library/blob/b1c44c171bc95e6ee74af12365cb9cbab68be76c/associated-token-account/program/src/tools/account.rs#L19)
  to create a new account using the provided PDA as the address of the new
  account.
- [Invoking the Token Program](https://github.com/solana-labs/solana-program-library/blob/b1c44c171bc95e6ee74af12365cb9cbab68be76c/associated-token-account/program/src/processor.rs#L138-L161)
  to initialize the Token Account data for the new account.

Alternatively, creating a new Token Account using a randomly generated keypair
(not an Associated Token Account) requires sending a transaction with two
instructions. Here is a Javascript example on
[Solana Playground](https://beta.solpg.io/660ce716cffcf4b13384d010).

1. Invoke the System Program to create a new account with enough space for the
   Token Account data and then transfer ownership to the Token Program.

2. Invoke the Token Program to initialize the data of the new account as a Token
   Account.

### Mint Tokens

To create new units of a token, use the following command:

```sh
spl-token mint [OPTIONS] <TOKEN_ADDRESS> <TOKEN_AMOUNT> [--] [RECIPIENT_TOKEN_ACCOUNT_ADDRESS]
```

For example, running the following command:

```sh
spl-token mint 99zqUzQGohamfYxyo8ykTEbi91iom3CLmwCA75FK5zTg 100
```

Returns the following output:

- `99zqUzQGohamfYxyo8ykTEbi91iom3CLmwCA75FK5zTg` is the address of the mint
  account that tokens are being minted for (increasing total supply).

- `AfB7uwBEsGtrrBqPTVqEgzWed5XdYfM1psPNLmf7EeX9` is the address of your wallet's
  token account that units of the token are being minted to (increasing amount).

```shell title="Terminal Output" /99zqUzQGohamfYxyo8ykTEbi91iom3CLmwCA75FK5zTg/ /AfB7uwBEsGtrrBqPTVqEgzWed5XdYfM1psPNLmf7EeX9/
Minting 100 tokens
  Token: 99zqUzQGohamfYxyo8ykTEbi91iom3CLmwCA75FK5zTg
  Recipient: AfB7uwBEsGtrrBqPTVqEgzWed5XdYfM1psPNLmf7EeX9

Signature: 2NJ1m7qCraPSBAVxbr2ssmWZmBU9Jc8pDtJAnyZsZJRcaYCYMqq1oRY1gqA4ddQno3g3xcnny5fzr1dvsnFKMEqG
```

To mint tokens to a different token account, specify the address of the intended
recipient token account. For example, running the following command:

```sh /Hmyk3FSw4cfsuAes7sanp2oxSkE9ivaH6pMzDzbacqmt/
spl-token mint 99zqUzQGohamfYxyo8ykTEbi91iom3CLmwCA75FK5zTg 100 -- Hmyk3FSw4cfsuAes7sanp2oxSkE9ivaH6pMzDzbacqmt
```

Returns the following output:

- `99zqUzQGohamfYxyo8ykTEbi91iom3CLmwCA75FK5zTg` is the address of the mint
  account that tokens are being minted for (increasing total supply).

- `Hmyk3FSw4cfsuAes7sanp2oxSkE9ivaH6pMzDzbacqmt` is the address of the token
  account that units of the token are being minted to (increasing amount).

```shell title="Terminal Output" /99zqUzQGohamfYxyo8ykTEbi91iom3CLmwCA75FK5zTg/ /Hmyk3FSw4cfsuAes7sanp2oxSkE9ivaH6pMzDzbacqmt/
Minting 100 tokens
  Token: 99zqUzQGohamfYxyo8ykTEbi91iom3CLmwCA75FK5zTg
  Recipient: Hmyk3FSw4cfsuAes7sanp2oxSkE9ivaH6pMzDzbacqmt

Signature: 3SQvNM3o9DsTiLwcEkSPT1Edr14RgE2wC54TEjonEP2swyVCp2jPWYWdD6RwXUGpvDNUkKWzVBZVFShn5yntxVd7
```

Under the hood, creating new units of a token requires invoking the `MintTo`
instruction on the Token Program. This instruction must be signed by the mint
authority.
The instruction mints new units of the token to a Token Account and
increases the total supply on the Mint Account. Here is a Javascript example on
[Solana Playground](https://beta.solpg.io/660cea45cffcf4b13384d012).

### Transfer Tokens

To transfer units of a token between two token accounts, use the following
command:

```sh
spl-token transfer [OPTIONS] <TOKEN_ADDRESS> <TOKEN_AMOUNT> <RECIPIENT_ADDRESS or RECIPIENT_TOKEN_ACCOUNT_ADDRESS>
```

For example, running the following command:

```sh
spl-token transfer 99zqUzQGohamfYxyo8ykTEbi91iom3CLmwCA75FK5zTg 100 Hmyk3FSw4cfsuAes7sanp2oxSkE9ivaH6pMzDzbacqmt
```

Returns the following output:

- `AfB7uwBEsGtrrBqPTVqEgzWed5XdYfM1psPNLmf7EeX9` is the address of the token
  account that tokens are being transferred from. This would be the address of
  your token account for the specified token being transferred.

- `Hmyk3FSw4cfsuAes7sanp2oxSkE9ivaH6pMzDzbacqmt` is the address of the token
  account that tokens are being transferred to.

```shell title="Terminal Output" /AfB7uwBEsGtrrBqPTVqEgzWed5XdYfM1psPNLmf7EeX9/ /Hmyk3FSw4cfsuAes7sanp2oxSkE9ivaH6pMzDzbacqmt/
Transfer 100 tokens
  Sender: AfB7uwBEsGtrrBqPTVqEgzWed5XdYfM1psPNLmf7EeX9
  Recipient: Hmyk3FSw4cfsuAes7sanp2oxSkE9ivaH6pMzDzbacqmt

Signature: 5y6HVwV8V2hHGLTVmTmdySRiEUCZnWmkasAvJ7J6m7JR46obbGKCBqUFgLpZu5zQGwM4Xy6GZ4M5LKd1h6Padx3o
```

Under the hood, transferring tokens requires invoking the `Transfer` instruction
on the Token Program. This instruction must be signed by the owner of the
sender's Token Account. The instruction transfers units of a token from one
Token Account to another Token Account. Here is a Javascript example on
[Solana Playground](https://beta.solpg.io/660ced84cffcf4b13384d013).

It's important to understand that both the sender and recipient must have
existing token accounts for the specific type of token being transferred. The
sender can include additional instructions on the transaction to create the
recipient's token account, which is generally the Associated Token Account.

### Create Token Metadata

The Token Extensions Program enables additional customizable metadata (such as
name, symbol, link to image) to be stored directly on the Mint Account.

To use the Token Extensions CLI flags, ensure you have a local installation of
the CLI, version 3.4.0 or later:

`cargo install --version 3.4.0 spl-token-cli`

To create a new token with the metadata extension enabled, use the following
command:

```sh
spl-token create-token --program-id TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb --enable-metadata
```

The command returns the following output:

- `BdhzpzhTD1MFqBiwNdrRy4jFo2FHFufw3n9e8sVjJczP` is the address of the new token
  created with the metadata extension enabled.

```shell title="Terminal Output" /BdhzpzhTD1MFqBiwNdrRy4jFo2FHFufw3n9e8sVjJczP/
Creating token BdhzpzhTD1MFqBiwNdrRy4jFo2FHFufw3n9e8sVjJczP under program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb
To initialize metadata inside the mint, please run `spl-token initialize-metadata BdhzpzhTD1MFqBiwNdrRy4jFo2FHFufw3n9e8sVjJczP <YOUR_TOKEN_NAME> <YOUR_TOKEN_SYMBOL> <YOUR_TOKEN_URI>`, and sign with the mint authority.

Address: BdhzpzhTD1MFqBiwNdrRy4jFo2FHFufw3n9e8sVjJczP
Decimals: 9

Signature: 5iQofFeXdYhMi9uTzZghcq8stAaa6CY6saUwcdnELST13eNSifiuLbvR5DnRt311frkCTUh5oecj8YEvZSB3wfai
```

Once a new token is created with the metadata extension enabled, use the
following command to initialize the metadata.

```sh
spl-token initialize-metadata <TOKEN_MINT_ADDRESS> <YOUR_TOKEN_NAME> <YOUR_TOKEN_SYMBOL> <YOUR_TOKEN_URI>
```

The token URI is normally a link to offchain metadata you want to associate with
the token.
You can find an example of the JSON format +[here](https://raw.githubusercontent.com/solana-developers/opos-asset/main/assets/DeveloperPortal/metadata.json). + +For example, running the following command will store the additional metadata +directly on the specified mint account: + +```sh /BdhzpzhTD1MFqBiwNdrRy4jFo2FHFufw3n9e8sVjJczP/ +spl-token initialize-metadata BdhzpzhTD1MFqBiwNdrRy4jFo2FHFufw3n9e8sVjJczP "TokenName" "TokenSymbol" "https://raw.githubusercontent.com/solana-developers/opos-asset/main/assets/DeveloperPortal/metadata.json" +``` + +You can then look up the address of the mint account on an explorer to inspect +the metadata. For example, here is a token created with the metadata extension +enabled on the +[SolanaFm](https://solana.fm/address/BdhzpzhTD1MFqBiwNdrRy4jFo2FHFufw3n9e8sVjJczP?cluster=devnet-solana) +explorer. + +You can learn more on the +[Metadata Extension Guide](/developers/guides/token-extensions/metadata-pointer). +For more details related to various Token Extensions, refer to the Token +Extensions +[Getting Started Guide](/developers/guides/token-extensions/getting-started) +and the [SPL documentation](https://spl.solana.com/token-2022/extensions). diff --git a/content/docs/core/transactions.mdx b/content/docs/core/transactions.mdx new file mode 100644 index 000000000..932a286fe --- /dev/null +++ b/content/docs/core/transactions.mdx @@ -0,0 +1,415 @@ +--- +title: "Transactions and Instructions" +description: + Learn about Solana transactions and instructions - the fundamental building + blocks for interacting with the Solana blockchain. Understand transaction + structure and instruction composition with practical examples. +--- + +On Solana, we send [transactions](/docs/core/transactions#transaction) to +interact with the network. Transactions include one or more +[instructions](/docs/core/transactions#instruction), each representing a +specific operation to be processed. The execution logic for instructions is +stored on [programs](/docs/core/programs) deployed to the Solana network, where +each program stores its own set of instructions. + +Below are key details about how transactions are executed: + +- Execution Order: If a transaction includes multiple instructions, the + instructions are processed in the order they are added to the transaction. +- Atomicity: A transaction is atomic, meaning it either fully completes with all + instructions successfully processed, or fails altogether. If any instruction + within the transaction fails, none of the instructions are executed. + +For simplicity, a transaction can be thought of as a request to process one or +multiple instructions. + +![Transaction Simplified](/assets/docs/core/transactions/transaction-simple.svg) + +You can imagine a transaction as an envelope, where each instruction is a +document that you fill out and place inside the envelope. We then mail out the +envelope to process the documents, just like sending a transaction on the +network to process our instructions. + +## Key Points + +- Solana transactions consist of instructions that interact with various + programs on the network, where each instruction represents a specific + operation. + +- Each instruction specifies the program to execute the instruction, the + accounts required by the instruction, and the data required for the + instruction's execution. + +- Instructions within a transaction are processed in the order they are listed. 
- Transactions are atomic, meaning either all instructions process successfully,
  or the entire transaction fails.

- The maximum size of a transaction is 1232 bytes.

## Basic Example

Below is a diagram representing a transaction with a single instruction to
transfer SOL from a sender to a receiver.

Individual "wallets" on Solana are accounts owned by the
[System Program](/docs/core/accounts#system-program). As part of the
[Solana Account Model](/docs/core/accounts), only the program that owns an
account is allowed to modify the data on the account.

Therefore, transferring SOL from a "wallet" account requires sending a
transaction to invoke the transfer instruction on the System Program.

![SOL Transfer](/assets/docs/core/transactions/sol-transfer.svg)

The sender account must be included as a signer (`is_signer`) on the transaction
to approve the deduction of their lamport balance. Both the sender and recipient
accounts must be mutable (`is_writable`) because the instruction modifies the
lamport balance for both accounts.

Once the transaction is sent, the System Program is invoked to process the
transfer instruction. The System Program then updates the lamport balances of
both the sender and recipient accounts accordingly.

![SOL Transfer Process](/assets/docs/core/transactions/sol-transfer-process.svg)

### Simple SOL Transfer

Here is a [Solana Playground](https://beta.solpg.io/656a0ea7fb53fa325bfd0c3e)
example of how to build a SOL transfer instruction using the
`SystemProgram.transfer` method:

```typescript
import {
  LAMPORTS_PER_SOL,
  SystemProgram,
  Transaction,
} from "@solana/web3.js";

// `sender` and `receiver` are keypairs defined in the Playground example

// Define the amount to transfer
const transferAmount = 0.01; // 0.01 SOL

// Create a transfer instruction for transferring SOL from wallet_1 to wallet_2
const transferInstruction = SystemProgram.transfer({
  fromPubkey: sender.publicKey,
  toPubkey: receiver.publicKey,
  lamports: transferAmount * LAMPORTS_PER_SOL, // Convert transferAmount to lamports
});

// Add the transfer instruction to a new transaction
const transaction = new Transaction().add(transferInstruction);
```

Run the script and inspect the transaction details logged to the console. In the
sections below, we'll walk through the details of what's happening under the
hood.

## Transaction

A Solana
[transaction](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/sdk/src/transaction/mod.rs#L173)
consists of:

1. [Signatures](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/sdk/src/signature.rs#L27):
   An array of signatures included on the transaction.
2. [Message](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/sdk/program/src/message/legacy.rs#L110):
   List of instructions to be processed atomically.

![Transaction Format](/assets/docs/core/transactions/tx_format.png)

The structure of a transaction message comprises:

- [Message Header](/docs/core/transactions#message-header): Specifies the number
  of signer and read-only accounts.
- [Account Addresses](/docs/core/transactions#array-of-account-addresses): An
  array of account addresses required by the instructions on the transaction.
- [Recent Blockhash](/docs/core/transactions#recent-blockhash): Acts as a
  timestamp for the transaction.
- [Instructions](/docs/core/transactions#array-of-instructions): An array of
  instructions to be executed.
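To see these fields concretely, here is a minimal sketch (assuming the legacy `@solana/web3.js` `Transaction` class and a devnet connection, with placeholder keypairs standing in for real wallets) that builds a simple transfer transaction and logs the compiled message's header, account keys, recent blockhash, and instructions:

```typescript
import {
  Connection,
  Keypair,
  LAMPORTS_PER_SOL,
  SystemProgram,
  Transaction,
  clusterApiUrl,
} from "@solana/web3.js";

const connection = new Connection(clusterApiUrl("devnet"), "confirmed");

// Placeholder keypairs standing in for the sender and receiver wallets
const sender = Keypair.generate();
const receiver = Keypair.generate();

const transaction = new Transaction().add(
  SystemProgram.transfer({
    fromPubkey: sender.publicKey,
    toPubkey: receiver.publicKey,
    lamports: 0.01 * LAMPORTS_PER_SOL,
  }),
);

// A fee payer and recent blockhash are required before the message can be compiled
transaction.feePayer = sender.publicKey;
transaction.recentBlockhash = (await connection.getLatestBlockhash()).blockhash;

// Compile the transaction into its message and inspect the fields listed above
const message = transaction.compileMessage();
console.log("Header:", message.header);
console.log("Account keys:", message.accountKeys.map(key => key.toBase58()));
console.log("Recent blockhash:", message.recentBlockhash);
console.log("Instructions:", message.instructions);
```

For this single transfer, the logged header should show one required signature and one read-only unsigned account (the System Program), with three account keys in total: the sender, the receiver, and the System Program.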
+ +![Transaction Message](/assets/docs/core/transactions/legacy_message.png) + +### Transaction Size + +The Solana network adheres to a maximum transmission unit (MTU) size of 1280 +bytes, consistent with the [IPv6 MTU](https://en.wikipedia.org/wiki/IPv6_packet) +size constraints to ensure fast and reliable transmission of cluster information +over UDP. After accounting for the necessary headers (40 bytes for IPv6 and 8 +bytes for the fragment header), +[1232 bytes remain available for packet data](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/sdk/src/packet.rs#L16-L21), +such as serialized transactions. + +This means that the total size of a Solana transaction is limited to 1232 bytes. +The combination of the signatures and the message cannot exceed this limit. + +- Signatures: Each signature requires 64 bytes. The number of signatures can + vary, depending on the transaction's requirements. +- Message: The message includes instructions, accounts, and additional metadata, + with each account requiring 32 bytes. The combined size of the accounts plus + metadata can vary, depending on the instructions included in the transaction. + +![Transaction Format](/assets/docs/core/transactions/issues_with_legacy_txs.png) + +### Message Header + +The +[message header](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/sdk/program/src/message/mod.rs#L96) +specifies the privileges of accounts included in the transaction's account +address array. It is comprised of three bytes, each containing a u8 integer, +which collectively specify: + +1. The number of required signatures for the transaction and message version + number. +2. The number of read-only account addresses that require signatures. +3. The number of read-only account addresses that do not require signatures. + +![Message Header](/assets/docs/core/transactions/message_header.png) + +### Compact-Array Format + +A compact array in the context of a transaction message refers to an array +serialized in the following format: + +1. The length of the array, encoded as + [compact-u16](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/sdk/program/src/short_vec.rs). +2. The individual items of the array listed sequentially after the encoded + length. + +![Compact array format](/assets/docs/core/transactions/compact_array_format.png) + +This encoding method is used to specify the lengths of both the +[Account Addresses](/docs/core/transactions#array-of-account-addresses) and +[Instructions](/docs/core/transactions#array-of-instructions) arrays within a +transaction message. + +### Array of Account Addresses + +A transaction message includes an array containing all the +[account addresses](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/sdk/program/src/message/legacy.rs#L119) +needed for the instructions within the transaction. + +This array starts with a +[compact-u16](/docs/core/transactions#compact-array-format) encoding of the +number of account addresses, followed by the addresses ordered by the privileges +for the accounts. The metadata in the message header is used to determine the +number of accounts in each section. 
+ +- Accounts that are writable and signers +- Accounts that are read-only and signers +- Accounts that are writable and not signers +- Accounts that are read-only and not signers + +![Compact array of account addresses](/assets/docs/core/transactions/compat_array_of_account_addresses.png) + +### Recent Blockhash + +All transactions include a +[recent blockhash](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/sdk/program/src/message/legacy.rs#L122) +to act as a timestamp for the transaction. The blockhash is used to prevent +duplications and eliminate stale transactions. + +The maximum age of a transaction's blockhash is 150 blocks (~1 minute assuming +400ms block times). If a transaction's blockhash is 150 blocks older than the +latest blockhash, it is considered expired. This means that transactions not +processed within a specific timeframe will never be executed. + +You can use the [`getLatestBlockhash`](/docs/rpc/http/getlatestblockhash) RPC +method to get the current blockhash and last block height at which the blockhash +will be valid. Here is an example on +[Solana Playground](https://beta.solpg.io/661a06e1cffcf4b13384d046). + +### Array of Instructions + +A transaction message includes an array of all +[instructions](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/sdk/program/src/message/legacy.rs#L128) +requesting to be processed. Instructions within a transaction message are in the +format of +[CompiledInstruction](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/sdk/program/src/instruction.rs#L633). + +Much like the array of account addresses, this compact array starts with a +[compact-u16](/docs/core/transactions#compact-array-format) encoding of the +number of instructions, followed by an array of instructions. Each instruction +in the array specifies the following information: + +1. **Program ID**: Identifies an on-chain program that will process the + instruction. This is represented as an u8 index pointing to an account + address within the account addresses array. +2. **Compact array of account address indexes**: Array of u8 indexes pointing to + the account addresses array for each account required by the instruction. +3. **Compact array of opaque u8 data**: A u8 byte array specific to the program + invoked. This data specifies the instruction to invoke on the program along + with any additional data that the instruction requires (such as function + arguments). + +![Compact array of Instructions](/assets/docs/core/transactions/compact_array_of_ixs.png) + +### Example Transaction Structure + +Below is an example of the structure of a transaction including a single +[SOL transfer](/docs/core/transactions#basic-example) instruction. It shows the +message details including the header, account keys, blockhash, and the +instructions, along with the signature for the transaction. + +- `header`: Includes data used to specify the read/write and signer privileges + in the `accountKeys` array. + +- `accountKeys`: Array including account addresses for all instructions on the + transaction. + +- `recentBlockhash`: The blockhash included on the transaction when the + transaction was created. + +- `instructions`: Array including all the instructions on the transaction. Each + `account` and `programIdIndex` in an instruction references the `accountKeys` + array by index. 
+ +- `signatures`: Array including signatures for all accounts required as signers + by the instructions on the transaction. A signature is created by signing the + transaction message using the corresponding private key for an account. + +```json +"transaction": { + "message": { + "header": { + "numReadonlySignedAccounts": 0, + "numReadonlyUnsignedAccounts": 1, + "numRequiredSignatures": 1 + }, + "accountKeys": [ + "3z9vL1zjN6qyAFHhHQdWYRTFAcy69pJydkZmSFBKHg1R", + "5snoUseZG8s8CDFHrXY2ZHaCrJYsW457piktDmhyb5Jd", + "11111111111111111111111111111111" + ], + "recentBlockhash": "DzfXchZJoLMG3cNftcf2sw7qatkkuwQf4xH15N5wkKAb", + "instructions": [ + { + "accounts": [ + 0, + 1 + ], + "data": "3Bxs4NN8M2Yn4TLb", + "programIdIndex": 2, + "stackHeight": null + } + ], + "indexToProgramIds": {} + }, + "signatures": [ + "5LrcE2f6uvydKRquEJ8xp19heGxSvqsVbcqUeFoiWbXe8JNip7ftPQNTAVPyTK7ijVdpkzmKKaAQR7MWMmujAhXD" + ] + } +``` + +## Instruction + +An +[instruction](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/sdk/program/src/instruction.rs#L329) +is a request to process a specific action on-chain and is the smallest +contiguous unit of execution logic in a +[program](/docs/core/accounts#program-account). + +When building an instruction to add to a transaction, each instruction must +include the following information: + +- **Program address**: Specifies the program being invoked. +- **Accounts**: Lists every account the instruction reads from or writes to, + including other programs, using the `AccountMeta` struct. +- **Instruction Data**: A byte array that specifies which + [instruction handler](/docs/terminology#instruction-handler) on the program to + invoke, plus any additional data required by the instruction handler (function + arguments). + +![Transaction Instruction](/assets/docs/core/transactions/instruction.svg) + +### AccountMeta + +For every account required by an instruction, the following info must be +specified: + +- `pubkey`: The on-chain address of an account +- `is_signer`: Specify if the account is required as a signer on the transaction +- `is_writable`: Specify if the account data will be modified + +This information is referred to as the +[AccountMeta](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/sdk/program/src/instruction.rs#L539). + +![AccountMeta](/assets/docs/core/transactions/accountmeta.svg) + +By specifying all accounts required by an instruction, and whether each account +is writable, transactions can be processed in parallel. + +For example, two transactions that do not include any accounts that write to the +same state can be executed at the same time. + +### Example Instruction Structure + +Below is an example of the structure of a +[SOL transfer](/docs/core/transactions#basic-examples) instruction which details +the account keys, program ID, and data required by the instruction. + +- `keys`: Includes the `AccountMeta` for each account required by an + instruction. +- `programId`: The address of the program which contains the execution logic for + the instruction invoked. 
+- `data`: The instruction data for the instruction as a buffer of bytes + +``` +{ + "keys": [ + { + "pubkey": "3z9vL1zjN6qyAFHhHQdWYRTFAcy69pJydkZmSFBKHg1R", + "isSigner": true, + "isWritable": true + }, + { + "pubkey": "BpvxsLYKQZTH42jjtWHZpsVSa7s6JVwLKwBptPSHXuZc", + "isSigner": false, + "isWritable": true + } + ], + "programId": "11111111111111111111111111111111", + "data": [2,0,0,0,128,150,152,0,0,0,0,0] +} +``` + +## Expanded Example + +The details for building program instructions are often abstracted away by +client libraries. However, if one is not available, you can always fall back to +manually building the instruction. + +### Manual SOL Transfer + +Here is a [Solana Playground](https://beta.solpg.io/656a102efb53fa325bfd0c3f) +example of how to manually build a SOL transfer instruction: + +```typescript +// Define the amount to transfer +const transferAmount = 0.01; // 0.01 SOL + +// Instruction index for the SystemProgram transfer instruction +const transferInstructionIndex = 2; + +// Create a buffer for the data to be passed to the transfer instruction +const instructionData = Buffer.alloc(4 + 8); // uint32 + uint64 +// Write the instruction index to the buffer +instructionData.writeUInt32LE(transferInstructionIndex, 0); +// Write the transfer amount to the buffer +instructionData.writeBigUInt64LE(BigInt(transferAmount * LAMPORTS_PER_SOL), 4); + +// Manually create a transfer instruction for transferring SOL from sender to receiver +const transferInstruction = new TransactionInstruction({ + keys: [ + { pubkey: sender.publicKey, isSigner: true, isWritable: true }, + { pubkey: receiver.publicKey, isSigner: false, isWritable: true }, + ], + programId: SystemProgram.programId, + data: instructionData, +}); + +// Add the transfer instruction to a new transaction +const transaction = new Transaction().add(transferInstruction); +``` + +Under the hood, the +[simple example](/docs/core/transactions#simple-sol-transfer) using the +`SystemProgram.transfer` method is functionally equivalent to the more verbose +example above. The `SystemProgram.transfer` method simply abstracts away the +details of creating the instruction data buffer and `AccountMeta` for each +account required by the instruction. diff --git a/content/docs/economics/index.mdx b/content/docs/economics/index.mdx new file mode 100644 index 000000000..49493b030 --- /dev/null +++ b/content/docs/economics/index.mdx @@ -0,0 +1,51 @@ +--- +title: Economics +altRoutes: + - /docs/intro/economics +h1: Solana Economics Overview +--- + +**Subject to change.** + +Solana's crypto-economic system is designed to promote a healthy, long term +self-sustaining economy with participant incentives aligned to the security and +decentralization of the network. The main participants in this economy are +validation-clients. Their contributions to the network, state validation, and +their requisite incentive mechanisms are discussed below. + +The main channels of participant remittances are referred to as protocol-based +rewards and transaction fees. Protocol-based rewards are generated from +inflationary issuances from a protocol-defined inflation schedule. These rewards +will constitute the total protocol-based reward delivered to validation clients, +the remaining sourced from transaction fees. In the early days of the network, +it is likely that protocol-based rewards, deployed based on predefined issuance +schedule, will drive the majority of participant incentives to participate in +the network. 
+ +These protocol-based rewards are calculated per epoch and distributed across the +active delegated stake and validator set (per validator commission). As +discussed further below, the per annum inflation rate is based on a +pre-determined disinflationary schedule. This provides the network with supply +predictability which supports long term economic stability and security. + +Transaction fees are participant-to-participant transfers, attached to network +interactions as a motivation and compensation for the inclusion and execution of +a proposed transaction. A mechanism for long-term economic stability and forking +protection through partial burning of each transaction fee is also discussed +below. + +First, an overview of the inflation design is presented. This section starts +with defining and clarifying +[Terminology](/docs/economics/inflation/terminology) commonly used +subsequently in the discussion of inflation and the related components. +Following that, we outline Solana's proposed +[Inflation Schedule](/docs/economics/inflation/inflation_schedule), i.e. the +specific parameters that uniquely parameterize the protocol-driven inflationary +issuance over time. Next is a brief section on +Adjusted Staking Yield, +and how token dilution might influence staking behavior. + +An overview of [Transaction Fees](/docs/core/fees#transaction-fees) on Solana +is followed by a discussion of [Storage Rent Economics](/docs/core/fees#rent) +in which we describe an implementation of storage rent to account for the +externality costs of maintaining the active state of the ledger. diff --git a/content/docs/economics/inflation/inflation-schedule.mdx b/content/docs/economics/inflation/inflation-schedule.mdx new file mode 100644 index 000000000..a51b9f186 --- /dev/null +++ b/content/docs/economics/inflation/inflation-schedule.mdx @@ -0,0 +1,85 @@ +--- +title: Proposed Inflation Schedule +altRoutes: + - /docs/economics/inflation/inflation_schedule + - /docs/intro/economics +--- + +As mentioned above, the network's _Inflation Schedule_ is uniquely described by +three parameters: _Initial Inflation Rate_, _Disinflation Rate_ and _Long-term +Inflation Rate_. When considering these numbers, there are many factors to take +into account: + +- A large portion of the SOL issued via inflation will be distributed to + stake-holders in proportion to the SOL they have staked. We want to ensure + that the _Inflation Schedule_ design results in reasonable _Staking Yields_ + for token holders who delegate SOL and for validation service providers (via + commissions taken from _Staking Yields_). +- The primary driver of _Staked Yield_ is the amount of SOL staked divided by + the total amount of SOL (% of total SOL staked). Therefore the distribution + and delegation of tokens across validators are important factors to understand + when determining initial inflation parameters. +- Yield throttling is a current area of research that would impact + _staking-yields_. This is not taken into consideration in the discussion here + or the modeling below. +- Overall token issuance - i.e. what do we expect the Current Total Supply to be + in 10 years, or 20 years? +- Long-term, steady-state inflation is an important consideration not only for + sustainable support for the validator ecosystem and the Solana Foundation + grant programs, but also should be tuned in consideration with expected token + losses and burning over time. 
+- The rate at which we expect network usage to grow, as a consideration to the + disinflationary rate. Over time, we plan for inflation to drop and expect that + usage will grow. + +Based on these considerations and the community discussions following the +initial design, the Solana Foundation proposes the following Inflation Schedule +parameters: + +- Initial Inflation Rate: 8% +- Disinflation Rate: -15% +- Long-term Inflation Rate: 1.5% + +These parameters define the proposed _Inflation Schedule_. Below we show +implications of these parameters. These plots only show the impact of inflation +issuances given the Inflation Schedule as parameterized above. They _do not +account_ for other factors that may impact the Total Supply such as fee/rent +burning, slashing or other unforeseen future token destruction events. +Therefore, what is presented here is an **upper limit** on the amount of SOL +issued via inflation. + +![Example proposed inflation schedule graph](/assets/docs/economics/proposed_inflation_schedule.png) + +In the above graph we see the annual inflation rate percentage over time, given +the inflation parameters proposed above. + +![Example proposed total supply graph](/assets/docs/economics/proposed_total_supply.png) + +Similarly, here we see the _Total Current Supply_ of SOL [MM] over time, +assuming an initial _Total Current Supply_ of `488,587,349 SOL` (i.e. for this +example, taking the _Total Current Supply_ as of `2020-01-25` and simulating +inflation starting from that day). + +Setting aside validator uptime and commissions, the expected Staking Yield and +Adjusted Staking Yield metrics are then primarily a function of the % of total +SOL staked on the network. Therefore we can model _Staking Yield_, if we +introduce an additional parameter _% of Staked SOL_: + + + +This parameter must be estimated because it is a dynamic property of the token +holders and staking incentives. The values of _% of Staked SOL_ presented here +range from 60% - 90%, which we feel covers the likely range we expect to +observe, based on feedback from the investor and validator communities as well +as what is observed on comparable Proof-of-Stake protocols. + +![Example staked yields graph](/assets/docs/economics/example_staked_yields.png) + +Again, the above shows an example _Staked Yield_ that a staker might expect over +time on the Solana network with the _Inflation Schedule_ as specified. This is +an idealized _Staked Yield_ as it neglects validator uptime impact on rewards, +validator commissions, potential yield throttling and potential slashing +incidents. It additionally ignores that _% of Staked SOL_ is dynamic by design - +the economic incentives set up by this _Inflation Schedule_ are more clearly +seen when _Token Dilution_ is taken into account (see the **Adjusted Staking +Yield** section below). diff --git a/content/docs/economics/inflation/terminology.mdx b/content/docs/economics/inflation/terminology.mdx new file mode 100644 index 000000000..3238d85da --- /dev/null +++ b/content/docs/economics/inflation/terminology.mdx @@ -0,0 +1,105 @@ +--- +title: Inflation Terminology +h1: Inflation Related Terminology +--- + +Many terms are thrown around when discussing inflation and the related +components (e.g. 
rewards/yield/interest), so we define and clarify some
+commonly used concepts here:
+
+### Total Current Supply [SOL]
+
+The total amount of tokens (locked or unlocked) that have been generated (via
+genesis block or protocol inflation) minus any tokens that have been burnt (via
+transaction fees or other mechanisms) or slashed. At network launch, 500,000,000
+SOL were instantiated in the genesis block. Since then, the Total Current Supply
+has been reduced by the burning of transaction fees and a planned token
+reduction event. Solana's _Total Current Supply_ can be found at
+https://explorer.solana.com/supply
+
+### Inflation Rate [%]
+
+The Solana protocol will automatically create new tokens on a predetermined
+inflation schedule (discussed below). The _Inflation Rate [%]_ is the annualized
+growth rate of the _Total Current Supply_ at any point in time.
+
+### Inflation Schedule
+
+A deterministic description of token issuance over time. The Solana Foundation
+is proposing a disinflationary _Inflation Schedule_. I.e., inflation starts at
+its highest value, and the rate reduces over time until stabilizing at a
+predetermined long-term inflation rate (see discussion below). This schedule is
+completely and uniquely parameterized by three numbers:
+
+- **Initial Inflation Rate [%]**: The starting _Inflation Rate_ for when
+  inflation is first enabled. Token issuance rate can only decrease from this
+  point.
+- **Disinflation Rate [%]**: The rate at which the _Inflation Rate_ is reduced.
+- **Long-term Inflation Rate [%]**: The stable, long-term _Inflation Rate_ to be
+  expected.
+
+### Effective Inflation Rate [%]
+
+The inflation rate actually observed on the Solana network after accounting for
+other factors that might decrease the _Total Current Supply_. Note that it is
+not possible for tokens to be created outside of what is described by the
+_Inflation Schedule_.
+
+- While the _Inflation Schedule_ determines how the protocol issues SOL, this
+  neglects the concurrent elimination of tokens in the ecosystem due to various
+  factors. The primary token burning mechanism is the burning of a portion of
+  each transaction fee. 50% of each transaction fee is burned, with the
+  remaining fee retained by the validator that processes the transaction.
+- Additional factors such as loss of private keys and slashing events should
+  also be considered in a holistic analysis of the _Effective Inflation Rate_.
+  For example, it's estimated that 10-20% of all BTC have been lost and are
+  unrecoverable and that networks may experience similar yearly losses at the
+  rate of 1-2%.
+
+### Staking Yield [%]
+
+The rate of return (aka _interest_) earned on SOL staked on the network. It is
+often quoted as an annualized rate (e.g. "the network _staking yield_ is
+currently 10% per year").
+
+- _Staking yield_ is of great interest to validators and token holders who wish
+  to delegate their tokens to avoid token dilution due to inflation (the extent
+  of which is discussed below).
+- 100% of inflationary issuances are to be distributed to staked token-holders
+  in proportion to their staked SOL and to validators who charge a commission on
+  the rewards earned by their delegated SOL.
+  - There may be future consideration for an additional split of inflation
+    issuance with the introduction of _Archivers_ into the economy. _Archivers_
+    are network participants who provide a decentralized storage service and
+    should also be incentivized with token distribution from inflation issuances
+    for this service.
- Similarly, early designs specified a fixed percentage of + inflationary issuance to be delivered to the Foundation treasury for + operational expenses and future grants. However, inflation will be launching + without any portion allocated to the Foundation. +- _Staking yield_ can be calculated from the _Inflation Schedule_ along with the + fraction of the _Total Current Supply_ that is staked at any given time. The + explicit relationship is given by: + + + +### Token Dilution [%] + +Dilution is defined here as the change in proportional representation of a set +of tokens within a larger set due to the introduction of new tokens. In +practical terms, we discuss the dilution of staked or un-staked tokens due to +the introduction and distribution of inflation issuance across the network. As +will be shown below, while dilution impacts every token holder, the _relative_ +dilution between staked and un-staked tokens should be the primary concern to +un-staked token holders. Staking tokens, which will receive their proportional +distribution of inflation issuance, should assuage any dilution concerns for +staked token holders. I.e. dilution from 'inflation' is offset by the +distribution of new tokens to staked token holders, nullifying the 'dilutive' +effects of the inflation for that group. + +### Adjusted Staking Yield [%] + +A complete appraisal of earning potential from staking tokens should take into +account staked _Token Dilution_ and its impact on the _Staking Yield_. For this, +we define the _Adjusted Staking Yield_ as the change in fractional token supply +ownership of staked tokens due to the distribution of inflation issuance. I.e. +the positive dilutive effects of inflation. diff --git a/content/docs/economics/meta.json b/content/docs/economics/meta.json new file mode 100644 index 000000000..7fda53018 --- /dev/null +++ b/content/docs/economics/meta.json @@ -0,0 +1,5 @@ +{ + "title": "Economics", + "pages": ["inflation", "staking"], + "defaultOpen": true +} diff --git a/content/docs/economics/staking/index.mdx b/content/docs/economics/staking/index.mdx new file mode 100644 index 000000000..3e95bcbb9 --- /dev/null +++ b/content/docs/economics/staking/index.mdx @@ -0,0 +1,98 @@ +--- +title: Staking +h1: Staking on Solana +--- + +_Note before reading: All references to increases in values are in absolute +terms with regards to balance of SOL. This document makes no suggestion as to +the monetary value of SOL at any time._ + +By staking your SOL tokens, you help secure the network and +[earn rewards](https://docs.anza.xyz/implemented-proposals/staking-rewards) +while doing so. + +You can stake by delegating your tokens to validators who process transactions +and run the network. + +Delegating stake is a shared-risk shared-reward financial model that may provide +returns to holders of tokens delegated for a long period. This is achieved by +aligning the financial incentives of the token-holders (delegators) and the +validators to whom they delegate. + +The more stake delegated to a validator, the more often this validator is chosen +to write new transactions to the ledger. The more transactions the validator +writes, the more rewards the validator and its delegators earn. Validators who +configure their systems to be able to process more transactions earn +proportionally more rewards and because they keep the network running as fast +and as smoothly as possible. 
+
+Validators incur costs by running and maintaining their systems, and this is
+passed on to delegators in the form of a fee collected as a percentage of
+rewards earned. This fee is known as a _commission_. Since validators earn more
+rewards the more stake is delegated to them, they may compete with one another
+to offer the lowest commission for their services.
+
+Although this is not implemented in the Solana protocol today, in the future,
+delegators could risk losing tokens when staking through a process known as
+_slashing_. Slashing involves the removal and destruction of a portion of a
+validator's SOL in response to intentional malicious behavior, such as creating
+invalid transactions or censoring certain types of transactions or network
+participants.
+
+There is currently no in-protocol implementation of slashing. For more
+information on slashing see the
+[slashing roadmap](https://docs.anza.xyz/proposals/optimistic-confirmation-and-slashing#slashing-roadmap).
+
+## How do I stake my SOL tokens?
+
+You can stake SOL by moving your tokens into a wallet that supports staking. The
+wallet provides steps to create a stake account and do the delegation.
+
+#### Supported Wallets
+
+Many web and mobile wallets support Solana staking operations. Please check with
+your favorite wallet's maintainers regarding current support status.
+
+#### Solana command line tools
+
+- Solana command line tools can perform all stake operations in conjunction with
+  a CLI-generated keypair file wallet, a paper wallet, or a connected Ledger
+  Nano.
+  [Staking commands using the Solana Command Line Tools](https://docs.anza.xyz/cli/examples/delegate-stake).
+
+#### Create a Stake Account
+
+Follow the wallet's instructions for creating a stake account. This account
+will be of a different type than one used to simply send and receive tokens.
+
+#### Select a Validator
+
+Follow the wallet's instructions for selecting a validator. You can get
+information about potentially performant validators from the links below. The
+Solana Foundation does not recommend any particular validator.
+
+The site solanabeach.io is built and maintained by one of our validators,
+Staking Facilities. It provides some high-level graphical information about
+the network as a whole, as well as a list of each validator and some recent
+performance statistics about each one.
+
+- https://solanabeach.io
+
+To view block production statistics, use the Solana command-line tools:
+
+- `solana validators`
+- `solana block-production`
+
+The Solana team does not make recommendations on how to interpret this
+information. Do your own due diligence.
+
+#### Delegate your Stake
+
+Follow the wallet's instructions for delegating your stake to your chosen
+validator.
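+
+If you want to see what a wallet does under the hood, below is a minimal sketch
+of the create-and-delegate flow using the `StakeProgram` helpers in the legacy
+`@solana/web3.js` SDK. The devnet endpoint, the 1 SOL amount, and the generated
+(unfunded) wallet keypair are placeholder assumptions; in practice you would
+load a funded keypair and choose a validator yourself:
+
+```typescript
+import {
+  Authorized,
+  Connection,
+  Keypair,
+  LAMPORTS_PER_SOL,
+  Lockup,
+  PublicKey,
+  StakeProgram,
+  clusterApiUrl,
+  sendAndConfirmTransaction,
+} from "@solana/web3.js";
+
+const connection = new Connection(clusterApiUrl("devnet"), "confirmed");
+
+// Placeholder: load a funded keypair instead of generating an empty one
+const wallet = Keypair.generate();
+
+// New keypair whose address will hold the stake account
+const stakeAccount = Keypair.generate();
+
+// Pick a validator vote account (here, simply the first current vote account)
+const { current } = await connection.getVoteAccounts();
+const votePubkey = new PublicKey(current[0].votePubkey);
+
+// Create the stake account with rent plus 1 SOL to delegate
+const rent = await connection.getMinimumBalanceForRentExemption(
+  StakeProgram.space,
+);
+const createStakeAccountTx = StakeProgram.createAccount({
+  fromPubkey: wallet.publicKey,
+  stakePubkey: stakeAccount.publicKey,
+  authorized: new Authorized(wallet.publicKey, wallet.publicKey),
+  lockup: new Lockup(0, 0, PublicKey.default),
+  lamports: rent + 1 * LAMPORTS_PER_SOL,
+});
+await sendAndConfirmTransaction(connection, createStakeAccountTx, [
+  wallet,
+  stakeAccount,
+]);
+
+// Delegate the stake account to the chosen validator's vote account
+const delegateTx = StakeProgram.delegate({
+  stakePubkey: stakeAccount.publicKey,
+  authorizedPubkey: wallet.publicKey,
+  votePubkey,
+});
+await sendAndConfirmTransaction(connection, delegateTx, [wallet]);
+```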
+ +## Stake Account Details + +For more information about the operations and permissions associated with a +stake account, please see +[Stake Accounts](/docs/economics/staking/stake-accounts) diff --git a/content/docs/economics/staking/meta.json b/content/docs/economics/staking/meta.json new file mode 100644 index 000000000..a722de3b1 --- /dev/null +++ b/content/docs/economics/staking/meta.json @@ -0,0 +1,4 @@ +{ + "title": "Staking", + "pages": ["stake-accounts", "stake-programming"] +} diff --git a/content/docs/economics/staking/stake-accounts.mdx b/content/docs/economics/staking/stake-accounts.mdx new file mode 100644 index 000000000..d50703c48 --- /dev/null +++ b/content/docs/economics/staking/stake-accounts.mdx @@ -0,0 +1,145 @@ +--- +title: Stake Accounts +--- + +A stake account on Solana can be used to delegate tokens to validators on the +network to potentially earn rewards for the owner of the stake account. Stake +accounts are created and managed differently than a traditional wallet address, +known as a _system account_. A system account is only able to send and receive +SOL from other accounts on the network, whereas a stake account supports more +complex operations needed to manage a delegation of tokens. + +Stake accounts on Solana also work differently than those of other +Proof-of-Stake blockchain networks that you may be familiar with. This document +describes the high-level structure and functions of a Solana stake account. + +#### Account Address + +Each stake account has a unique address which can be used to look up the account +information in the command line or in any network explorer tools. However, +unlike a wallet address in which the holder of the address's keypair controls +the wallet, the keypair associated with a stake account address does not +necessarily have any control over the account. In fact, a keypair or private key +may not even exist for a stake account's address. + +The only time a stake account's address has a keypair file is when +[creating a stake account using the command line tools](https://docs.anza.xyz/cli/examples/delegate-stake#create-a-stake-account). +A new keypair file is created first only to ensure that the stake account's +address is new and unique. + +#### Understanding Account Authorities + +Certain types of accounts may have one or more _signing authorities_ associated +with a given account. An account authority is used to sign certain transactions +for the account it controls. This is different from some other blockchain +networks where the holder of the keypair associated with the account's address +controls all of the account's activity. + +Each stake account has two signing authorities specified by their respective +address, each of which is authorized to perform certain operations on the stake +account. + +The _stake authority_ is used to sign transactions for the following operations: + +- Delegating stake +- Deactivating the stake delegation +- Splitting the stake account, creating a new stake account with a portion of + the funds in the first account +- Merging two stake accounts into one +- Setting a new stake authority + +The _withdraw authority_ signs transactions for the following: + +- Withdrawing un-delegated stake into a wallet address +- Setting a new withdraw authority +- Setting a new stake authority + +The stake authority and withdraw authority are set when the stake account is +created, and they can be changed to authorize a new signing address at any time. 
+The stake and withdraw authority can be the same address or two different +addresses. + +The withdraw authority keypair holds more control over the account as it is +needed to liquidate the tokens in the stake account, and can be used to reset +the stake authority if the stake authority keypair becomes lost or compromised. + +Securing the withdraw authority against loss or theft is of utmost importance +when managing a stake account. + +#### Multiple Delegations + +Each stake account may only be used to delegate to one validator at a time. All +of the tokens in the account are either delegated or un-delegated, or in the +process of becoming delegated or un-delegated. To delegate a fraction of your +tokens to a validator, or to delegate to multiple validators, you must create +multiple stake accounts. + +This can be accomplished by creating multiple stake accounts from a wallet +address containing some tokens, or by creating a single large stake account and +using the stake authority to split the account into multiple accounts with token +balances of your choosing. + +The same stake and withdraw authorities can be assigned to multiple stake +accounts. + +#### Merging stake accounts + +Two stake accounts that have the same authorities and lockup can be merged into +a single resulting stake account. A merge is possible between two stakes in the +following states with no additional conditions: + +- two deactivated stakes +- an inactive stake into an activating stake during its activation epoch + +For the following cases, the voter pubkey and vote credits observed must match: + +- two activated stakes +- two activating accounts that share an activation epoch, during the activation + epoch + +All other combinations of stake states will fail to merge, including all +"transient" states, where a stake is activating or deactivating with a non-zero +effective stake. + +#### Delegation Warmup and Cooldown + +When a stake account is delegated, or a delegation is deactivated, the operation +does not take effect immediately. + +A delegation or deactivation takes several [epochs](/docs/terminology#epoch) +to complete, with a fraction of the delegation becoming active or inactive at +each epoch boundary after the transaction containing the instructions has been +submitted to the cluster. + +There is also a limit on how much total stake can become delegated or +deactivated in a single epoch, to prevent large sudden changes in stake across +the network as a whole. Since warmup and cooldown are dependent on the behavior +of other network participants, their exact duration is difficult to predict. +Details on the warmup and cooldown timing can be found +[here](https://docs.anza.xyz/consensus/stake-delegation-and-rewards#stake-warmup-cooldown-withdrawal). + +#### Lockups + +Stake accounts can have a lockup which prevents the tokens they hold from being +withdrawn before a particular date or epoch has been reached. While locked up, +the stake account can still be delegated, un-delegated, or split, and its stake +authority can be changed as normal. Only withdrawal into another wallet or +updating the withdraw authority is not allowed. + +A lockup can only be added when a stake account is first created, but it can be +modified later, by the _lockup authority_ or _custodian_, the address of which +is also set when the account is created. + +#### Destroying a Stake Account + +Like other types of accounts on the Solana network, a stake account that has a +balance of 0 SOL is no longer tracked. 
If a stake account is not delegated and +all of the tokens it contains are withdrawn to a wallet address, the account at +that address is effectively destroyed, and will need to be manually re-created +for the address to be used again. + +#### Viewing Stake Accounts + +Stake account details can be viewed on the +[Solana Explorer](http://explorer.solana.com/accounts) by copying and pasting an +account address into the search bar. diff --git a/content/docs/economics/staking/stake-programming.mdx b/content/docs/economics/staking/stake-programming.mdx new file mode 100644 index 000000000..ed9852168 --- /dev/null +++ b/content/docs/economics/staking/stake-programming.mdx @@ -0,0 +1,28 @@ +--- +title: Stake Programming +--- + +To maximize stake distribution, decentralization, and censorship resistance on +the Solana network, staking can be performed programmatically. The team and +community have developed several on-chain and offchain programs to make stakes +easier to manage. + +#### Stake-o-matic aka Auto-delegation Bots + +This offchain program manages a large population of validators staked by a +central authority. The Solana Foundation uses an auto-delegation bot to +regularly delegate its stake to "non-delinquent" validators that meet specified +performance requirements. + +#### Stake Pools + +This on-chain program pools together SOL to be staked by a manager, allowing SOL +holders to stake and earn rewards without managing stakes. Users deposit SOL in +exchange for SPL tokens (staking derivatives) that represent their ownership in +the stake pool. The pool manager stakes deposited SOL according to their +strategy, perhaps using a variant of an auto-delegation bot as described above. +As stakes earn rewards, the pool and pool tokens grow proportionally in value. +Finally, pool token holders can send SPL tokens back to the stake pool to redeem +SOL, thereby participating in decentralization with much less work required. +More information can be found at the +[SPL stake pool documentation](https://spl.solana.com/stake-pool). diff --git a/content/docs/index.mdx b/content/docs/index.mdx new file mode 100644 index 000000000..46cb34f78 --- /dev/null +++ b/content/docs/index.mdx @@ -0,0 +1,102 @@ +--- +title: Solana Documentation +seoTitle: Learn how the Solana blockchain works +description: + "Solana is the high-performance blockchain designed for mass adoption. Learn + why Solana is the top choice for developers looking to build scalable + blockchain applications." +altRoutes: + - /docs/intro/history + - /docs/intro + - /docs/intro/overview +isHiddenInNavSidebar: true +--- + +Solana is a blockchain built for mass adoption. It's a high-performance network +that is utilized for a range of use cases, including finance, NFTs, payments, +and gaming. Solana operates as a single global state machine and is open, +interoperable and decentralized. + +## Getting started + +Dive right into Solana to start building or setting up your local environment. + +- [Quick Start](/docs/intro/quick-start) - Build and deploy your first on-chain + Solana program, directly in your browser using Solana Playground +- [Setup Local Environment](/docs/intro/installation) - Install the Solana CLI + to get your local development environment setup + +## Start learning + +Build a strong understanding of the core concepts that make Solana different +from other blockchains. 
+ +- [Accounts](/docs/core/accounts) - Data and state storage mechanism for Solana +- [Fees on Solana](/docs/core/fees) - Various costs associated with using the + network. +- [Transactions](/docs/core/transactions) - Collection of instructions for the + blockchain to execute +- [Programs](/docs/core/programs) - The executable code used to perform actions + on the blockchain +- [Programs Derived Address](/docs/core/pda) - Deterministically generated + addresses that allow Solana programs to programmatically "sign" transactions. +- [Cross Program Invocation](/docs/core/cpi) - Core of the "composability" of + Solana, this is how programs can "call" each other. + +## Understanding the architecture + +Get to know the underlying architecture of how the proof-of-stake blockchain +works. + +- [Validators](https://docs.anza.xyz/validator/anatomy) - the individual nodes + that are the backbone of the network +- [Clusters](/docs/core/clusters) - a collection of validators that work + together for consensus + +## Running a validator + +Explore what it takes to operate a Solana validator and help secure the network. + +- [System Requirements](https://docs.anza.xyz/operations/requirements) - + Recommended hardware requirements and expected SOL needed to operate a + validator +- [Quick Start](https://docs.anza.xyz/operations/setup-a-validator) - Setup a + validator and get connected to a cluster for the first time + +## Why Solana? + +Designed to scale, Solana is purpose-built for blockchain applications to reach +millions of users. Instead of worrying about optimizing for the blockchain +layer, developers can focus on building their applications to reach product +market fit. Not only can it scale now for blockchain application needs, but the +network continues to be optimized with user experience in mind. + +Building the best user experience in an application is a top priority for +developers. With blockchains, the user experience is often limited by the +underlying technology, causing slow response times and high fees. Solana's low +fees and 400ms confirmation times enable developers to build applications that +are user-friendly and accessible to everyone. + +## Solana Features + +| Feature | Description | +| --------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Onchain Program Development | The ability to develop and deploy programs onchain. Users can interact with these programs permissionlessly, removing the need for middleman servers to build applications. | +| 400ms Slot Times | Every transaction sent by users on Solana is confirmed in a block. With a target slot time of 400ms for each block, interactions by users do not have to wait, providing the best in class user experience of any blockchain. | +| Low Fees | Fees on Solana are well known to be low. With a median fee of 0.00064 SOL per transaction, Solana enables building for users across the world regardless of background. | +| High Throughput | Scaling to thousands of transactions per second, Solana is built to scale with your application's user needs. | + +## How to Use These Docs + +On the left, you will find the docs sidebar. This lists the documentation in +order from basic to more advanced information. If you are new to Solana, we +recommend starting from the top and working your way down. 
However, you're +welcome to read them in any order that you like. + +Once you're ready to start building, check out the +[Quick Start](/docs/intro/quick-start) guide. + +## Need Help? + +Get help from the Solana community on the +[Solana StackExchange](https://solana.stackexchange.com). diff --git a/content/docs/intro/dev.mdx b/content/docs/intro/dev.mdx new file mode 100644 index 000000000..016534597 --- /dev/null +++ b/content/docs/intro/dev.mdx @@ -0,0 +1,194 @@ +--- +title: Intro to Development +description: "Learn how to get started building on Solana" +keywords: + - solana basics + - tutorial + - intro to solana development + - blockchain developer + - web3 developer +h1: Getting Started with Solana Development +--- + +Welcome to the Solana developer docs! + +This page has everything you need to know to get started with Solana +development, including basic requirements, how Solana development works, and the +tools you'll need to get started. + +## High Level Developer Overview + +Development on Solana can be broken down into two main parts: + +1. **Onchain Program Development**: This is where you create and deploy custom + programs directly to the blockchain. Once deployed, anyone who knows how to + communicate with them can use them. You can write these programs in Rust, C, + or C++. Rust has the most support for onchain program development today. +2. **Client Development**: This is where you write software (called + decentralized applications, or dApps) that communicates with onchain + programs. Your apps can submit transactions to perform actions onchain. + Client development can be written in any programming language. + +The "glue" between the client side and the onchain side is the +[Solana JSON RPC API](/docs/rpc). The client-side sends RPC +requests to the Solana network to interact with onchain programs. This is very +similar to normal development between a frontend and backend. The major +difference with working on Solana is that the backend is a global permissionless +blockchain. This means that anyone can interact with your onchain program +without the need of issuing API keys or any other form of permission. + +![How clients work with the Solana blockchain](/assets/docs/intro/developer_flow.png) + +Solana development is a bit different from other blockchains because of its +highly composable onchain programs. This means you can build on top of any +program already deployed, and often you can do so without needing to do any +custom onchain program development. For example, if you wanted to work with +tokens, you could use the [Token Program](/docs/core/tokens) that is already +deployed on the network. All development on your application would be +client-side in your language of choice. + +Developers looking to build on Solana will find that the development stack is +very similar to any other development stack. The main difference is that you'll +be working with a blockchain and have to think about how users potentially +interact with your application onchain instead of just on the frontend. +Developing on Solana still has CI/CD pipelines, testing, debugging tools, a +frontend and backend, and anything you'd find in a normal development flow. + +## What You'll Need to Get Started + +To get started with Solana development, you'll need different tools based on +whether you are developing for client-side, onchain programs, or both. + +### Client-side Development + +If you're developing onchain apps, you should know Rust. 
+ +If you're developing on the client-side, you can work with any programming +language you're comfortable with. Solana has community-contributed SDKs to help +developers interact with the Solana network in most popular languages : + +| Language | SDK | +| ---------- | -------------------------------------------------------------------------------------------------------- | +| RUST | [solana_sdk](https://docs.rs/solana-sdk/latest/solana_sdk/) | +| Typescript | [@solana/web3.js](https://github.com/solana-labs/solana-web3.js) | +| Python | [solders](https://github.com/kevinheavey/solders) | +| Java | [solanaj](https://github.com/skynetcap/solanaj) or [solana4j](https://github.com/LMAX-Exchange/solana4j) | +| C++ | [solcpp](https://github.com/mschneider/solcpp) | +| Go | [solana-go](https://github.com/gagliardetto/solana-go) | +| Kotlin | [solanaKT](https://github.com/metaplex-foundation/SolanaKT) or [sol4k](https://github.com/sol4k/sol4k) | +| Dart | [solana](https://github.com/espresso-cash/espresso-cash-public/tree/master/packages/solana) | +| C# | [solnet](https://github.com/bmresearch/Solnet) | +| GdScript | [godot](https://github.com/Virus-Axel/godot-solana-sdk/) | + +You'll also need a connection with an RPC to interact with the network. You can +either work with a [RPC infrastructure provider](/rpc) or +[run your own RPC node](https://docs.anza.xyz/operations/setup-an-rpc-node). + +To quickly get started with a front-end for your application, you can generate a +customizable Solana scaffold by typing the following into your CLI: + +```bash +npx create-solana-dapp +``` + +This will create a new project with all the necessary files and basic +configuration to get started building on Solana. The scaffold will include both +an example frontend and an onchain program template (if you selected one). You +can read the +[`create-solana-dapp` docs](https://github.com/solana-developers/create-solana-dapp?tab=readme-ov-file#create-solana-dapp) +to learn more. + +### Onchain Program Development + +Onchain program development consists of either writing programs in Rust, C, or +C++. First you'll need to make sure you have Rust installed on your machine. You +can do this with the following command: + +```bash +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +``` + +You'll then need to have the [Solana CLI installed](/docs/intro/installation) +to compile and deploy your programs. You can install the Solana CLI by running +the following command: + +```bash +sh -c "$(curl -sSfL https://release.anza.xyz/stable/install)" +``` + +Using the Solana CLI, it is recommended to run a local validator for testing +your program. To run a local validator after installing the Solana CLI, run the +following command: + +```bash +solana-test-validator +``` + +This will start a local validator on your machine that you can use to test your +programs. You can +[read more about local development in this guide](/docs/intro/installation). + +When building onchain programs, you have a choice to either build with native +Rust (ie, without a framework) or use the Anchor framework. Anchor is a +framework that makes it easier to build on Solana by providing a higher-level +API for developers. Think of Anchor like building with React for your websites +instead of raw Javascript and HTML. While Javascript and HTML give you more +control over your website, React accelerates your development and makes +developing easy. You can read more about [Anchor](https://www.anchor-lang.com/) +on their website. 
+ +You'll need a way to test your program. There are a few different ways to test +your program based on your language preference: + +- [solana-program-test](https://docs.rs/solana-program-test/latest/solana_program_test/) - + Testing framework built in Rust +- [solana-bankrun](https://kevinheavey.github.io/solana-bankrun/) - Testing + framework built for writing Typescript tests +- [bankrun](https://kevinheavey.github.io/solders/tutorials/bankrun.html) - + Testing framework built for writing Python tests + +If you do not want to develop your programs locally, there's also the +[online IDE Solana Playground](https://beta.solpg.io). Solana Playground allows +you to write, test, and deploy programs on Solana. You can get started with +Solana Playground by [following our quick start guide](/docs/intro/quick-start). + +### Developer Environments + +Choosing the right environment based on your work is very important. On Solana, +there are a few different network environments (called clusters) to facilitate +mature testing and CI/CD practices: + +- **Mainnet Beta**: The production network where all the action happens. + Transactions cost real money here. +- **Devnet**: The quality assurance network where you deploy your programs to + test before deploying to production. Think "staging environment". +- **Local**: The local network that you run on your machine using + `solana-test-validator` to test your programs. This should be your first + choice when developing programs. + +## Build by Example + +While you get started building on Solana, there's a few more resources available +to help accelerate your journey: + +- [Solana Cookbook](/developers/cookbook): A collection of + references and code snippets to help you build on Solana. +- [Solana Program Examples](https://github.com/solana-developers/program-examples): + A repository of example programs providing building blocks for different + actions on your programs. +- [Guides](/developers/guides): Tutorials and guides to walk + you through building on Solana. + +## Getting Support + +The best support you can find is on +[Solana StackExchange](https://solana.stackexchange.com/). Search for your +question there first - there's a good chance there will already be a question +asked by someone else, with an answer. If it's not there, add a new question! +Remember to include as much detail as you can in your question, and please use +text (not screenshots) to show error messages, so other people with the same +problem can find your question! + +## Next steps + +[You're now ready to get started building on Solana!](/docs/intro/quick-start) diff --git a/content/docs/intro/installation.mdx b/content/docs/intro/installation.mdx new file mode 100644 index 000000000..c08419412 --- /dev/null +++ b/content/docs/intro/installation.mdx @@ -0,0 +1,686 @@ +--- +title: Installation +seoTitle: Install the Solana CLI and Anchor +description: + A comprehensive guide to setting up your local Solana development environment. + Learn how to install Rust, the Solana CLI, and Anchor Framework on Windows + (WSL), Linux, and Mac. Includes step-by-step instructions for creating + wallets, requesting airdrops, and running a local validator. +altRoutes: + - /developers/guides/getstarted/setup-local-development + - /docs/install + - /install + - /setup +--- + +This section covers the steps to set up your local environment for Solana +development. 
+ +## Install Dependencies + +- Windows users must first install WSL (Windows subsystem for Linux) and then + install the dependencies specified in the Linux section below. +- Linux users should first install the dependencies specified in the Linux + section below. +- Mac users should start with the Rust installation instructions below. + + + + +To develop Solana programs on Windows **you must use +[WSL](https://learn.microsoft.com/en-us/windows/wsl/install)** (Windows +subsystem for Linux). All additional dependencies must be installed through the +Linux terminal. + +Once WSL is installed, install the dependencies specified in the Linux section +below before proceeding to install Rust, Solana CLI, and Anchor CLI. + +To install WSL, run the following command in Windows PowerShell: + +```shell +wsl --install +``` + +The install process will prompt you to create a default user account. + +![WSL Install](/assets/docs/intro/installation/wsl-install.png) + +By default, WSL installs Ubuntu. You can open a Linux terminal by searching +"Ubuntu" in the Search bar. + +![WSL Ubuntu](/assets/docs/intro/installation/wsl-ubuntu-search.png) + +If your Ubuntu terminal looks like the image below, you may encounter an issue +where `ctrl + v` (paste keyboard shortcut) doesn't work in the terminal. + +![Ubuntu Terminal](/assets/docs/intro/installation/wsl-ubuntu-terminal-1.png) + +If you encounter this issue, open Windows Terminal by searching for "Terminal" +in the Search bar. + +![Windows Terminal](/assets/docs/intro/installation/wsl-windows-terminal.png) + +Next, close the Windows Terminal and reopen a Linux terminal by searching for +Ubuntu again. The terminal should now look like the image below, where +`ctrl + v` (paste keyboard shortcut) works. + +![Ubuntu Terminal](/assets/docs/intro/installation/wsl-ubuntu-terminal-2.png) + +If you are using VS Code, the +[WSL extension](https://code.visualstudio.com/docs/remote/wsl-tutorial) enables +you to use WSL and VS Code together. + +![WSL Setup in VS Code](/assets/docs/intro/installation/wsl-vscode.png) + +You should then see the following in the VS Code status bar: + +![WSL: Ubuntu](/assets/docs/intro/installation/wsl-vscode-ubuntu.png) + +Once you have WSL set up, all additional dependencies must be installed through +the Linux terminal. Install the dependencies specified in the Linux section +below before proceeding to install Rust, Solana CLI, and Anchor CLI. + + + + +The following dependencies are required for the Anchor CLI installation. + +First, run the following command: + +```shell +sudo apt-get update +``` + +Next, install the following dependencies: + +```shell +sudo apt-get install -y \ + build-essential \ + pkg-config \ + libudev-dev llvm libclang-dev \ + protobuf-compiler libssl-dev +``` + +If you encounter the following error when installing `protobuf-compiler`, make +sure you first run `sudo apt-get update`: + +``` +Package protobuf-compiler is not available, but is referred to by another package. +This may mean that the package is missing, has been obsoleted, or +is only available from another source +``` + + + + + + + + +### Install Rust + +Solana programs are written in the +[Rust programming language](https://www.rust-lang.org/). + +The recommended installation method for Rust is +[rustup](https://www.rust-lang.org/tools/install). 
+ +Run the following command to install Rust: + +```shell +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y +``` + +You should see the following message after the installation completes: + + + + +``` +Rust is installed now. Great! + +To get started you may need to restart your current shell. +This would reload your PATH environment variable to include +Cargo's bin directory ($HOME/.cargo/bin). + +To configure your current shell, you need to source +the corresponding env file under $HOME/.cargo. + +This is usually done by running one of the following (note the leading DOT): +. "$HOME/.cargo/env" # For sh/bash/zsh/ash/dash/pdksh +source "$HOME/.cargo/env.fish" # For fish +``` + + + + +Run the following command to reload your PATH environment variable to include +Cargo's bin directory: + +```shell +. "$HOME/.cargo/env" +``` + +To verify that the installation was successful, check the Rust version: + +```shell +rustc --version +``` + +You should see output similar to the following: + +``` +rustc 1.80.1 (3f5fd8dd4 2024-08-06) +``` + + + + +### Install the Solana CLI + +The Solana CLI provides all the tools required to build and deploy Solana +programs. + +Install the Solana CLI tool suite using the official install command: + +```shell +sh -c "$(curl -sSfL https://release.anza.xyz/stable/install)" +``` + +You can replace `stable` with the release tag matching the software version of +your desired release (i.e. `v2.0.3`), or use one of the three symbolic channel +names: `stable`, `beta`, or `edge`. + +If it is your first time installing the Solana CLI, you may see the following +message prompting you to add a PATH environment variable: + +``` +Close and reopen your terminal to apply the PATH changes or run the following in your existing shell: + +export PATH="/Users/test/.local/share/solana/install/active_release/bin:$PATH" +``` + + + + +If you are using a Linux or WSL terminal, you can add the PATH environment +variable to your shell configuration file by running the command logged from the +installation or by restarting your terminal. + +```shell +export PATH="$HOME/.local/share/solana/install/active_release/bin:$PATH" +``` + + + + +If you're on Mac using `zsh`, running the default `export PATH` command logged +from the installation does not persist once you close your terminal. + +Instead, you can add the PATH to your shell configuration file by running the +following command: + +```shell +echo 'export PATH="$HOME/.local/share/solana/install/active_release/bin:$PATH"' >> ~/.zshrc +``` + +Then run the following command to refresh the terminal session or restart your +terminal. + +```shell +source ~/.zshrc +``` + + + + +To verify that the installation was successful, check the Solana CLI version: + +```shell +solana --version +``` + +You should see output similar to the following: + +``` +solana-cli 1.18.22 (src:9efdd74b; feat:4215500110, client:Agave) +``` + +You can view all available versions on the +[Agave Github repo](https://github.com/anza-xyz/agave/releases). + + + +Agave is the validator client from [Anza](https://www.anza.xyz/), formerly known +as Solana Labs validator client. + + + +To later update the Solana CLI to the latest version, you can use the following +command: + +```shell +agave-install update +``` + + + + +### Install Anchor CLI + +[Anchor](https://www.anchor-lang.com/) is a framework for developing Solana +programs. The Anchor framework leverages Rust macros to simplify the process of +writing Solana programs. 
+ +There are two ways to install the Anchor CLI and tooling: + +1. Using Anchor Version Manager (AVM) - is the **recommended installation** + method since it simplifies updating Anchor versions in the future +2. Without AVM - this requires more a manual process to update Anchor versions + later + + + + +The Anchor version manager (AVM) allows you to install and manage different +Anchor versions on your system, including more easily updating Anchor versions +in the future. + +Install AVM with the following command: + +```shell +cargo install --git https://github.com/coral-xyz/anchor avm --force +``` + +Test to ensure AVM was installed and is accessible: + +```shell +avm --version +``` + +Install the latest version of Anchor CLI using AVM: + +```shell +avm install latest +avm use latest +``` + +Or install a specific version of the Anchor CLI by declaring which version you +want to install: + +```shell +avm install 0.30.1 +avm use 0.30.1 +``` + +> Don't forget to run the `avm use` command to declare which Anchor CLI version +> should be used on your system. +> +> - If you installed the `latest` version, run `avm use latest`. +> - If you installed the version `0.30.1`, run `avm use 0.30.1`. + + + + + +Install a specific version of the Anchor CLI with the following command: + +```shell +cargo install --git https://github.com/coral-xyz/anchor --tag v0.30.1 anchor-cli +``` + + + + +You may see the following warning during installation. However, it does not +affect the installation process. + + + + +``` +warning: unexpected `cfg` condition name: `nightly` + --> cli/src/lib.rs:1:13 + | +1 | #![cfg_attr(nightly, feature(proc_macro_span))] + | ^^^^^^^ + | + = help: expected names are: `clippy`, `debug_assertions`, `doc`, `docsrs`, `doctest`, `feature`, `miri`, `overflow_checks`, `panic`, `proc_macro`, `relocation_model`, `rustfmt`, `sanitize`, `sanitizer_cfi_generalize_pointers`, `sanitizer_cfi_normalize_integers`, `target_abi`, `target_arch`, `target_endian`, `target_env`, `target_family`, `target_feature`, `target_has_atomic`, `target_has_atomic_equal_alignment`, `target_has_atomic_load_store`, `target_os`, `target_pointer_width`, `target_thread_local`, `target_vendor`, `test`, `ub_checks`, `unix`, and `windows` + = help: consider using a Cargo feature instead + = help: or consider adding in `Cargo.toml` the `check-cfg` lint config for the lint: + [lints.rust] + unexpected_cfgs = { level = "warn", check-cfg = ['cfg(nightly)'] } + = help: or consider adding `println!("cargo::rustc-check-cfg=cfg(nightly)");` to the top of the `build.rs` + = note: see for more information about checking conditional configuration + = note: `#[warn(unexpected_cfgs)]` on by default + +warning: `anchor-cli` (lib) generated 1 warning +``` + + + + +To verify that the installation was successful, check the Anchor CLI version: + +```shell +anchor --version +``` + +You should see output similar to the following: + +``` +anchor-cli 0.30.1 +``` + +When installing the Anchor CLI on Linux or WSL, you may encounter this error: + +``` +error: could not exec the linker cc = note: Permission denied (os error 13) +``` + +If you see this error message, follow these steps: + +1. Install the dependencies listed in the Linux section at the top of this page. +2. Retry installing the Anchor CLI. + +#### Node.js and Yarn + +Node.js and Yarn are required to run the default Anchor project test file +(TypeScript) created with the `anchor init` command. 
(Rust test template is also
+available using `anchor init --test-template rust`)
+
+
+
+The recommended way to install node is using
+[Node Version Manager (nvm)](https://github.com/nvm-sh/nvm).
+
+Install nvm using the following command:
+
+```shell
+curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/master/install.sh | bash
+```
+
+Restart your terminal and verify that nvm is installed:
+
+```shell
+command -v nvm
+```
+
+Next, use `nvm` to install node:
+
+```shell
+nvm install node
+```
+
+To verify that the installation was successful, check the Node version:
+
+```
+node --version
+```
+
+You should see output similar to the following:
+
+```
+v22.7.0
+```
+
+
+
+Install Yarn:
+
+```shell
+npm install --global yarn
+```
+
+To verify that the installation was successful, check the Yarn version:
+
+```
+yarn --version
+```
+
+You should see the following output:
+
+```
+1.22.1
+```
+
+
+
+When running `anchor build`, if you encounter errors similar to the following:
+
+
+
+```
+error: not a directory: '.../solana-release/bin/sdk/sbf/dependencies/platform-tools/rust/lib'
+```
+
+Try these solutions:
+
+1. Force install using the following command:
+
+```shell
+cargo build-sbf --force-tools-install
+```
+
+2. If the above doesn't work, clear the Solana cache:
+
+```shell
+rm -rf ~/.cache/solana/*
+```
+
+
+
+You can fix this by changing the version field of the `Cargo.lock` file:
+
+```
+version = 3
+```
+
+See [this issue](https://github.com/coral-xyz/anchor/issues/3392) for more
+information.
+
+
+
+
+After applying either solution, attempt to run `anchor build` again.
+
+If you are on Linux or WSL and encounter the following errors when running
+`anchor test` after creating a new Anchor project, it may be due to missing
+Node.js or Yarn:
+
+```
+Permission denied (os error 13)
+```
+
+```
+No such file or directory (os error 2)
+```
+
+
+
+
+## Solana CLI Basics
+
+This section will walk through some common Solana CLI commands to get you
+started.
+
+
+
+
+### Solana Config
+
+To see your current config:
+
+```shell
+solana config get
+```
+
+You should see output similar to the following:
+
+```
+Config File: /Users/test/.config/solana/cli/config.yml
+RPC URL: https://api.mainnet-beta.solana.com
+WebSocket URL: wss://api.mainnet-beta.solana.com/ (computed)
+Keypair Path: /Users/test/.config/solana/id.json
+Commitment: confirmed
+```
+
+The RPC URL and WebSocket URL specify the Solana cluster the CLI will make
+requests to. By default, this will be mainnet-beta.
+
+You can update the Solana CLI cluster using the following commands:
+
+```
+solana config set --url mainnet-beta
+solana config set --url devnet
+solana config set --url localhost
+solana config set --url testnet
+```
+
+You can also use the following short options:
+
+```
+solana config set -um # For mainnet-beta
+solana config set -ud # For devnet
+solana config set -ul # For localhost
+solana config set -ut # For testnet
+```
+
+The Keypair Path specifies the location of the default wallet used by the Solana
+CLI (to pay transaction fees and deploy programs). The default path is
+`~/.config/solana/id.json`. The next step walks through how to generate a
+keypair at the default location.
+
+
+
+### Create Wallet
+
+To interact with the Solana network using the Solana CLI, you need a Solana
+wallet funded with SOL.
+ +To generate a keypair at the default Keypair Path, run the following command: + +```shell +solana-keygen new +``` + +You should see output similar to the following: + +``` +Generating a new keypair + +For added security, enter a BIP39 passphrase + +NOTE! This passphrase improves security of the recovery seed phrase NOT the +keypair file itself, which is stored as insecure plain text + +BIP39 Passphrase (empty for none): + +Wrote new keypair to /Users/test/.config/solana/id.json +=========================================================================== +pubkey: 8dBTPrjnkXyuQK3KDt9wrZBfizEZijmmUQXVHpFbVwGT +=========================================================================== +Save this seed phrase and your BIP39 passphrase to recover your new keypair: +cream bleak tortoise ocean nasty game gift forget fancy salon mimic amazing +=========================================================================== +``` + + + +If you already have a file system wallet saved at the default location, this +command will **NOT** override it unless you explicitly force override using the +`--force` flag. + + + +Once a keypair is generated, you can get the address (public key) of the keypair +with the following command: + +```shell +solana address +``` + + + + +### Airdrop SOL + +Once you've set up your local wallet, request an airdrop of SOL to fund your +wallet. You need SOL to pay for transaction fees and to deploy programs. + +Set your cluster to the devnet: + +```shell +solana config set -ud +``` + +Then request an airdrop of devnet SOL: + +```shell +solana airdrop 2 +``` + +To check your wallet's SOL balance, run the following command: + +```shell +solana balance +``` + + + +The `solana airdrop` command is currently limited to 5 SOL per request on +devnet. Errors are likely due to rate limits. + +Alternatively, you can get devnet SOL using the +[Solana Web Faucet](https://faucet.solana.com). + + + + + + +### Run Local Validator + +The Solana CLI comes with the +[test validator](https://docs.anza.xyz/cli/examples/test-validator) built-in. +Running a local validator will allow you to deploy and test your programs +locally. + +In a separate terminal, run the following command to start a local validator: + +```shell +solana-test-validator +``` + + + +In WSL you may need to first navigate to a folder where you have default write +access: + +```shell +cd ~ +mkdir validator +cd validator +solana-test-validator +``` + + + +Make sure to update the Solana CLI config to localhost before commands. + +```shell +solana config set -ul +``` + + + + diff --git a/content/docs/intro/meta.json b/content/docs/intro/meta.json new file mode 100644 index 000000000..d98bcce20 --- /dev/null +++ b/content/docs/intro/meta.json @@ -0,0 +1,5 @@ +{ + "title": "Getting Started", + "pages": ["quick-start", "installation", "dev", "wallets"], + "defaultOpen": true +} diff --git a/content/docs/intro/quick-start/cross-program-invocation.mdx b/content/docs/intro/quick-start/cross-program-invocation.mdx new file mode 100644 index 000000000..a9a075d77 --- /dev/null +++ b/content/docs/intro/quick-start/cross-program-invocation.mdx @@ -0,0 +1,620 @@ +--- +title: Cross Program Invocation +description: + Learn how to implement Cross Program Invocations (CPIs) in Solana programs + using the Anchor framework. This tutorial demonstrates how to transfer SOL + between accounts, interact with the System Program, and handle Program Derived + Addresses (PDAs) in CPIs. Perfect for developers looking to build composable + Solana programs. 
+--- + +In this section, we'll update the CRUD program from the previous PDA section to +include Cross Program Invocations (CPIs). We'll modify the program to transfer +SOL between accounts in the `update` and `delete` instructions, demonstrating +how to interact with other programs (in this case, the System Program) from +within our program. + +The purpose of this section is to walk through the process of implementing CPIs +in a Solana program using the Anchor framework, building upon the PDA concepts +we explored in the previous section. For more details, refer to the +[Cross Program Invocation](/docs/core/cpi) page. + + + + +### Modify Update Instruction + +First, we'll implement a simple "pay-to-update" mechanism by modifying the +`Update` struct and `update` function. + +Begin by updating the `lib.rs` file to bring into scope items from the +`system_program` module. + +```rs title="lib.rs" +use anchor_lang::system_program::{transfer, Transfer}; +``` + + + + +```diff + use anchor_lang::prelude::*; ++ use anchor_lang::system_program::{transfer, Transfer}; +``` + + + + +Next, update the `Update` struct to include an additional account called +`vault_account`. This account, controlled by our program, will receive SOL from +a user when they update their message account. + +```rs title="lib.rs" +#[account( + mut, + seeds = [b"vault", user.key().as_ref()], + bump, +)] +pub vault_account: SystemAccount<'info>, +``` + + + + +```diff +#[derive(Accounts)] +#[instruction(message: String)] +pub struct Update<'info> { + #[account(mut)] + pub user: Signer<'info>, + ++ #[account( ++ mut, ++ seeds = [b"vault", user.key().as_ref()], ++ bump, ++ )] ++ pub vault_account: SystemAccount<'info>, + #[account( + mut, + seeds = [b"message", user.key().as_ref()], + bump = message_account.bump, + realloc = 8 + 32 + 4 + message.len() + 1, + realloc::payer = user, + realloc::zero = true, + )] + pub message_account: Account<'info, MessageAccount>, + pub system_program: Program<'info, System>, +} +``` + + + + +We're adding a new account called `vault_account` to our `Update` struct. This +account serves as a program-controlled "vault" that will receive SOL from users +when they update their messages. + +By using a PDA for the vault, we create a program-controlled account unique to +each user, enabling us to manage user funds within our program's logic. + +--- + +Key aspects of the `vault_account`: + +- The address of the account is a PDA derived using seeds + `[b"vault", user.key().as_ref()]` +- As a PDA, it has no private key, so only our program can "sign" for the + address when performing CPIs +- As a `SystemAccount` type, it's owned by the System Program like regular + wallet accounts + +This setup allows our program to: + +- Generate unique, deterministic addresses for each user's "vault" +- Control funds without needing a private key to sign for transactions. + +In the `delete` instruction, we'll demonstrate how our program can "sign" for +this PDA in a CPI. + + + + +Next, implement the CPI logic in the `update` instruction to transfer 0.001 SOL +from the user's account to the vault account. 
+ +```rs title="lib.rs" +let transfer_accounts = Transfer { + from: ctx.accounts.user.to_account_info(), + to: ctx.accounts.vault_account.to_account_info(), +}; +let cpi_context = CpiContext::new( + ctx.accounts.system_program.to_account_info(), + transfer_accounts, +); +transfer(cpi_context, 1_000_000)?; +``` + + + + +```diff + pub fn update(ctx: Context, message: String) -> Result<()> { + msg!("Update Message: {}", message); + let account_data = &mut ctx.accounts.message_account; + account_data.message = message; + ++ let transfer_accounts = Transfer { ++ from: ctx.accounts.user.to_account_info(), ++ to: ctx.accounts.vault_account.to_account_info(), ++ }; ++ let cpi_context = CpiContext::new( ++ ctx.accounts.system_program.to_account_info(), ++ transfer_accounts, ++ ); ++ transfer(cpi_context, 1_000_000)?; + Ok(()) + } +``` + + + + +In the `update` instruction, we implement a Cross Program Invocation (CPI) to +invoke the System Program's `transfer` instruction. This demonstrates how to +perform a CPI from within our program, enabling the composability of Solana +programs. + +The `Transfer` struct specifies the required accounts for the System Program's +transfer instruction: + +- `from` - The user's account (source of funds) +- `to` - The vault account (destination of funds) + + ```rs title="lib.rs" + let transfer_accounts = Transfer { + from: ctx.accounts.user.to_account_info(), + to: ctx.accounts.vault_account.to_account_info(), + }; + ``` + +The `CpiContext` specifies: + +- The program to be invoked (System Program) +- The accounts required in the CPI (defined in the `Transfer` struct) + + ```rs title="lib.rs" + let cpi_context = CpiContext::new( + ctx.accounts.system_program.to_account_info(), + transfer_accounts, + ); + ``` + +The `transfer` function then invokes the transfer instruction on the System +Program, passing in the: + +- The `cpi_context` (program and accounts) +- The `amount` to transfer (1,000,000 lamports, equivalent to 0.001 SOL) + + ```rs title="lib.rs" + transfer(cpi_context, 1_000_000)?; + ``` + +--- + +The setup for a CPI matches how client-side instructions are built, where we +specify the program, accounts, and instruction data for a particular instruction +to invoke. When our program's `update` instruction is invoked, it internally +invokes the System Program's transfer instruction. + + + + +Rebuild the program. + +```shell title="Terminal" +build +``` + + + + +### Modify Delete Instruction + +We'll now implement a "refund on delete" mechanism by modifying the `Delete` +struct and `delete` function. + +First, update the `Delete` struct to include the `vault_account`. This allows us +to transfer any SOL in the vault back to the user when they close their message +account. + +```rs title="lib.rs" +#[account( + mut, + seeds = [b"vault", user.key().as_ref()], + bump, +)] +pub vault_account: SystemAccount<'info>, +``` + +Also add the `system_program` as the CPI for the transfer requires invoking the +System Program. 
+
+```rs title="lib.rs"
+pub system_program: Program<'info, System>,
+```
+
+
+
+```diff
+#[derive(Accounts)]
+pub struct Delete<'info> {
+    #[account(mut)]
+    pub user: Signer<'info>,
+
++   #[account(
++       mut,
++       seeds = [b"vault", user.key().as_ref()],
++       bump,
++   )]
++   pub vault_account: SystemAccount<'info>,
+    #[account(
+        mut,
+        seeds = [b"message", user.key().as_ref()],
+        bump = message_account.bump,
+        close = user,
+    )]
+    pub message_account: Account<'info, MessageAccount>,
++   pub system_program: Program<'info, System>,
+}
+```
+
+
+
+The `vault_account` uses the same PDA derivation as in the Update struct.
+
+Adding the `vault_account` to the `Delete` struct enables our program to access
+the user's vault account during the delete instruction to transfer any
+accumulated SOL back to the user.
+
+
+
+Next, implement the CPI logic in the `delete` instruction to transfer SOL from
+the vault account back to the user's account.
+
+```rs title="lib.rs"
+let user_key = ctx.accounts.user.key();
+let signer_seeds: &[&[&[u8]]] =
+    &[&[b"vault", user_key.as_ref(), &[ctx.bumps.vault_account]]];
+
+let transfer_accounts = Transfer {
+    from: ctx.accounts.vault_account.to_account_info(),
+    to: ctx.accounts.user.to_account_info(),
+};
+let cpi_context = CpiContext::new(
+    ctx.accounts.system_program.to_account_info(),
+    transfer_accounts,
+).with_signer(signer_seeds);
+transfer(cpi_context, ctx.accounts.vault_account.lamports())?;
+```
+
+Note that we updated `_ctx: Context` to `ctx: Context` as we'll
+be using the context in the body of the function.
+
+
+
+```diff
+- pub fn delete(_ctx: Context) -> Result<()> {
++ pub fn delete(ctx: Context) -> Result<()> {
+      msg!("Delete Message");
+
++     let user_key = ctx.accounts.user.key();
++     let signer_seeds: &[&[&[u8]]] =
++         &[&[b"vault", user_key.as_ref(), &[ctx.bumps.vault_account]]];
++
++     let transfer_accounts = Transfer {
++         from: ctx.accounts.vault_account.to_account_info(),
++         to: ctx.accounts.user.to_account_info(),
++     };
++     let cpi_context = CpiContext::new(
++         ctx.accounts.system_program.to_account_info(),
++         transfer_accounts,
++     ).with_signer(signer_seeds);
++     transfer(cpi_context, ctx.accounts.vault_account.lamports())?;
+      Ok(())
+  }
+
+```
+
+
+
+In the delete instruction, we implement another Cross Program Invocation (CPI)
+to invoke the System Program's transfer instruction. This CPI demonstrates how
+to make a transfer that requires a Program Derived Address (PDA) signer.
+ +First, we define the signer seeds for the vault PDA: + +```rs title="lib.rs" +let user_key = ctx.accounts.user.key(); +let signer_seeds: &[&[&[u8]]] = + &[&[b"vault", user_key.as_ref(), &[ctx.bumps.vault_account]]]; +``` + +The `Transfer` struct specifies the required accounts for the System Program's +transfer instruction: + +- from: The vault account (source of funds) +- to: The user's account (destination of funds) + + ```rs title="lib.rs" + let transfer_accounts = Transfer { + from: ctx.accounts.vault_account.to_account_info(), + to: ctx.accounts.user.to_account_info(), + }; + ``` + +The `CpiContext` specifies: + +- The program to be invoked (System Program) +- The accounts involved in the transfer (defined in the Transfer struct) +- The signer seeds for the PDA + + ```rs title="lib.rs" + let cpi_context = CpiContext::new( + ctx.accounts.system_program.to_account_info(), + transfer_accounts, + ).with_signer(signer_seeds); + ``` + +The transfer function then invokes the transfer instruction on the System +Program, passing: + +- The `cpi_context` (program, accounts, and PDA signer) +- The amount to transfer (the entire balance of the vault account) + + ```rs title="lib.rs" + transfer(cpi_context, ctx.accounts.vault_account.lamports())?; + ``` + +This CPI implementation demonstrates how programs can utilize PDAs to manage +funds. When our program's delete instruction is invoked, it internally calls the +System Program's transfer instruction, signing for the PDA to authorize the +transfer of all funds from the vault back to the user. + + + + +Rebuild the program. + +```shell title="Terminal" +build +``` + + + + +### Redeploy Program + +After making these changes, we need to redeploy our updated program. This +ensures that our modified program is available for testing. On Solana, updating +a program simply requires deploying the compiled program at the same program ID. + +```shell title="Terminal" +deploy +``` + + + + +```bash +$ deploy +Deploying... This could take a while depending on the program size and network conditions. +Deployment successful. Completed in 17s. +``` + + + + +Only the upgrade authority of the program can update it. The upgrade authority +is set when the program is deployed, and it's the only account with permission +to modify or close the program. If the upgrade authority is revoked, then the +program becomes immutable and can never be closed or upgraded. + +When deploying programs on Solana Playground, your Playground wallet is the +upgrade authority for all your programs. + + + + + + + +### Update Test File + +Next, we'll update our `anchor.test.ts` file to include the new vault account in +our instructions. This requires deriving the vault PDA and including it in our +update and delete instruction calls. 
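+
+As an optional extra check, not part of the original test file, you can also
+confirm the SOL movement itself by reading the vault PDA's lamport balance in
+your tests. Below is a minimal sketch, assuming the `vaultPda` derived in the
+next step and the `pg.connection` object that Solana Playground provides (the
+same one used in the deploy test earlier in this guide):
+
+```ts title="anchor.test.ts"
+// Hypothetical helper (not part of the tutorial's test file): log the vault
+// PDA's lamport balance so you can watch the transfers happen.
+async function logVaultBalance(label: string) {
+  const lamports = await pg.connection.getBalance(vaultPda, "confirmed");
+  console.log(`${label} vault balance (lamports):`, lamports);
+}
+```
+
+You could call this helper right after the `.rpc(...)` calls in the update and
+delete tests shown below; after `update` the balance should increase by
+1,000,000 lamports (0.001 SOL), and after `delete` it should drop back to 0.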
+ +#### Derive Vault PDA + +First, add the vault PDA derivation: + +```ts title="anchor.test.ts" +const [vaultPda, vaultBump] = PublicKey.findProgramAddressSync( + [Buffer.from("vault"), wallet.publicKey.toBuffer()], + program.programId, +); +``` + + + + +```diff +describe("pda", () => { + const program = pg.program; + const wallet = pg.wallet; + + const [messagePda, messageBump] = PublicKey.findProgramAddressSync( + [Buffer.from("message"), wallet.publicKey.toBuffer()], + program.programId + ); + ++ const [vaultPda, vaultBump] = PublicKey.findProgramAddressSync( ++ [Buffer.from("vault"), wallet.publicKey.toBuffer()], ++ program.programId ++ ); + + // ...tests + }); +``` + + + + +#### Modify Update Test + +Then, update the update instruction to include the `vaultAccount`. + +```ts title="anchor.test.ts" {5} +const transactionSignature = await program.methods + .update(message) + .accounts({ + messageAccount: messagePda, + vaultAccount: vaultPda, + }) + .rpc({ commitment: "confirmed" }); +``` + + + + +```diff + const transactionSignature = await program.methods + .update(message) + .accounts({ + messageAccount: messagePda, ++ vaultAccount: vaultPda, + }) + .rpc({ commitment: "confirmed" }); +``` + + + + +#### Modify Delete Test + +Then, update the delete instruction to include the `vaultAccount`. + +```ts title="anchor.test.ts" {5} +const transactionSignature = await program.methods + .delete() + .accounts({ + messageAccount: messagePda, + vaultAccount: vaultPda, + }) + .rpc({ commitment: "confirmed" }); +``` + + + + +```diff + const transactionSignature = await program.methods + .delete() + .accounts({ + messageAccount: messagePda, ++ vaultAccount: vaultPda, + }) + .rpc({ commitment: "confirmed" }); +``` + + + + + + + +### Rerun Test + +After making these changes, run the tests to ensure everything is working as +expected: + +```shell title="Terminal" +test +``` + + + + +```bash +$ test +Running tests... + anchor.test.ts: + pda + { + "user": "3z9vL1zjN6qyAFHhHQdWYRTFAcy69pJydkZmSFBKHg1R", + "message": "Hello, World!", + "bump": 254 +} + Transaction Signature: https://solana.fm/tx/qGsYb87mUUjeyh7Ha7r9VXkACw32HxVBujo2NUxqHiUc8qxRMFB7kdH2D4JyYtPBx171ddS91VyVrFXypgYaKUr?cluster=devnet-solana + ✔ Create Message Account (842ms) + { + "user": "3z9vL1zjN6qyAFHhHQdWYRTFAcy69pJydkZmSFBKHg1R", + "message": "Hello, Solana!", + "bump": 254 +} + Transaction Signature: https://solana.fm/tx/3KCDnNSfDDfmSy8kpiSrJsGGkzgxx2mt18KejuV2vmJjeyenkSoEfs2ghUQ6cMoYYgd9Qax9CbnYRcvF2zzumNt8?cluster=devnet-solana + ✔ Update Message Account (946ms) + Expect Null: null + Transaction Signature: https://solana.fm/tx/3M7Z7Mea3TtQc6m9z386B9QuEgvLKxD999mt2RyVtJ26FgaAzV1QA5mxox3eXie3bpBkNpDQ4mEANr3trVHCWMC2?cluster=devnet-solana + ✔ Delete Message Account (859ms) + 3 passing (3s) +``` + + + + +You can then inspect the SolanFM links to view the transaction details, where +you'll find the CPIs for the transfer instructions within the update and delete +instructions. + +![Update CPI](/assets/docs/intro/quickstart/cpi-update.png) + +![Delete CPI](/assets/docs/intro/quickstart/cpi-delete.png) + +If you encounter any errors, you can reference the +[final code](https://beta.solpg.io/668304cfcffcf4b13384d20a). + + + + +## Next Steps + +You've completed the Solana Quickstart guide! You've learned about accounts, +transactions, PDAs, CPIs, and deployed your own programs. + +Visit the [Core Concepts](/docs/core/accounts) pages for more comprehensive +explanations of the topics covered in this guide. 
+ +Additional learning resources can be found on the +[Developer Resources](/developers) page. + +### Explore More Examples + +If you prefer learning by example, check out the +[Program Examples Repository](https://github.com/solana-developers/program-examples) +for a variety of example programs. + +Solana Playground offers a convenient feature allowing you to import or view +projects using their GitHub links. For example, open this +[Solana Playground link](https://beta.solpg.io/https://github.com/solana-developers/program-examples/tree/main/basics/hello-solana/anchor) +to view the Anchor project from this +[Github repo](https://github.com/solana-developers/program-examples/tree/main/basics/hello-solana/anchor). + +Click the `Import` button and enter a project name to add it to your list of +projects in Solana Playground. Once a project is imported, all changes are +automatically saved and persisted within the Playground environment. diff --git a/content/docs/intro/quick-start/deploying-programs.mdx b/content/docs/intro/quick-start/deploying-programs.mdx new file mode 100644 index 000000000..85a0ed13f --- /dev/null +++ b/content/docs/intro/quick-start/deploying-programs.mdx @@ -0,0 +1,391 @@ +--- +title: Deploying Programs +description: + Learn how to build, deploy, and test your first Solana program using the + Anchor framework and Solana Playground. This beginner-friendly guide walks + through creating a simple program, deploying it to devnet, running tests, and + closing the program. +h1: Deploying Your First Solana Program +--- + +In this section, we'll build, deploy, and test a simple Solana program using the +Anchor framework. By the end, you'll have deployed your first program to the +Solana blockchain! + +The purpose of this section is to familiarize you with the Solana Playground. +We'll walk through a more detailed example in the PDA and CPI sections. For more +details, refer to the [Programs on Solana](/docs/core/programs) page. + + + + + +### Create Anchor Project + +First, open https://beta.solpg.io in a new browser tab. + +- Click the "Create a new project" button on the left-side panel. + +- Enter a project name, select Anchor as the framework, then click the "Create" + button. + +![New Project](/assets/docs/intro/quickstart/pg-new-project.gif) + +You'll see a new project created with the program code in the `src/lib.rs` file. + +```rust title="lib.rs" +use anchor_lang::prelude::*; + +// This is your program's public key and it will update +// automatically when you build the project. +declare_id!("11111111111111111111111111111111"); + +#[program] +mod hello_anchor { + use super::*; + pub fn initialize(ctx: Context, data: u64) -> Result<()> { + ctx.accounts.new_account.data = data; + msg!("Changed data to: {}!", data); // Message will show up in the tx logs + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Initialize<'info> { + // We must specify the space in order to initialize an account. + // First 8 bytes are default account discriminator, + // next 8 bytes come from NewAccount.data being type u64. + // (u64 = 64 bits unsigned integer = 8 bytes) + #[account(init, payer = signer, space = 8 + 8)] + pub new_account: Account<'info, NewAccount>, + #[account(mut)] + pub signer: Signer<'info>, + pub system_program: Program<'info, System>, +} + +#[account] +pub struct NewAccount { + data: u64 +} +``` + + + + +For now, we'll only cover the high-level overview of the program code: + +- The `declare_id!` macro specifies the on-chain address of your program. 
It + will be automatically updated when we build the program in the next step. + + ```rs + declare_id!("11111111111111111111111111111111"); + ``` + +- The `#[program]` macro annotates a module containing functions that represent + the program's instructions. + + ```rs + #[program] + mod hello_anchor { + use super::*; + pub fn initialize(ctx: Context, data: u64) -> Result<()> { + ctx.accounts.new_account.data = data; + msg!("Changed data to: {}!", data); // Message will show up in the tx logs + Ok(()) + } + } + ``` + + In this example, the `initialize` instruction takes two parameters: + + 1. `ctx: Context` - Provides access to the accounts required for + this instruction, as specified in the `Initialize` struct. + 2. `data: u64` - An instruction parameter that will be passed in when the + instruction is invoked. + + The function body sets the `data` field of `new_account` to the provided + `data` argument and then prints a message to the program logs. + +- The `#[derive(Accounts)]` macro is used to define a struct that specifies the + accounts required for a particular instruction, where each field represents a + separate account. + + The field types (ex. `Signer<'info>`) and constraints (ex. `#[account(mut)]`) + are used by Anchor to automatically handle common security checks related to + account validation. + + ```rs + #[derive(Accounts)] + pub struct Initialize<'info> { + #[account(init, payer = signer, space = 8 + 8)] + pub new_account: Account<'info, NewAccount>, + #[account(mut)] + pub signer: Signer<'info>, + pub system_program: Program<'info, System>, + } + ``` + +- The `#[account]` macro is used to define a struct that represents the data + structure of an account created and owned by the program. + + ```rs + #[account] + pub struct NewAccount { + data: u64 + } + ``` + + + + + + + +### Build and Deploy Program + +To build the program, simply run `build` in the terminal. + +```shell title="Terminal" +build +``` + +Notice that the address in `declare_id!()` has been updated. This is your +program's on-chain address. + + + + +```shell title="Terminal" +$ build +Building... +Build successful. Completed in 1.46s. +``` + + + + +Once the program is built, run `deploy` in the terminal to deploy the program to +the network (devnet by default). To deploy a program, SOL must be allocated to +the on-chain account that stores the program. + +Before deployment, ensure you have enough SOL. You can get devnet SOL by either +running `solana airdrop 5` in the Playground terminal or using the +[Web Faucet](https://faucet.solana.com/). + +```shell title="Terminal" +deploy +``` + + + + +```shell title="Terminal" +$ deploy +Deploying... This could take a while depending on the program size and network conditions. +Warning: 1 transaction not confirmed, retrying... +Deployment successful. Completed in 19s. +``` + + + + +Alternatively, you can also use the `Build` and `Deploy` buttons on the +left-side panel. + +![Build and Deploy](/assets/docs/intro/quickstart/pg-build-deploy.png) + +Once the program is deployed, you can now invoke its instructions. + + + + +### Test Program + +Included with the starter code is a test file found in `tests/anchor.test.ts`. +This file demonstrates how to invoke the `initialize` instruction on the starter +program from the client. 
+ +```ts title="anchor.test.ts" +// No imports needed: web3, anchor, pg and more are globally available + +describe("Test", () => { + it("initialize", async () => { + // Generate keypair for the new account + const newAccountKp = new web3.Keypair(); + + // Send transaction + const data = new BN(42); + const txHash = await pg.program.methods + .initialize(data) + .accounts({ + newAccount: newAccountKp.publicKey, + signer: pg.wallet.publicKey, + systemProgram: web3.SystemProgram.programId, + }) + .signers([newAccountKp]) + .rpc(); + console.log(`Use 'solana confirm -v ${txHash}' to see the logs`); + + // Confirm transaction + await pg.connection.confirmTransaction(txHash); + + // Fetch the created account + const newAccount = await pg.program.account.newAccount.fetch( + newAccountKp.publicKey, + ); + + console.log("On-chain data is:", newAccount.data.toString()); + + // Check whether the data on-chain is equal to local 'data' + assert(data.eq(newAccount.data)); + }); +}); +``` + +To run the test file once the program is deployed, run `test` in the terminal. + +```shell title="Terminal" +test +``` + +You should see an output indicating that the test passed successfully. + + + + +```shell title="Terminal" +$ test +Running tests... + hello_anchor.test.ts: + hello_anchor + Use 'solana confirm -v 3TewJtiUz1EgtT88pLJHvKFzqrzDNuHVi8CfD2mWmHEBAaMfC5NAaHdmr19qQYfTiBace6XUmADvR4Qrhe8gH5uc' to see the logs + On-chain data is: 42 + ✔ initialize (961ms) + 1 passing (963ms) +``` + + + + +You can also use the `Test` button on the left-side panel. + +![Run Test](/assets/docs/intro/quickstart/pg-test.png) + +You can then view the transaction logs by running the `solana confirm -v` +command and specifying the transaction hash (signature) from the test output: + +```shell title="Terminal" +solana confirm -v [TxHash] +``` + +For example: + +```shell title="Terminal" +solana confirm -v 3TewJtiUz1EgtT88pLJHvKFzqrzDNuHVi8CfD2mWmHEBAaMfC5NAaHdmr19qQYfTiBace6XUmADvR4Qrhe8gH5uc +``` + + + + +```shell title="Terminal" {29-35} +$ solana confirm -v 3TewJtiUz1EgtT88pLJHvKFzqrzDNuHVi8CfD2mWmHEBAaMfC5NAaHdmr19qQYfTiBace6XUmADvR4Qrhe8gH5uc +RPC URL: https://api.devnet.solana.com +Default Signer: Playground Wallet +Commitment: confirmed + +Transaction executed in slot 308150984: + Block Time: 2024-06-25T12:52:05-05:00 + Version: legacy + Recent Blockhash: 7AnZvY37nMhCybTyVXJ1umcfHSZGbngnm4GZx6jNRTNH + Signature 0: 3TewJtiUz1EgtT88pLJHvKFzqrzDNuHVi8CfD2mWmHEBAaMfC5NAaHdmr19qQYfTiBace6XUmADvR4Qrhe8gH5uc + Signature 1: 3TrRbqeMYFCkjsxdPExxBkLAi9SB2pNUyg87ryBaTHzzYtGjbsAz9udfT9AkrjSo1ZjByJgJHBAdRVVTZv6B87PQ + Account 0: srw- 3z9vL1zjN6qyAFHhHQdWYRTFAcy69pJydkZmSFBKHg1R (fee payer) + Account 1: srw- c7yy8zdP8oeZ2ewbSb8WWY2yWjDpg3B43jk3478Nv7J + Account 2: -r-- 11111111111111111111111111111111 + Account 3: -r-x 2VvQ11q8xrn5tkPNyeraRsPaATdiPx8weLAD8aD4dn2r + Instruction 0 + Program: 2VvQ11q8xrn5tkPNyeraRsPaATdiPx8weLAD8aD4dn2r (3) + Account 0: c7yy8zdP8oeZ2ewbSb8WWY2yWjDpg3B43jk3478Nv7J (1) + Account 1: 3z9vL1zjN6qyAFHhHQdWYRTFAcy69pJydkZmSFBKHg1R (0) + Account 2: 11111111111111111111111111111111 (2) + Data: [175, 175, 109, 31, 13, 152, 155, 237, 42, 0, 0, 0, 0, 0, 0, 0] + Status: Ok + Fee: ◎0.00001 + Account 0 balance: ◎5.47001376 -> ◎5.46900152 + Account 1 balance: ◎0 -> ◎0.00100224 + Account 2 balance: ◎0.000000001 + Account 3 balance: ◎0.00139896 + Log Messages: + Program 2VvQ11q8xrn5tkPNyeraRsPaATdiPx8weLAD8aD4dn2r invoke [1] + Program log: Instruction: Initialize + Program 11111111111111111111111111111111 invoke [2] + Program 
11111111111111111111111111111111 success + Program log: Changed data to: 42! + Program 2VvQ11q8xrn5tkPNyeraRsPaATdiPx8weLAD8aD4dn2r consumed 5661 of 200000 compute units + Program 2VvQ11q8xrn5tkPNyeraRsPaATdiPx8weLAD8aD4dn2r success + +Confirmed +``` + + + + +Alternatively, you can view the transaction details on +[SolanaFM](https://solana.fm/) or +[Solana Explorer](https://explorer.solana.com/?cluster=devnet) by searching for +the transaction signature (hash). + + + Reminder to update the cluster (network) connection on the Explorer you are + using to match Solana Playground. Solana Playground's default cluster is devnet. + + + + + +### Close Program + +Lastly, the SOL allocated to the on-chain program can be fully recovered by +closing the program. + +You can close a program by running the following command and specifying the +program address found in `declare_id!()`: + +```shell title="Terminal" +solana program close [ProgramID] +``` + +For example: + +```shell title="Terminal" +solana program close 2VvQ11q8xrn5tkPNyeraRsPaATdiPx8weLAD8aD4dn2r +``` + + + + +```shell title="Terminal" +$ solana program close 2VvQ11q8xrn5tkPNyeraRsPaATdiPx8weLAD8aD4dn2r +Closed Program Id 2VvQ11q8xrn5tkPNyeraRsPaATdiPx8weLAD8aD4dn2r, 2.79511512 SOL reclaimed +``` + + + + +Only the upgrade authority of the program can close it. The upgrade authority is +set when the program is deployed, and it's the only account with permission to +modify or close the program. If the upgrade authority is revoked, then the +program becomes immutable and can never be closed or upgraded. + +When deploying programs on Solana Playground, your Playground wallet is the +upgrade authority for all your programs. + + + + +Congratulations! You've just built and deployed your first Solana program using +the Anchor framework! + + + + diff --git a/content/docs/intro/quick-start/index.mdx b/content/docs/intro/quick-start/index.mdx new file mode 100644 index 000000000..adcbb7cfb --- /dev/null +++ b/content/docs/intro/quick-start/index.mdx @@ -0,0 +1,125 @@ +--- +title: Quick Start +description: + Learn Solana development basics. Create your first program, understand + accounts, send transactions, and explore PDAs and CPIs using Solana Playground + - no installation required. +h1: Solana Quick Start Guide +--- + +Welcome to the Solana Quick Start Guide! This hands-on guide will introduce you +to the core concepts for building on Solana, regardless of your prior +experience. By the end of this tutorial, you'll have a basic foundation in +Solana development and be ready to explore more advanced topics. + +## What You'll Learn + +In this tutorial, you'll learn about: + +- Understanding Accounts: Explore how data is stored on the Solana network. +- Sending Transactions: Learn to interact with the Solana network by sending + transactions. +- Building and Deploying Programs: Create your first Solana program and deploy + it to the network. +- Program Derived Addresses (PDAs): Learn how to use PDAs to create + deterministic addresses for accounts. +- Cross-Program Invocations (CPIs): Learn how to make your programs interact + with other programs on Solana. + +The best part? You don't need to install anything! We'll be using Solana +Playground, a browser-based development environment, for all our examples. This +means you can follow along, copy and paste code, and see results immediately, +all from your web browser. Basic programming knowledge is helpful but not +required. + +Let's dive in and start building on Solana! 
+
+## Solana Playground
+
+Solana Playground (Solpg) is a browser-based development environment that allows
+you to quickly develop, deploy, and test Solana programs!
+
+Open a new tab in your web browser and navigate to https://beta.solpg.io/.
+
+
+
+
+### Create Playground Wallet
+
+If you're new to Solana Playground, the first step is to create your Playground
+Wallet. This wallet will allow you to interact with the Solana network right
+from your browser.
+
+#### Step 1. Connect to Playground
+
+Click the "Not connected" button at the bottom left of the screen.
+
+![Not Connected](/assets/docs/intro/quickstart/pg-not-connected.png)
+
+#### Step 2. Create Your Wallet
+
+You'll see an option to save your wallet's keypair. Optionally, save your
+wallet's keypair for backup and then click "Continue".
+
+![Create Playground Wallet](/assets/docs/intro/quickstart/pg-create-wallet.png)
+
+You should now see your wallet's address, SOL balance, and connected cluster
+(devnet by default) at the bottom of the window.
+
+![Connected](/assets/docs/intro/quickstart/pg-connected.png)
+
+
+  Your Playground Wallet will be saved in your browser's local storage. Clearing
+  your browser cache will remove your saved wallet.
+
+
+Some definitions you may find helpful:
+
+- _wallet address_: a unique identifier for a digital wallet, used to send or
+  receive crypto assets on a blockchain. Each wallet address is a string of
+  alphanumeric characters that represents a specific destination on the network.
+  Think of it like an email address or bank account number—if someone wants to
+  send you cryptocurrency, they need your wallet address to direct the funds.
+- _connected cluster_: a set of network nodes that work together to maintain a
+  synchronized copy of the blockchain. These clusters are essential for
+  providing a decentralized, distributed ledger and powering the Solana network
+  by validating transactions, securing the chain, and executing programs (smart
+  contracts).
+
+
+
+
+### Get Devnet SOL
+
+Before we start building, we first need some devnet SOL.
+
+From a developer's perspective, SOL is required for two main use cases:
+
+- To create accounts where we can store data or deploy programs
+- To pay for transaction fees when we interact with the network
+
+Below are two methods to fund your wallet with devnet SOL:
+
+#### Option 1: Using the Playground Terminal
+
+To fund your Playground wallet with devnet SOL, run the following command in the
+Playground terminal:
+
+```shell title="Terminal"
+solana airdrop 5
+```
+
+#### Option 2: Using the Devnet Faucet
+
+If the airdrop command doesn't work (due to rate limits or errors), you can use
+the [Web Faucet](https://faucet.solana.com/).
+ +- Enter your wallet address (found at the bottom of the Playground screen) and + select an amount +- Click "Confirm Airdrop" to receive your devnet SOL + +![Faucet Airdrop](/assets/docs/intro/quickstart/faucet-airdrop.gif) + + + + diff --git a/content/docs/intro/quick-start/meta.json b/content/docs/intro/quick-start/meta.json new file mode 100644 index 000000000..f01c173da --- /dev/null +++ b/content/docs/intro/quick-start/meta.json @@ -0,0 +1,10 @@ +{ + "title": "Quick Start", + "pages": [ + "reading-from-network", + "writing-to-network", + "deploying-programs", + "program-derived-address", + "cross-program-invocation" + ] +} diff --git a/content/docs/intro/quick-start/program-derived-address.mdx b/content/docs/intro/quick-start/program-derived-address.mdx new file mode 100644 index 000000000..8209267ff --- /dev/null +++ b/content/docs/intro/quick-start/program-derived-address.mdx @@ -0,0 +1,1130 @@ +--- +title: Program Derived Address +description: + Learn how to build a CRUD (Create, Read, Update, Delete) Solana program using + Program Derived Addresses (PDAs) and the Anchor framework. This step-by-step + guide demonstrates how to create, update, and delete on-chain message accounts + using PDAs, implement account validation, and write tests. Perfect for + developers looking to understand how to use PDAs in Solana programs. +--- + +In this section, we'll walk through how to build a basic CRUD (Create, Read, +Update, Delete) program. The program will store a user's message using a Program +Derived Address (PDA) as the account's address. + +The purpose of this section is to guide you through the steps for building and +testing a Solana program using the Anchor framework and demonstrating how to use +PDAs within a program. For more details, refer to the +[Programs Derived Address](/docs/core/pda) page. + +For reference, here is the +[final code](https://beta.solpg.io/668304cfcffcf4b13384d20a) after completing +both the PDA and CPI sections. + + + + + +### Starter Code + +Begin by opening this +[Solana Playground link](https://beta.solpg.io/66734b7bcffcf4b13384d1ad) with +the starter code. Then click the "Import" button, which will add the program to +your list of projects on Solana Playground. + +![Import](/assets/docs/intro/quickstart/pg-import.png) + +In the `lib.rs` file, you'll find a program scaffolded with the `create`, +`update`, and `delete` instructions we'll implement in the following steps. + +```rs title="lib.rs" +use anchor_lang::prelude::*; + +declare_id!("8KPzbM2Cwn4Yjak7QYAEH9wyoQh86NcBicaLuzPaejdw"); + +#[program] +pub mod pda { + use super::*; + + pub fn create(_ctx: Context) -> Result<()> { + Ok(()) + } + + pub fn update(_ctx: Context) -> Result<()> { + Ok(()) + } + + pub fn delete(_ctx: Context) -> Result<()> { + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Create {} + +#[derive(Accounts)] +pub struct Update {} + +#[derive(Accounts)] +pub struct Delete {} + +#[account] +pub struct MessageAccount {} +``` + +Before we begin, run `build` in the Playground terminal to check the starter +program builds successfully. + +```shell title="Terminal" +build +``` + + + + +```shell title="Terminal" +$ build +Building... +Build successful. Completed in 3.50s. +``` + + + + + + + +### Define Message Account Type + +First, let's define the structure for the message account that our program will +create. This is the data that we'll store in the account created by the program. 
+ +In `lib.rs`, update the `MessageAccount` struct with the following: + +```rs title="lib.rs" +#[account] +pub struct MessageAccount { + pub user: Pubkey, + pub message: String, + pub bump: u8, +} +``` + + + + +```diff +- #[account] +- pub struct MessageAccount {} + ++ #[account] ++ pub struct MessageAccount { ++ pub user: Pubkey, ++ pub message: String, ++ pub bump: u8, ++ } +``` + + + + +The `#[account]` macro in an Anchor program is used to annotate structs that +represent account data (data type to store in the AccountInfo's data field). + +In this example, we're defining a `MessageAccount` struct to store a message +created by users that contains three fields: + +- `user` - A Pubkey representing the user who created the message account. +- `message` - A String containing the user's message. +- `bump` - A u8 storing the ["bump" seed](/docs/core/pda#canonical-bump) used in + deriving the program derived address (PDA). Storing this value saves compute + by eliminating the need to rederive it for each use in subsequent + instructions. When an account is created, the `MessageAccount` data will be + serialized and stored in the new account's data field. + +Later, when reading from the account, this data can be deserialized back into +the `MessageAccount` data type. The process of creating and reading the account +data will be demonstrated in the testing section. + + + + +Build the program again by running `build` in the terminal. + +```shell title="Terminal" +build +``` + +We've defined what our message account will look like. Next, we'll implement the +program instructions. + + + + +### Implement Create Instruction + +Now, let's implement the `create` instruction to create and initialize the +`MessageAccount`. + +Start by defining the accounts required for the instruction by updating the +`Create` struct with the following: + +```rs title="lib.rs" +#[derive(Accounts)] +#[instruction(message: String)] +pub struct Create<'info> { + #[account(mut)] + pub user: Signer<'info>, + + #[account( + init, + seeds = [b"message", user.key().as_ref()], + bump, + payer = user, + space = 8 + 32 + 4 + message.len() + 1 + )] + pub message_account: Account<'info, MessageAccount>, + pub system_program: Program<'info, System>, +} +``` + + + + +```diff +- #[derive(Accounts)] +- pub struct Create {} + ++ #[derive(Accounts)] ++ #[instruction(message: String)] ++ pub struct Create<'info> { ++ #[account(mut)] ++ pub user: Signer<'info>, ++ ++ #[account( ++ init, ++ seeds = [b"message", user.key().as_ref()], ++ bump, ++ payer = user, ++ space = 8 + 32 + 4 + message.len() + 1 ++ )] ++ pub message_account: Account<'info, MessageAccount>, ++ pub system_program: Program<'info, System>, ++ } +``` + + + + +The `#[derive(Accounts)]` macro in an Anchor program is used to annotate structs +that represent a list of accounts required by an instruction where each field in +the struct is an account. + +Each account (field) in the struct is annotated with an account type (ex. +`Signer<'info>`) and can be further annotated with constraints (ex. +`#[account(mut)]`). The account type along with account constraints are used to +perform security checks on the accounts passed to the instruction. + +The naming of each field is only for our understanding and has no effect on +account validation, however, it is recommended to use descriptive account names. + +--- + +The `Create` struct defines the accounts required for the `create` instruction. + +1. 
`user: Signer<'info>` + + - Represents the user creating the message account + - Marked as mutable (`#[account(mut)]`) as it pays for the new account + - Must be a signer to approve the transaction, as lamports are deducted from + the account + +2. `message_account: Account<'info, MessageAccount>` + + - The new account created to store the user's message + - `init` constraint indicates the account will be created in the instruction + - `seeds` and `bump` constraints indicate the address of the account is a + Program Derived Address (PDA) + - `payer = user` specifies the account paying for the creation of the new + account + - `space` specifies the number of bytes allocated to the new account's data + field + +3. `system_program: Program<'info, System>` + + - Required for creating new accounts + - Under the hood, the `init` constraint invokes the System Program to create + a new account allocated with the specified `space` and reassigns the + program owner to the current program. + +--- + +The `#[instruction(message: String)]` annotation enables the `Create` struct to +access the `message` parameter from the `create` instruction. + +--- + +The `seeds` and `bump` constraints are used together to specify that an +account's address is a Program Derived Address (PDA). + +```rs title="lib.rs" +seeds = [b"message", user.key().as_ref()], +bump, +``` + +The `seeds` constraint defines the optional inputs used to derive the PDA. + +- `b"message"` - A hardcoded string as the first seed. +- `user.key().as_ref()` - The public key of the `user` account as the second + seed. + +The `bump` constraint tells Anchor to automatically find and use the correct +bump seed. Anchor will use the `seeds` and `bump` to derive the PDA. + +--- + +The `space` calculation (8 + 32 + 4 + message.len() + 1) allocates space for +`MessageAccount` data type: + +- Anchor Account discriminator (identifier): 8 bytes +- User Address (Pubkey): 32 bytes +- User Message (String): 4 bytes for length + variable message length +- PDA Bump seed (u8): 1 byte + +```rs title="lib.rs" +#[account] +pub struct MessageAccount { + pub user: Pubkey, + pub message: String, + pub bump: u8, +} +``` + +All accounts created through an Anchor program require 8 bytes for an account +discriminator, which is an identifier for the account type that is automatically +generated when the account is created. + +A `String` type requires 4 bytes to store the length of the string, and the +remaining length is the actual data. + + + + +Next, implement the business logic for the `create` instruction by updating the +`create` function with the following: + +```rs title="lib.rs" +pub fn create(ctx: Context, message: String) -> Result<()> { + msg!("Create Message: {}", message); + let account_data = &mut ctx.accounts.message_account; + account_data.user = ctx.accounts.user.key(); + account_data.message = message; + account_data.bump = ctx.bumps.message_account; + Ok(()) +} +``` + + + + +```diff +- pub fn create(_ctx: Context) -> Result<()> { +- Ok(()) +- } + ++ pub fn create(ctx: Context, message: String) -> Result<()> { ++ msg!("Create Message: {}", message); ++ let account_data = &mut ctx.accounts.message_account; ++ account_data.user = ctx.accounts.user.key(); ++ account_data.message = message; ++ account_data.bump = ctx.bumps.message_account; ++ Ok(()) ++ } +``` + + + + +The `create` function implements the logic for initializing a new message +account's data. It takes two parameters: + +1. 
`ctx: Context` - Provides access to the accounts specified in the + `Create` struct. +2. `message: String` - The user's message to be stored. + +The body of the function then performs the following logic: + +1. Print a message to program logs using the `msg!()` macro. + + ```rs + msg!("Create Message: {}", message); + ``` + +2. Initializing Account Data: + + - Accesses the `message_account` from the context. + + ```rs + let account_data = &mut ctx.accounts.message_account; + ``` + + - Sets the `user` field to the public key of the `user` account. + + ```rs + account_data.user = ctx.accounts.user.key(); + ``` + + - Sets the `message` field to the `message` from the function argument. + + ```rs + account_data.message = message; + ``` + + - Sets the `bump` value used to derive the PDA, retrieved from + `ctx.bumps.message_account`. + + ```rs + account_data.bump = ctx.bumps.message_account; + ``` + + + + +Rebuild the program. + +```shell title="Terminal" +build +``` + + + + +### Implement Update Instruction + +Next, implement the `update` instruction to update the `MessageAccount` with a +new message. + +Just as before, the first step is to specify the accounts required by the +`update` instruction. + +Update the `Update` struct with the following: + +```rs title="lib.rs" +#[derive(Accounts)] +#[instruction(message: String)] +pub struct Update<'info> { + #[account(mut)] + pub user: Signer<'info>, + + #[account( + mut, + seeds = [b"message", user.key().as_ref()], + bump = message_account.bump, + realloc = 8 + 32 + 4 + message.len() + 1, + realloc::payer = user, + realloc::zero = true, + )] + pub message_account: Account<'info, MessageAccount>, + pub system_program: Program<'info, System>, +} +``` + + + + +```diff +- #[derive(Accounts)] +- pub struct Update {} + ++ #[derive(Accounts)] ++ #[instruction(message: String)] ++ pub struct Update<'info> { ++ #[account(mut)] ++ pub user: Signer<'info>, ++ ++ #[account( ++ mut, ++ seeds = [b"message", user.key().as_ref()], ++ bump = message_account.bump, ++ realloc = 8 + 32 + 4 + message.len() + 1, ++ realloc::payer = user, ++ realloc::zero = true, ++ )] ++ pub message_account: Account<'info, MessageAccount>, ++ pub system_program: Program<'info, System>, ++ } +``` + + + + +The `Update` struct defines the accounts required for the `update` instruction. + +1. `user: Signer<'info>` + + - Represents the user updating the message account + - Marked as mutable (`#[account(mut)]`) as it may pay for additional space + for the `message_account` if needed + - Must be a signer to approve the transaction + +2. `message_account: Account<'info, MessageAccount>` + + - The existing account storing the user's message that will be updated + - `mut` constraint indicates this account's data will be modified + - `realloc` constraint allows for resizing the account's data + - `seeds` and `bump` constraints ensure the account is the correct PDA + +3. `system_program: Program<'info, System>` + - Required for potential reallocation of account space + - The `realloc` constraint invokes the System Program to adjust the account's + data size + +--- + +Note that the `bump = message_account.bump` constraint uses the bump seed stored +on the `message_account`, rather than having Anchor recalculate it. + +--- + +`#[instruction(message: String)]` annotation enables the `Update` struct to +access the `message` parameter from the `update` instruction. + + + + +Next, implement the logic for the `update` instruction. 
+ +```rs title="lib.rs" +pub fn update(ctx: Context, message: String) -> Result<()> { + msg!("Update Message: {}", message); + let account_data = &mut ctx.accounts.message_account; + account_data.message = message; + Ok(()) +} +``` + + + + +```diff +- pub fn update(_ctx: Context) -> Result<()> { +- Ok(()) +- } + ++ pub fn update(ctx: Context, message: String) -> Result<()> { ++ msg!("Update Message: {}", message); ++ let account_data = &mut ctx.accounts.message_account; ++ account_data.message = message; ++ Ok(()) ++ } +``` + + + + +The `update` function implements the logic for modifying an existing message +account. It takes two parameters: + +1. `ctx: Context` - Provides access to the accounts specified in the + `Update` struct. +2. `message: String` - The new message to replace the existing one. + +The body of the function then: + +1. Print a message to program logs using the `msg!()` macro. + +2. Updates Account Data: + - Accesses the `message_account` from the context. + - Sets the `message` field to the new `message` from the function argument. + + + + +Rebuild the program + +```shell title="Terminal" +build +``` + + + + +### Implement Delete Instruction + +Next, implement the `delete` instruction to close the `MessageAccount`. + +Update the `Delete` struct with the following: + +```rs title="lib.rs" +#[derive(Accounts)] +pub struct Delete<'info> { + #[account(mut)] + pub user: Signer<'info>, + + #[account( + mut, + seeds = [b"message", user.key().as_ref()], + bump = message_account.bump, + close= user, + )] + pub message_account: Account<'info, MessageAccount>, +} +``` + + + + +```diff +- #[derive(Accounts)] +- pub struct Delete {} + ++ #[derive(Accounts)] ++ pub struct Delete<'info> { ++ #[account(mut)] ++ pub user: Signer<'info>, ++ ++ #[account( ++ mut, ++ seeds = [b"message", user.key().as_ref()], ++ bump = message_account.bump, ++ close = user, ++ )] ++ pub message_account: Account<'info, MessageAccount>, ++ } +``` + + + + +The `Delete` struct defines the accounts required for the `delete` instruction: + +1. `user: Signer<'info>` + + - Represents the user closing the message account + - Marked as mutable (`#[account(mut)]`) as it will receive the lamports from + the closed account + - Must be a signer to ensure only the correct user can close their message + account + +2. `message_account: Account<'info, MessageAccount>` + + - The account being closed + - `mut` constraint indicates this account will be modified + - `seeds` and `bump` constraints ensure the account is the correct PDA + - `close = user` constraint specifies that this account will be closed and + its lamports transferred to the `user` account + + + + +Next, implement the logic for the `delete` instruction. + +```rs title="lib.rs" +pub fn delete(_ctx: Context) -> Result<()> { + msg!("Delete Message"); + Ok(()) +} +``` + + + + +```diff +- pub fn delete(_ctx: Context) -> Result<()> { +- Ok(()) +- } + ++ pub fn delete(_ctx: Context) -> Result<()> { ++ msg!("Delete Message"); ++ Ok(()) ++ } +``` + + + + +The `delete` function takes one parameter: + +1. `_ctx: Context` - Provides access to the accounts specified in the + `Delete` struct. The `_ctx` syntax indicates we won't be using the Context in + the body of the function. + +The body of the function only prints a message to program logs using the +`msg!()` macro. The function does not require any additional logic because the +actual closing of the account is handled by the `close` constraint in the +`Delete` struct. + + + + +Rebuild the program. 
+ +```shell title="Terminal" +build +``` + + + + +### Deploy Program + +The basic CRUD program is now complete. Deploy the program by running `deploy` +in the Playground terminal. + +```shell title="Terminal" +deploy +``` + + + + +```bash +$ deploy +Deploying... This could take a while depending on the program size and network conditions. +Deployment successful. Completed in 17s. +``` + + + + + + + +### Set Up Test File + +Included with the starter code is also a test file in `anchor.test.ts`. + +```ts title="anchor.test.ts" +import { PublicKey } from "@solana/web3.js"; + +describe("pda", () => { + it("Create Message Account", async () => {}); + + it("Update Message Account", async () => {}); + + it("Delete Message Account", async () => {}); +}); +``` + +Add the code below inside `describe`, but before the `it` sections. + +```ts title="anchor.test.ts" +const program = pg.program; +const wallet = pg.wallet; + +const [messagePda, messageBump] = PublicKey.findProgramAddressSync( + [Buffer.from("message"), wallet.publicKey.toBuffer()], + program.programId, +); +``` + + + + +```diff + import { PublicKey } from "@solana/web3.js"; + + describe("pda", () => { ++ const program = pg.program; ++ const wallet = pg.wallet; ++ ++ const [messagePda, messageBump] = PublicKey.findProgramAddressSync( ++ [Buffer.from("message"), wallet.publicKey.toBuffer()], ++ program.programId ++ ); + + it("Create Message Account", async () => {}); + + it("Update Message Account", async () => {}); + + it("Delete Message Account", async () => {}); + }); +``` + + + + +In this section, we are simply setting up the test file. + +Solana Playground removes some boilerplate setup where `pg.program` allows us to +access the client library for interacting with the program, while `pg.wallet` is +your playground wallet. + +```ts title="anchor.test.ts" +const program = pg.program; +const wallet = pg.wallet; +``` + +As part of the setup, we derive the message account PDA. This demonstrates how +to derive the PDA in Javascript using the seeds specified in the program. + +```ts title="anchor.test.ts" +const [messagePda, messageBump] = PublicKey.findProgramAddressSync( + [Buffer.from("message"), wallet.publicKey.toBuffer()], + program.programId, +); +``` + + + + +Run the test file by running `test` in the Playground terminal to check the file +runs as expected. We will implement the tests in the following steps. + +```shell title="Terminal" +test +``` + + + + +```bash +$ test +Running tests... 
+ anchor.test.ts: + pda + ✔ Create Message Account + ✔ Update Message Account + ✔ Delete Message Account + 3 passing (4ms) +``` + + + + + + + +### Invoke Create Instruction + +Update the first test with the following: + +```ts title="anchor.test.ts" +it("Create Message Account", async () => { + const message = "Hello, World!"; + const transactionSignature = await program.methods + .create(message) + .accounts({ + messageAccount: messagePda, + }) + .rpc({ commitment: "confirmed" }); + + const messageAccount = await program.account.messageAccount.fetch( + messagePda, + "confirmed", + ); + + console.log(JSON.stringify(messageAccount, null, 2)); + console.log( + "Transaction Signature:", + `https://solana.fm/tx/${transactionSignature}?cluster=devnet-solana`, + ); +}); +``` + + + + +```diff +- it("Create Message Account", async () => {}); + ++ it("Create Message Account", async () => { ++ const message = "Hello, World!"; ++ const transactionSignature = await program.methods ++ .create(message) ++ .accounts({ ++ messageAccount: messagePda, ++ }) ++ .rpc({ commitment: "confirmed" }); ++ ++ const messageAccount = await program.account.messageAccount.fetch( ++ messagePda, ++ "confirmed" ++ ); ++ ++ console.log(JSON.stringify(messageAccount, null, 2)); ++ console.log( ++ "Transaction Signature:", ++ `https://solana.fm/tx/${transactionSignature}?cluster=devnet-solana` ++ ); ++ }); +``` + + + + +First, we send a transaction that invokes the `create` instruction, passing in +"Hello, World!" as the message. + +```ts title="anchor.test.ts" +const message = "Hello, World!"; +const transactionSignature = await program.methods + .create(message) + .accounts({ + messageAccount: messagePda, + }) + .rpc({ commitment: "confirmed" }); +``` + +Once the transaction is sent and the account is created, we then fetch the +account using its address (`messagePda`). + +```ts title="anchor.test.ts" +const messageAccount = await program.account.messageAccount.fetch( + messagePda, + "confirmed", +); +``` + +Lastly, we log the account data and a link to view the transaction details. 
+ +```ts title="anchor.test.ts" +console.log(JSON.stringify(messageAccount, null, 2)); +console.log( + "Transaction Signature:", + `https://solana.fm/tx/${transactionSignature}?cluster=devnet-solana`, +); +``` + + + + + + + +### Invoke Update Instruction + +Update the second test with the following: + +```ts title="anchor.test.ts" +it("Update Message Account", async () => { + const message = "Hello, Solana!"; + const transactionSignature = await program.methods + .update(message) + .accounts({ + messageAccount: messagePda, + }) + .rpc({ commitment: "confirmed" }); + + const messageAccount = await program.account.messageAccount.fetch( + messagePda, + "confirmed", + ); + + console.log(JSON.stringify(messageAccount, null, 2)); + console.log( + "Transaction Signature:", + `https://solana.fm/tx/${transactionSignature}?cluster=devnet-solana`, + ); +}); +``` + + + + +```diff +- it("Update Message Account", async () => {}); + ++ it("Update Message Account", async () => { ++ const message = "Hello, Solana!"; ++ const transactionSignature = await program.methods ++ .update(message) ++ .accounts({ ++ messageAccount: messagePda, ++ }) ++ .rpc({ commitment: "confirmed" }); ++ ++ const messageAccount = await program.account.messageAccount.fetch( ++ messagePda, ++ "confirmed" ++ ); ++ ++ console.log(JSON.stringify(messageAccount, null, 2)); ++ console.log( ++ "Transaction Signature:", ++ `https://solana.fm/tx/${transactionSignature}?cluster=devnet-solana` ++ ); ++ }); +``` + + + + +First, we send a transaction that invokes the `update` instruction, passing in +"Hello, Solana!" as the new message. + +```ts title="anchor.test.ts" +const message = "Hello, Solana!"; +const transactionSignature = await program.methods + .update(message) + .accounts({ + messageAccount: messagePda, + }) + .rpc({ commitment: "confirmed" }); +``` + +Once the transaction is sent and the account is updated, we then fetch the +account using its address (`messagePda`). + +```ts title="anchor.test.ts" +const messageAccount = await program.account.messageAccount.fetch( + messagePda, + "confirmed", +); +``` + +Lastly, we log the account data and a link to view the transaction details. 
+ +```ts title="anchor.test.ts" +console.log(JSON.stringify(messageAccount, null, 2)); +console.log( + "Transaction Signature:", + `https://solana.fm/tx/${transactionSignature}?cluster=devnet-solana`, +); +``` + + + + + + + +### Invoke Delete Instruction + +Update the third test with the following: + +```ts title="anchor.test.ts" +it("Delete Message Account", async () => { + const transactionSignature = await program.methods + .delete() + .accounts({ + messageAccount: messagePda, + }) + .rpc({ commitment: "confirmed" }); + + const messageAccount = await program.account.messageAccount.fetchNullable( + messagePda, + "confirmed", + ); + + console.log("Expect Null:", JSON.stringify(messageAccount, null, 2)); + console.log( + "Transaction Signature:", + `https://solana.fm/tx/${transactionSignature}?cluster=devnet-solana`, + ); +}); +``` + + + + +```diff +- it("Delete Message Account", async () => {}); + ++ it("Delete Message Account", async () => { ++ const transactionSignature = await program.methods ++ .delete() ++ .accounts({ ++ messageAccount: messagePda, ++ }) ++ .rpc({ commitment: "confirmed" }); ++ ++ const messageAccount = await program.account.messageAccount.fetchNullable( ++ messagePda, ++ "confirmed" ++ ); ++ ++ console.log("Expect Null:", JSON.stringify(messageAccount, null, 2)); ++ console.log( ++ "Transaction Signature:", ++ `https://solana.fm/tx/${transactionSignature}?cluster=devnet-solana` ++ ); ++ }); +``` + + + + +First, we send a transaction that invokes the `delete` instruction to close the +message account. + +```ts title="anchor.test.ts" +const transactionSignature = await program.methods + .delete() + .accounts({ + messageAccount: messagePda, + }) + .rpc({ commitment: "confirmed" }); +``` + +Once the transaction is sent and the account is closed, we attempt to fetch the +account using its address (`messagePda`) using `fetchNullable` since we expect +the return value to be null because the account is closed. + +```ts title="anchor.test.ts" +const messageAccount = await program.account.messageAccount.fetchNullable( + messagePda, + "confirmed", +); +``` + +Lastly, we log the account data and a link to view the transaction details where +the account data should be logged as null. + +```ts title="anchor.test.ts" +console.log(JSON.stringify(messageAccount, null, 2)); +console.log( + "Transaction Signature:", + `https://solana.fm/tx/${transactionSignature}?cluster=devnet-solana`, +); +``` + + + + + + + +### Run Test + +Once the tests are set up, run the test file by running `test` in the Playground +terminal. + +```shell title="Terminal" +test +``` + + + + +```bash +$ test +Running tests... 
+ anchor.test.ts: + pda + { + "user": "3z9vL1zjN6qyAFHhHQdWYRTFAcy69pJydkZmSFBKHg1R", + "message": "Hello, World!", + "bump": 254 +} + Transaction Signature: https://solana.fm/tx/5oBT4jEdUR6CRYsFNGoqvyMBTRDvFqRWTAAmCGM9rEvYRBWy3B2bkb6GVFpVPKBnkr714UCFUurBSDKSa7nLHo8e?cluster=devnet-solana + ✔ Create Message Account (1025ms) + { + "user": "3z9vL1zjN6qyAFHhHQdWYRTFAcy69pJydkZmSFBKHg1R", + "message": "Hello, Solana!", + "bump": 254 +} + Transaction Signature: https://solana.fm/tx/42veGAsQjHbJP1SxWBGcfYF7EdRN9X7bACNv23NSZNe4U7w2dmaYgSv8UUWXYzwgJPoNHejhtWdKZModHiMaTWYK?cluster=devnet-solana + ✔ Update Message Account (713ms) + Expect Null: null + Transaction Signature: https://solana.fm/tx/Sseog2i2X7uDEn2DyDMMJKVHeZEzmuhnqUwicwGhnGhstZo8URNwUZgED8o6HANiojJkfQbhXVbGNLdhsFtWrd6?cluster=devnet-solana + ✔ Delete Message Account (812ms) + 3 passing (3s) +``` + + + + + + + diff --git a/content/docs/intro/quick-start/reading-from-network.mdx b/content/docs/intro/quick-start/reading-from-network.mdx new file mode 100644 index 000000000..78a4727af --- /dev/null +++ b/content/docs/intro/quick-start/reading-from-network.mdx @@ -0,0 +1,423 @@ +--- +title: Reading from Network +description: + Learn how to read data from the Solana blockchain network. This guide covers + fetching wallet accounts, program accounts, and token mint accounts using + JavaScript/TypeScript, with practical examples using the Solana web3.js + library. +--- + +Now, let's explore how to read data from the Solana network. We'll fetch a few +different accounts to understand the structure of a Solana account. + +On Solana, all data is contained in what we call "accounts". You can think of +data on Solana as a public database with a single "Accounts" table, where each +entry in this table is an individual account. + +Accounts on Solana can store "state" or "executable" programs, all of which can +be thought of as entries in the same "Accounts" table. Each account has an +"address" (public key) that serves as its unique ID used to locate its +corresponding on-chain data. + +Solana accounts contain either: + +- State: This is data that's meant to be read from and persisted. It could be + information about tokens, user data, or any other type of data defined within + a program. +- Executable Programs: These are accounts that contain the actual code of Solana + programs. They contain the instructions that can be executed on the network. + +This separation of program code and program state is a key feature of Solana's +Account Model. For more details, refer to the +[Solana Account Model](/docs/core/accounts) page. + +## Fetch Playground Wallet + +Let's start by looking at a familiar account - your own Playground Wallet! We'll +fetch this account and examine its structure to understand what a basic Solana +account looks like. + + + + + +### Open Example 1 + +Click this [link](https://beta.solpg.io/6671c5e5cffcf4b13384d198) to open the +example in Solana Playground. 
You'll see this code: + +```ts title="client.ts" +const address = pg.wallet.publicKey; +const accountInfo = await pg.connection.getAccountInfo(address); + +console.log(JSON.stringify(accountInfo, null, 2)); +``` + + + + +This code does three simple things: + +- Gets your Playground wallet's address + + ```ts + const address = pg.wallet.publicKey; + ``` + +- Fetches the `AccountInfo` for the account at that address + + ```ts + const accountInfo = await pg.connection.getAccountInfo(address); + ``` + +- Prints out the `AccountInfo` to the Playground terminal + + ```ts + console.log(JSON.stringify(accountInfo, null, 2)); + ``` + + + + + + + +### Run Example 1 + +In the Playground terminal, type the `run` command and hit enter: + +```shell title="Terminal" +run +``` + +You should see details about your wallet account, including its balance in +lamports, with output similar to the following: + + + + +```shell title="Terminal" +$ run +Running client... + client.ts: + { + "data": { + "type": "Buffer", + "data": [] + }, + "executable": false, + "lamports": 5000000000, + "owner": "11111111111111111111111111111111", + "rentEpoch": 18446744073709552000, + "space": 0 +} +``` + + + + +Your wallet is actually just an account owned by the System Program, where the +main purpose of the wallet account is to store your SOL balance (amount in the +`lamports` field). + +--- + +At its core, all Solana accounts are represented in a standard format called the +`AccountInfo`. The +[AccountInfo](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/sdk/program/src/account_info.rs#L19-L36) +data type is the base data structure for all Solana Accounts. + +Let's break down the fields in the output: + +- `data` - This field contains what we generally refer to as the account "data". + For a wallet, it's empty (0 bytes), but other accounts use this field to store + any arbitrary data as a serialized buffer of bytes. + +> When data is "buffered" in this way, it maintains its integrity and can be +> later deserialized back into its original form for use in applications. This +> process is widely used in blockchain for efficient data handling. + +- `executable` - A flag that indicates whether the account is an executable + program. For wallets and any accounts that store state, this is `false`. +- `owner` - This field shows which program controls the account. For wallets, + it's always the System Program, with the address + `11111111111111111111111111111111`. +- `lamports` - The account's balance in lamports (1 SOL = 1,000,000,000 + lamports). +- `rentEpoch` - A legacy field related to Solana's deprecated rent collection + mechanism (currently not in use). +- `space` - Indicates byte capacity (length) of the `data` field, but is not a + field in the `AccountInfo` type + + + + + + + + +## Fetch Token Program + +Next, we'll examine the Token Extensions program, an executable program for +interacting with tokens on Solana. + + + + + +### Open Example 2 + +Click this [link](https://beta.solpg.io/6671c6e7cffcf4b13384d199) to open the +example in Solana Playground. You'll see this code: + +```ts title="client.ts" {3} +import { PublicKey } from "@solana/web3.js"; + +const address = new PublicKey("TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb"); +const accountInfo = await pg.connection.getAccountInfo(address); + +console.log(JSON.stringify(accountInfo, null, 2)); +``` + +Instead of fetching your Playground wallet, here we fetch the address of the +Token Extensions Program account. 
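+
+As a small optional check (a sketch, not part of the linked Playground
+example), you could also confirm in code that the fetched account really is an
+executable program before inspecting the full JSON output:
+
+```ts
+import { PublicKey } from "@solana/web3.js";
+
+const address = new PublicKey("TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb");
+const accountInfo = await pg.connection.getAccountInfo(address);
+
+// program accounts are flagged as executable and owned by a loader program
+console.log("executable:", accountInfo?.executable);
+console.log("owner:", accountInfo?.owner.toBase58());
+```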
+ + + + +### Run Example 2 + +Run the code using the `run` command in the terminal. + +```shell title="Terminal" +run +``` + +Examine the output and how this program account differs from your wallet +account. + + + + +```shell title="Terminal" {15, 17} +$ run +Running client... + client.ts: + { + "data": { + "type": "Buffer", + "data": [ + 2, + 0, + //... additional bytes + 86, + 51 + ] + }, + "executable": true, + "lamports": 1141440, + "owner": "BPFLoaderUpgradeab1e11111111111111111111111", + "rentEpoch": 18446744073709552000, + "space": 36 +} +``` + + + + +The Token Extensions program is an executable program account, but note that it +has the same `AccountInfo` structure. + +Key differences in the `AccountInfo`: + +- `executable` - Set to `true`, indicating this account represents an executable + program. +- `data` - Contains serialized data (unlike the empty data in a wallet account). + The data for a program account stores the address of another account (Program + Executable Data Account) that contains the program's bytecode. +- `owner` - The account is owned by the Upgradable BPF Loader + (`BPFLoaderUpgradeab1e11111111111111111111111`), a special program that + manages executable accounts. + +--- + +You can inspect the Solana Explorer for the +[Token Extensions Program Account](https://explorer.solana.com/address/TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb) +and its corresponding +[Program Executable Data Account](https://explorer.solana.com/address/DoU57AYuPFu2QU514RktNPG22QhApEjnKxnBcu4BHDTY). + +The Program Executable Data Account contains the compiled bytecode for the Token +Extensions Program +[source code](https://github.com/solana-labs/solana-program-library/tree/b1c44c171bc95e6ee74af12365cb9cbab68be76c/token/program-2022/src). + + + + + + + + +## Fetch Mint Account + +In this step, we'll examine a Mint account, which represents a unique token on +the Solana network. + + + + + +### Open Example 3 + +Click this [link](https://beta.solpg.io/6671c9aecffcf4b13384d19a) to open the +example in Solana Playground. You'll see this code: + +```ts title="client.ts" {3} +import { PublicKey } from "@solana/web3.js"; + +const address = new PublicKey("C33qt1dZGZSsqTrHdtLKXPZNoxs6U1ZBfyDkzmj6mXeR"); +const accountInfo = await pg.connection.getAccountInfo(address); + +console.log(JSON.stringify(accountInfo, null, 2)); +``` + +In this example, we'll fetch the address of an existing Mint account on devnet. + + + + +### Run Example 3 + +Run the code using the `run` command. + +```shell title="Terminal" +run +``` + + + + +```shell title="Terminal" {17} +$ run +Running client... + client.ts: + { + "data": { + "type": "Buffer", + "data": [ + 1, + 0, + //... additional bytes + 0, + 0 + ] + }, + "executable": false, + "lamports": 4176000, + "owner": "TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb", + "rentEpoch": 18446744073709552000, + "space": 430 +} +``` + + + + +Key differences in the `AccountInfo`: + +- `owner` - The mint account is owned by the Token Extensions program + (`TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb`). +- `executable` - Set to `false`, as this account stores state rather than + executable code. +- `data`: Contains serialized data about the token (mint authority, supply, + decimals, etc.). + + + + + + + +### Deserialize Mint Account Data + +To read the `data` field from any account, you need to deserialize the data +buffer into the expected data type. This is often done using helper functions +from client libraries for a particular program. 
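+
+To make that concrete, here is a minimal sketch of decoding a raw account's
+`data` buffer yourself. It assumes the `unpackMint` helper exported by
+`@solana/spl-token` and reuses the same devnet Mint address; it is not part of
+the Playground example linked below:
+
+```ts
+import { PublicKey } from "@solana/web3.js";
+import { unpackMint, TOKEN_2022_PROGRAM_ID } from "@solana/spl-token";
+
+const address = new PublicKey("C33qt1dZGZSsqTrHdtLKXPZNoxs6U1ZBfyDkzmj6mXeR");
+
+// fetch the raw account, then decode its data buffer into a Mint structure
+const accountInfo = await pg.connection.getAccountInfo(address);
+const mint = unpackMint(address, accountInfo, TOKEN_2022_PROGRAM_ID);
+
+console.log("decimals:", mint.decimals);
+console.log("supply:", mint.supply.toString());
+```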
+ +**Deserialization** is the process of converting data from a stored format (like +raw bytes or JSON) back into a usable, structured format in a program. In +blockchain, it involves taking raw, encoded data from the network and +transforming it back into objects, classes, or readable structures so developers +can access and manipulate specific information within a program. Deserialization +is essential for interpreting account or transaction data received from a +network in a form that a program can process and display meaningfully. + +Open this next [example](https://beta.solpg.io/6671cd8acffcf4b13384d19b) in +Solana Playground. You'll see this code: + +```ts title="client.ts" +import { PublicKey } from "@solana/web3.js"; +import { getMint, TOKEN_2022_PROGRAM_ID } from "@solana/spl-token"; + +const address = new PublicKey("C33qt1dZGZSsqTrHdtLKXPZNoxs6U1ZBfyDkzmj6mXeR"); +const mintData = await getMint( + pg.connection, + address, + "confirmed", + TOKEN_2022_PROGRAM_ID, +); + +console.log(mintData); +``` + +This example uses the `getMint` helper function to automatically deserialize the +data field of the Mint account. + +Run the code using the `run` command. + +```shell title="Terminal" +run +``` + +You should see the following deserialized Mint account data. + + + + +```shell title="Terminal" +Running client... + client.ts: + { address: { _bn: { negative: 0, words: [Object], length: 10, red: null } }, + mintAuthority: { _bn: { negative: 0, words: [Object], length: 10, red: null } }, + supply: {}, + decimals: 2, + isInitialized: true, + freezeAuthority: null, + tlvData: } +``` + + + + +The `getMint` function deserializes the account data into the +[Mint](https://github.com/solana-labs/solana-program-library/blob/b1c44c171bc95e6ee74af12365cb9cbab68be76c/token/program/src/state.rs#L18-L32) +data type defined in the Token Extensions program source code. + +- `address` - The Mint account's address +- `mintAuthority` - The authority allowed to mint new tokens +- `supply` - The total supply of tokens +- `decimals` - The number of decimal places for the token +- `isInitialized` - Whether the Mint data has been initialized +- `freezeAuthority` - The authority allowed to freeze token accounts +- `tlvData` - Additional data for Token Extensions (requires further + deserialization) + +You can view the fully deserialized +[Mint Account](https://explorer.solana.com/address/C33qt1dZGZSsqTrHdtLKXPZNoxs6U1ZBfyDkzmj6mXeR?cluster=devnet) +data, including enabled Token Extensions, on the Solana Explorer. + + + + + + + diff --git a/content/docs/intro/quick-start/writing-to-network.mdx b/content/docs/intro/quick-start/writing-to-network.mdx new file mode 100644 index 000000000..60d499672 --- /dev/null +++ b/content/docs/intro/quick-start/writing-to-network.mdx @@ -0,0 +1,377 @@ +--- +title: Writing to Network +description: + Learn how to interact with the Solana network by sending transactions and + instructions. Follow step-by-step examples to transfer SOL tokens and create + new tokens using the System Program and Token Extensions Program. +--- + +Now that we've explored reading from the Solana network, let's learn how to +write data to it. On Solana, we interact with the network by sending +transactions made up of instructions. These instructions are defined by +programs, which contain the business logic for how accounts should be updated. + +Let's walk through two common operations, transferring SOL and creating a token, +to demonstrate how to build and send transactions. 
For more details, refer to +the [Transactions and Instructions](/docs/core/transactions) and +[Fees on Solana](/docs/core/fees) pages. + +## Transfer SOL + +We'll start with a simple SOL transfer from your wallet to another account. This +requires invoking the transfer instruction on the System Program. + + + + + +### Open Example 1 + +Click this [link](https://beta.solpg.io/6671d85ecffcf4b13384d19e) to open the +example in Solana Playground. You'll see this code: + +```ts title="client.ts" +import { + LAMPORTS_PER_SOL, + SystemProgram, + Transaction, + sendAndConfirmTransaction, + Keypair, +} from "@solana/web3.js"; + +const sender = pg.wallet.keypair; +const receiver = new Keypair(); + +const transferInstruction = SystemProgram.transfer({ + fromPubkey: sender.publicKey, + toPubkey: receiver.publicKey, + lamports: 0.01 * LAMPORTS_PER_SOL, +}); + +const transaction = new Transaction().add(transferInstruction); + +const transactionSignature = await sendAndConfirmTransaction( + pg.connection, + transaction, + [sender], +); + +console.log( + "Transaction Signature:", + `https://solana.fm/tx/${transactionSignature}?cluster=devnet-solana`, +); +``` + + + + +This script does the following: + +- Set your Playground wallet as the sender + + ```ts + const sender = pg.wallet.keypair; + ``` + +- Creates a new keypair as the receiver + + ```ts + const receiver = new Keypair(); + ``` + +- Constructs a transfer instruction to transfer 0.01 SOL + + ```ts + const transferInstruction = SystemProgram.transfer({ + fromPubkey: sender.publicKey, + toPubkey: receiver.publicKey, + lamports: 0.01 * LAMPORTS_PER_SOL, + }); + ``` + +- Builds a transaction including the transfer instruction + + ```ts + const transaction = new Transaction().add(transferInstruction); + ``` + +- Sends and confirms the transaction + + ```ts + const transactionSignature = await sendAndConfirmTransaction( + pg.connection, + transaction, + [sender], + ); + ``` + +- Prints out a link to the SolanaFM explorer in the Playground terminal to view + the transaction details + + ```ts + console.log( + "Transaction Signature:", + `https://solana.fm/tx/${transactionSignature}?cluster=devnet-solana`, + ); + ``` + + + + + + + +### Run Example 1 + +Run the code using the `run` command. + +```shell title="Terminal" +run +``` + +Click on the output link to view the transaction details on the SolanaFM +explorer. + + + + +```shell title="Terminal" +Running client... + client.ts: + Transaction Signature: https://solana.fm/tx/he9dBwrEPhrfrx2BaX4cUmUbY22DEyqZ837zrGrFRnYEBmKhCb5SvoaUeRKSeLFXiGxC8hFY5eDbHqSJ7NYYo42?cluster=devnet-solana +``` + + + + +![Transfer SOL](/assets/docs/intro/quickstart/transfer-sol.png) + +You've just sent your first transaction on Solana! Notice how we created an +instruction, added it to a transaction, and then sent that transaction to the +network. This is the basic process for building any transaction. + + + + + +## Create a Token + +Now, let's create a new token by creating and initializing a Mint account. This +requires two instructions: + +- Invoke the System Program to create a new account +- Invoke the Token Extensions Program to initialize the account data + + + + + +### Open Example 2 + +Click this [link](https://beta.solpg.io/6671da4dcffcf4b13384d19f) to open the +example in Solana Playground. 
You'll see the following code: + +```ts title="client.ts" +import { + Connection, + Keypair, + SystemProgram, + Transaction, + clusterApiUrl, + sendAndConfirmTransaction, +} from "@solana/web3.js"; +import { + MINT_SIZE, + TOKEN_2022_PROGRAM_ID, + createInitializeMint2Instruction, + getMinimumBalanceForRentExemptMint, +} from "@solana/spl-token"; + +const wallet = pg.wallet; +const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + +// Generate keypair to use as address of mint account +const mint = new Keypair(); + +// Calculate minimum lamports for space required by mint account +const rentLamports = await getMinimumBalanceForRentExemptMint(connection); + +// Instruction to create new account with space for new mint account +const createAccountInstruction = SystemProgram.createAccount({ + fromPubkey: wallet.publicKey, + newAccountPubkey: mint.publicKey, + space: MINT_SIZE, + lamports: rentLamports, + programId: TOKEN_2022_PROGRAM_ID, +}); + +// Instruction to initialize mint account +const initializeMintInstruction = createInitializeMint2Instruction( + mint.publicKey, + 2, // decimals + wallet.publicKey, // mint authority + wallet.publicKey, // freeze authority + TOKEN_2022_PROGRAM_ID, +); + +// Build transaction with instructions to create new account and initialize mint account +const transaction = new Transaction().add( + createAccountInstruction, + initializeMintInstruction, +); + +const transactionSignature = await sendAndConfirmTransaction( + connection, + transaction, + [ + wallet.keypair, // payer + mint, // mint address keypair + ], +); + +console.log( + "\nTransaction Signature:", + `https://solana.fm/tx/${transactionSignature}?cluster=devnet-solana`, +); + +console.log( + "\nMint Account:", + `https://solana.fm/address/${mint.publicKey}?cluster=devnet-solana`, +); +``` + + + + +This script performs the following steps: + +- Sets up your Playground wallet and a connection to the Solana devnet + + ```ts + const wallet = pg.wallet; + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + ``` + +- Generates a new keypair for the mint account + + ```ts + const mint = new Keypair(); + ``` + +- Calculates the minimum lamports needed for a Mint account + + ```ts + const rentLamports = await getMinimumBalanceForRentExemptMint(connection); + ``` + +- Creates an instruction to create a new account for the mint, specifying the + Token Extensions program (`TOKEN_2022_PROGRAM_ID`) as the owner of the new + account + + ```ts + const createAccountInstruction = SystemProgram.createAccount({ + fromPubkey: wallet.publicKey, + newAccountPubkey: mint.publicKey, + space: MINT_SIZE, + lamports: rentLamports, + programId: TOKEN_2022_PROGRAM_ID, + }); + ``` + +- Creates an instruction to initialize the mint account data + + ```ts + const initializeMintInstruction = createInitializeMint2Instruction( + mint.publicKey, + 2, + wallet.publicKey, + wallet.publicKey, + TOKEN_2022_PROGRAM_ID, + ); + ``` + +- Adds both instructions to a single transaction + + ```ts + const transaction = new Transaction().add( + createAccountInstruction, + initializeMintInstruction, + ); + ``` + +- Sends and confirms the transaction. Both the wallet and mint keypair are + passed in as signers on the transaction. The wallet is required to pay for the + creation of the new account. The mint keypair is required because we are using + its publickey as the address of the new account. 
+ + ```ts + const transactionSignature = await sendAndConfirmTransaction( + connection, + transaction, + [wallet.keypair, mint], + ); + ``` + +- Prints out links to view the transaction and mint account details on SolanaFM + + ```ts + console.log( + "\nTransaction Signature:", + `https://solana.fm/tx/${transactionSignature}?cluster=devnet-solana`, + ); + + console.log( + "\nMint Account:", + `https://solana.fm/address/${mint.publicKey}?cluster=devnet-solana`, + ); + ``` + + + + + + + +### Run Example 2 + +Run the code using the `run` command. + +```shell title="Terminal" +run +``` + +You'll see two links printed to the Playground terminal: + +- One for the transaction details +- One for the newly created mint account + +Click the links to inspect the transaction details and the newly created mint +account on SolanaFM. + + + + +```shell title="Terminal" +Running client... + client.ts: + +Transaction Signature: https://solana.fm/tx/3BEjFxqyGwHXWSrEBnc7vTSaXUGDJFY1Zr6L9iwLrjH8KBZdJSucoMrFUEJgWrWVRYzrFvbjX8TmxKUV88oKr86g?cluster=devnet-solana + +Mint Account: https://solana.fm/address/CoZ3Nz488rmATDhy1hPk5fvwSZaipCngvf8rYBYVc4jN?cluster=devnet-solana +``` + + + + +![Create Token](/assets/docs/intro/quickstart/create-token.png) + +![Mint Account](/assets/docs/intro/quickstart/mint-account.png) + +Notice how we built a transaction with multiple instructions this time. We first +created a new account and then initialized its data as a mint. This is how you +build more complex transactions that involve instructions from multiple +programs. + + + + diff --git a/content/docs/intro/wallets.mdx b/content/docs/intro/wallets.mdx new file mode 100644 index 000000000..f16d71d1e --- /dev/null +++ b/content/docs/intro/wallets.mdx @@ -0,0 +1,68 @@ +--- +title: Wallets +h1: Solana Wallet Guide +--- + +This document describes the different wallet options that are available to users +of Solana who want to be able to send, receive and interact with SOL tokens on +the Solana blockchain. + +## What is a Wallet? + +A crypto wallet is a device or application that stores a collection of keys and +can be used to send, receive, and track ownership of cryptocurrencies. Wallets +can take many forms. A wallet might be a directory or file in your computer's +file system, a piece of paper, or a specialized device called a _hardware +wallet_. There are also various smartphone apps and computer programs that +provide a user-friendly way to create and manage wallets. + +### Keypair + +A [_keypair_](/docs/terminology#keypair) is a securely generated +[_secret key_](#secret-key) and its cryptographically-derived +[_public key_](#public-key). A secret key and its corresponding public key are +together known as a _keypair_. A wallet contains a collection of one or more +keypairs and provides some means to interact with them. + +### Public key + +The [_public key_](/docs/terminology#public-key-pubkey) (commonly shortened +to _pubkey_) is known as the wallet's _receiving address_ or simply its +_address_. The wallet address **may be shared and displayed freely**. When +another party is going to send some amount of cryptocurrency to a wallet, they +need to know the wallet's receiving address. Depending on a blockchain's +implementation, the address can also be used to view certain information about a +wallet, such as viewing the balance, but has no ability to change anything about +the wallet or withdraw any tokens. 
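+
+For developers, this relationship is easy to see with a small sketch (assuming
+the `@solana/web3.js` library; wallets built on other stacks work the same
+way): the public key is the address you hand out, while the secret key
+described next never leaves the wallet.
+
+```ts
+import { Keypair } from "@solana/web3.js";
+
+// a wallet keypair: the public key is the shareable receiving address
+const keypair = Keypair.generate();
+console.log("Address (safe to share):", keypair.publicKey.toBase58());
+
+// keypair.secretKey (64 bytes) must be kept private -- see the next section
+```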
+ +### Secret key + +The [_secret key_](/docs/terminology#private-key) (also referred to as +_private key_) is required to digitally sign any transactions to send +cryptocurrencies to another address or to make any changes to the wallet. The +secret key **must never be shared**. If someone gains access to the secret key +to a wallet, they can withdraw all the tokens it contains. If the secret key for +a wallet is lost, any tokens that have been sent to that wallet's address are +**permanently lost**. + +## Security + +Different wallet solutions offer different approaches to keypair security, +interacting with the keypair, and signing transactions to use/spend the tokens. +Some are easier to use than others. Some store and back up secret keys more +securely. Solana supports multiple types of wallets so you can choose the right +balance of security and convenience. + +**If you want to be able to receive SOL tokens on the Solana blockchain, you +first will need to create a wallet.** + +## Supported Wallets + +Several browser and mobile app based wallets support Solana. Find some options +that might be right for you on the [Solana Wallets](/wallets) +page. + +For advanced users or developers, the +[command-line wallets](https://docs.anza.xyz/cli/wallets) may be more +appropriate, as new features on the Solana blockchain will always be supported +on the command line first before being integrated into third-party solutions. diff --git a/content/docs/meta.json b/content/docs/meta.json new file mode 100644 index 000000000..deb05a131 --- /dev/null +++ b/content/docs/meta.json @@ -0,0 +1,14 @@ +{ + "title": "Solana Documentation", + "pages": [ + "intro", + "rpc", + "core", + "toolkit", + "clients", + "programs", + "advanced", + "economics", + "more" + ] +} diff --git a/content/docs/more/exchange.mdx b/content/docs/more/exchange.mdx new file mode 100644 index 000000000..72581e0f7 --- /dev/null +++ b/content/docs/more/exchange.mdx @@ -0,0 +1,1294 @@ +--- +title: Add Solana to Your Exchange +--- + +This guide describes how to add Solana's native token SOL to your cryptocurrency +exchange. + +## Node Setup + +We highly recommend setting up at least two nodes on high-grade computers/cloud +instances, upgrading to newer versions promptly, and keeping an eye on service +operations with a bundled monitoring tool. + +This setup enables you: + +- to have a self-administered gateway to the Solana mainnet-beta cluster to get + data and submit withdrawal transactions +- to have full control over how much historical block data is retained +- to maintain your service availability even if one node fails + +Solana nodes demand relatively high computing power to handle our fast blocks +and high TPS. For specific requirements, please see +[hardware recommendations](https://docs.anza.xyz/operations/requirements). + +To run an api node: + +1. [Install the Solana command-line tool suite](/docs/intro/installation) +2. Start the validator with at least the following parameters: + +```shell +solana-validator \ + --ledger \ + --identity \ + --entrypoint \ + --expected-genesis-hash \ + --rpc-port 8899 \ + --no-voting \ + --enable-rpc-transaction-history \ + --limit-ledger-size \ + --known-validator \ + --only-known-rpc +``` + +Customize `--ledger` to your desired ledger storage location, and `--rpc-port` +to the port you want to expose. + +The `--entrypoint` and `--expected-genesis-hash` parameters are all specific to +the cluster you are joining. 
+[Current parameters for Mainnet Beta](https://docs.anza.xyz/clusters/available#example-solana-validator-command-line-2) + +The `--limit-ledger-size` parameter allows you to specify how many ledger +[shreds](/docs/terminology#shred) your node retains on disk. If you do not +include this parameter, the validator will keep the entire ledger until it runs +out of disk space. The default value attempts to keep the ledger disk usage +under 500GB. More or less disk usage may be requested by adding an argument to +`--limit-ledger-size` if desired. Check `solana-validator --help` for the +default limit value used by `--limit-ledger-size`. More information about +selecting a custom limit value is +[available here](https://github.com/solana-labs/solana/blob/583cec922b6107e0f85c7e14cb5e642bc7dfb340/core/src/ledger_cleanup_service.rs#L15-L26). + +Specifying one or more `--known-validator` parameters can protect you from +booting from a malicious snapshot. +[More on the value of booting with known validators](https://docs.anza.xyz/operations/guides/validator-start#known-validators) + +Optional parameters to consider: + +- `--private-rpc` prevents your RPC port from being published for use by other + nodes +- `--rpc-bind-address` allows you to specify a different IP address to bind the + RPC port + +### Automatic Restarts and Monitoring + +We recommend configuring each of your nodes to restart automatically on exit, to +ensure you miss as little data as possible. Running the solana software as a +systemd service is one great option. + +For monitoring, we provide +[`solana-watchtower`](https://github.com/solana-labs/solana/blob/master/watchtower/README.md), +which can monitor your validator and detect with the `solana-validator` process +is unhealthy. It can directly be configured to alert you via Slack, Telegram, +Discord, or Twillio. For details, run `solana-watchtower --help`. + +```shell +solana-watchtower --validator-identity +``` + +> You can find more information about the +> [best practices for Solana Watchtower](https://docs.anza.xyz/operations/best-practices/monitoring#solana-watchtower) +> here in the docs. + +#### New Software Release Announcements + +We release new software frequently (around 1 release / week). Sometimes newer +versions include incompatible protocol changes, which necessitate timely +software update to avoid errors in processing blocks. + +Our official release announcements for all kinds of releases (normal and +security) are communicated via a [discord](/discord) channel +called `#mb-announcement` (`mb` stands for `mainnet-beta`). + +Like staked validators, we expect any exchange-operated validators to be updated +at your earliest convenience within a business day or two after a normal release +announcement. For security-related releases, more urgent action may be needed. + +### Ledger Continuity + +By default, each of your nodes will boot from a snapshot provided by one of your +known validators. This snapshot reflects the current state of the chain, but +does not contain the complete historical ledger. If one of your node exits and +boots from a new snapshot, there may be a gap in the ledger on that node. In +order to prevent this issue, add the `--no-snapshot-fetch` parameter to your +`solana-validator` command to receive historical ledger data instead of a +snapshot. + +Do not pass the `--no-snapshot-fetch` parameter on your initial boot as it's not +possible to boot the node all the way from the genesis block. 
Instead boot from +a snapshot first and then add the `--no-snapshot-fetch` parameter for reboots. + +It is important to note that the amount of historical ledger available to your +nodes from the rest of the network is limited at any point in time. Once +operational if your validators experience significant downtime they may not be +able to catch up to the network and will need to download a new snapshot from a +known validator. In doing so your validators will now have a gap in its +historical ledger data that cannot be filled. + +### Minimizing Validator Port Exposure + +The validator requires that various UDP and TCP ports be open for inbound +traffic from all other Solana validators. While this is the most efficient mode +of operation, and is strongly recommended, it is possible to restrict the +validator to only require inbound traffic from one other Solana validator. + +First add the `--restricted-repair-only-mode` argument. This will cause the +validator to operate in a restricted mode where it will not receive pushes from +the rest of the validators, and instead will need to continually poll other +validators for blocks. The validator will only transmit UDP packets to other +validators using the _Gossip_ and _ServeR_ ("serve repair") ports, and only +receive UDP packets on its _Gossip_ and _Repair_ ports. + +The _Gossip_ port is bi-directional and allows your validator to remain in +contact with the rest of the cluster. Your validator transmits on the _ServeR_ +to make repair requests to obtaining new blocks from the rest of the network, +since Turbine is now disabled. Your validator will then receive repair responses +on the _Repair_ port from other validators. + +To further restrict the validator to only requesting blocks from one or more +validators, first determine the identity pubkey for that validator and add the +`--gossip-pull-validator PUBKEY --repair-validator PUBKEY` arguments for each +PUBKEY. This will cause your validator to be a resource drain on each validator +that you add, so please do this sparingly and only after consulting with the +target validator. + +Your validator should now only be communicating with the explicitly listed +validators and only on the _Gossip_, _Repair_ and _ServeR_ ports. + +## Setting up Deposit Accounts + +Solana accounts do not require any on-chain initialization; once they contain +some SOL, they exist. To set up a deposit account for your exchange, simply +generate a Solana keypair using any of our +[wallet tools](https://docs.anza.xyz/cli/wallets). + +We recommend using a unique deposit account for each of your users. + +Solana accounts must be made rent-exempt by containing 2-years worth of +[rent](/docs/core/fees#rent) in SOL. In order to find the minimum rent-exempt +balance for your deposit accounts, query the +[`getMinimumBalanceForRentExemption` endpoint](/docs/rpc/http/getminimumbalanceforrentexemption): + +```shell +curl https://api.devnet.solana.com -X POST -H "Content-Type: application/json" -d '{ + "jsonrpc": "2.0", + "id": 1, + "method": "getMinimumBalanceForRentExemption", + "params": [0] +}' +``` + +##### Result + +```json +{ "jsonrpc": "2.0", "result": 890880, "id": 1 } +``` + +### Offline Accounts + +You may wish to keep the keys for one or more collection accounts offline for +greater security. If so, you will need to move SOL to hot accounts using our +[offline methods](https://docs.anza.xyz/cli/examples/offline-signing). 
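+
+Before moving on to deposit detection, here is a minimal TypeScript sketch of
+the deposit-account setup described above (assuming `@solana/web3.js`; the
+devnet endpoint is used only for illustration):
+
+```typescript
+import { Connection, Keypair, clusterApiUrl } from "@solana/web3.js";
+
+const connection = new Connection(clusterApiUrl("devnet"), "confirmed");
+
+// generate one unique deposit keypair per user; store the secret key securely
+const depositAccount = Keypair.generate();
+console.log("Deposit address:", depositAccount.publicKey.toBase58());
+
+// minimum lamports a basic 0-byte account must hold to be rent exempt
+const minBalance = await connection.getMinimumBalanceForRentExemption(0);
+console.log("Rent-exempt minimum (lamports):", minBalance);
+```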
+ +## Listening for Deposits + +When a user wants to deposit SOL into your exchange, instruct them to send a +transfer to the appropriate deposit address. + +### Versioned Transaction Migration + +When the Mainnet Beta network starts processing versioned transactions, +exchanges **MUST** make changes. If no changes are made, deposit detection will +no longer work properly because fetching a versioned transaction or a block +containing versioned transactions will return an error. + +- `{"maxSupportedTransactionVersion": 0}` + + The `maxSupportedTransactionVersion` parameter must be added to `getBlock` and + `getTransaction` requests to avoid disruption to deposit detection. The latest + transaction version is `0` and should be specified as the max supported + transaction version value. + +It's important to understand that versioned transactions allow users to create +transactions that use another set of account keys loaded from on-chain address +lookup tables. + +- `{"encoding": "jsonParsed"}` + + When fetching blocks and transactions, it's now recommended to use the + `"jsonParsed"` encoding because it includes all transaction account keys + (including those from lookup tables) in the message `"accountKeys"` list. This + makes it straightforward to resolve balance changes detailed in `preBalances` + / `postBalances` and `preTokenBalances` / `postTokenBalances`. + + If the `"json"` encoding is used instead, entries in `preBalances` / + `postBalances` and `preTokenBalances` / `postTokenBalances` may refer to + account keys that are **NOT** in the `"accountKeys"` list and need to be + resolved using `"loadedAddresses"` entries in the transaction metadata. + +### Poll for Blocks + +To track all the deposit accounts for your exchange, poll for each confirmed +block and inspect for addresses of interest, using the JSON-RPC service of your +Solana API node. + +- To identify which blocks are available, send a + [`getBlocks`](/docs/rpc/http/getblocks) request, passing the last block + you have already processed as the start-slot parameter: + +```shell +curl https://api.devnet.solana.com -X POST -H "Content-Type: application/json" -d '{ + "jsonrpc": "2.0", + "id": 1, + "method": "getBlocks", + "params": [160017005, 160017015] +}' +``` + +##### Result + +```json +{ + "jsonrpc": "2.0", + "result": [ + 160017005, 160017006, 160017007, 160017012, 160017013, 160017014, 160017015 + ], + "id": 1 +} +``` + +Not every slot produces a block, so there may be gaps in the sequence of +integers. + +- For each block, request its contents with a + [`getBlock`](/docs/rpc/http/getblock) request: + +### Block Fetching Tips + +- `{"rewards": false}` + +By default, fetched blocks will return information about validator fees on each +block and staking rewards on epoch boundaries. If you don't need this +information, disable it with the "rewards" parameter. + +- `{"transactionDetails": "accounts"}` + +By default, fetched blocks will return a lot of transaction info and metadata +that isn't necessary for tracking account balances. Set the "transactionDetails" +parameter to speed up block fetching. 
+ +```shell +curl https://api.devnet.solana.com -X POST -H 'Content-Type: application/json' -d '{ + "jsonrpc": "2.0", + "id": 1, + "method": "getBlock", + "params": [ + 166974442, + { + "encoding": "jsonParsed", + "maxSupportedTransactionVersion": 0, + "transactionDetails": "accounts", + "rewards": false + } + ] +}' +``` + +##### Result + +```json +{ + "jsonrpc": "2.0", + "result": { + "blockHeight": 157201607, + "blockTime": 1665070281, + "blockhash": "HKhao674uvFc4wMK1Cm3UyuuGbKExdgPFjXQ5xtvsG3o", + "parentSlot": 166974441, + "previousBlockhash": "98CNLU4rsYa2HDUyp7PubU4DhwYJJhSX9v6pvE7SWsAo", + "transactions": [ + ... (omit) + { + "meta": { + "err": null, + "fee": 5000, + "postBalances": [ + 1110663066, + 1, + 1040000000 + ], + "postTokenBalances": [], + "preBalances": [ + 1120668066, + 1, + 1030000000 + ], + "preTokenBalances": [], + "status": { + "Ok": null + } + }, + "transaction": { + "accountKeys": [ + { + "pubkey": "9aE476sH92Vz7DMPyq5WLPkrKWivxeuTKEFKd2sZZcde", + "signer": true, + "source": "transaction", + "writable": true + }, + { + "pubkey": "11111111111111111111111111111111", + "signer": false, + "source": "transaction", + "writable": false + }, + { + "pubkey": "G1wZ113tiUHdSpQEBcid8n1x8BAvcWZoZgxPKxgE5B7o", + "signer": false, + "source": "lookupTable", + "writable": true + } + ], + "signatures": [ + "2CxNRsyRT7y88GBwvAB3hRg8wijMSZh3VNYXAdUesGSyvbRJbRR2q9G1KSEpQENmXHmmMLHiXumw4dp8CvzQMjrM" + ] + }, + "version": 0 + }, + ... (omit) + ] + }, + "id": 1 +} +``` + +The `preBalances` and `postBalances` fields allow you to track the balance +changes in every account without having to parse the entire transaction. They +list the starting and ending balances of each account in +[lamports](/docs/terminology#lamport), indexed to the `accountKeys` list. For +example, if the deposit address of interest is +`G1wZ113tiUHdSpQEBcid8n1x8BAvcWZoZgxPKxgE5B7o`, this transaction represents a +transfer of 1040000000 - 1030000000 = 10,000,000 lamports = 0.01 SOL + +If you need more information about the transaction type or other specifics, you +can request the block from RPC in binary format, and parse it using either our +[Rust SDK](https://github.com/solana-labs/solana) or +[Javascript SDK](https://github.com/solana-labs/solana-web3.js). + +### Address History + +You can also query the transaction history of a specific address. This is +generally _not_ a viable method for tracking all your deposit addresses over all +slots, but may be useful for examining a few accounts for a specific period of +time. 
+ +- Send a [`getSignaturesForAddress`](/docs/rpc/http/getsignaturesforaddress) + request to the api node: + +```shell +curl https://api.devnet.solana.com -X POST -H "Content-Type: application/json" -d '{ + "jsonrpc": "2.0", + "id": 1, + "method": "getSignaturesForAddress", + "params": [ + "3M2b3tLji7rvscqrLAHMukYxDK2nB96Q9hwfV6QkdzBN", + { + "limit": 3 + } + ] +}' +``` + +##### Result + +```json +{ + "jsonrpc": "2.0", + "result": [ + { + "blockTime": 1662064640, + "confirmationStatus": "finalized", + "err": null, + "memo": null, + "signature": "3EDRvnD5TbbMS2mCusop6oyHLD8CgnjncaYQd5RXpgnjYUXRCYwiNPmXb6ZG5KdTK4zAaygEhfdLoP7TDzwKBVQp", + "slot": 148697216 + }, + { + "blockTime": 1662064434, + "confirmationStatus": "finalized", + "err": null, + "memo": null, + "signature": "4rPQ5wthgSP1kLdLqcRgQnkYkPAZqjv5vm59LijrQDSKuL2HLmZHoHjdSLDXXWFwWdaKXUuryRBGwEvSxn3TQckY", + "slot": 148696843 + }, + { + "blockTime": 1662064341, + "confirmationStatus": "finalized", + "err": null, + "memo": null, + "signature": "36Q383JMiqiobuPV9qBqy41xjMsVnQBm9rdZSdpbrLTGhSQDTGZJnocM4TQTVfUGfV2vEX9ZB3sex6wUBUWzjEvs", + "slot": 148696677 + } + ], + "id": 1 +} +``` + +- For each signature returned, get the transaction details by sending a + [`getTransaction`](/docs/rpc/http/gettransaction) request: + +```shell +curl https://api.devnet.solana.com -X POST -H 'Content-Type: application/json' -d '{ + "jsonrpc":"2.0", + "id":1, + "method":"getTransaction", + "params":[ + "2CxNRsyRT7y88GBwvAB3hRg8wijMSZh3VNYXAdUesGSyvbRJbRR2q9G1KSEpQENmXHmmMLHiXumw4dp8CvzQMjrM", + { + "encoding":"jsonParsed", + "maxSupportedTransactionVersion":0 + } + ] +}' +``` + +##### Result + +```json +{ + "jsonrpc": "2.0", + "result": { + "blockTime": 1665070281, + "meta": { + "err": null, + "fee": 5000, + "innerInstructions": [], + "logMessages": [ + "Program 11111111111111111111111111111111 invoke [1]", + "Program 11111111111111111111111111111111 success" + ], + "postBalances": [1110663066, 1, 1040000000], + "postTokenBalances": [], + "preBalances": [1120668066, 1, 1030000000], + "preTokenBalances": [], + "rewards": [], + "status": { + "Ok": null + } + }, + "slot": 166974442, + "transaction": { + "message": { + "accountKeys": [ + { + "pubkey": "9aE476sH92Vz7DMPyq5WLPkrKWivxeuTKEFKd2sZZcde", + "signer": true, + "source": "transaction", + "writable": true + }, + { + "pubkey": "11111111111111111111111111111111", + "signer": false, + "source": "transaction", + "writable": false + }, + { + "pubkey": "G1wZ113tiUHdSpQEBcid8n1x8BAvcWZoZgxPKxgE5B7o", + "signer": false, + "source": "lookupTable", + "writable": true + } + ], + "addressTableLookups": [ + { + "accountKey": "4syr5pBaboZy4cZyF6sys82uGD7jEvoAP2ZMaoich4fZ", + "readonlyIndexes": [], + "writableIndexes": [3] + } + ], + "instructions": [ + { + "parsed": { + "info": { + "destination": "G1wZ113tiUHdSpQEBcid8n1x8BAvcWZoZgxPKxgE5B7o", + "lamports": 10000000, + "source": "9aE476sH92Vz7DMPyq5WLPkrKWivxeuTKEFKd2sZZcde" + }, + "type": "transfer" + }, + "program": "system", + "programId": "11111111111111111111111111111111" + } + ], + "recentBlockhash": "BhhivDNgoy4L5tLtHb1s3TP19uUXqKiy4FfUR34d93eT" + }, + "signatures": [ + "2CxNRsyRT7y88GBwvAB3hRg8wijMSZh3VNYXAdUesGSyvbRJbRR2q9G1KSEpQENmXHmmMLHiXumw4dp8CvzQMjrM" + ] + }, + "version": 0 + }, + "id": 1 +} +``` + +## Sending Withdrawals + +To accommodate a user's request to withdraw SOL, you must generate a Solana +transfer transaction, and send it to the api node to be forwarded to your +cluster. 
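+
+The sections below cover the synchronous and asynchronous flows in detail. As a
+minimal sketch of the JavaScript approach with `@solana/web3.js` (the keypair,
+destination, and amount are placeholders), a SOL withdrawal is simply a System
+Program transfer:
+
+```typescript
+import {
+  Connection,
+  Keypair,
+  LAMPORTS_PER_SOL,
+  SystemProgram,
+  Transaction,
+  sendAndConfirmTransaction,
+} from "@solana/web3.js";
+
+const connection = new Connection("http://localhost:8899", "confirmed");
+
+// placeholders: load your hot wallet keypair and the user's validated address
+const hotWallet = Keypair.generate();
+const destination = Keypair.generate().publicKey;
+
+const transaction = new Transaction().add(
+  SystemProgram.transfer({
+    fromPubkey: hotWallet.publicKey,
+    toPubkey: destination,
+    lamports: 0.1 * LAMPORTS_PER_SOL,
+  }),
+);
+
+// waits for confirmation; see the asynchronous section for the alternative
+const signature = await sendAndConfirmTransaction(connection, transaction, [
+  hotWallet,
+]);
+console.log("Withdrawal signature:", signature);
+```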
+ +### Synchronous + +Sending a synchronous transfer to the Solana cluster allows you to easily ensure +that a transfer is successful and finalized by the cluster. + +Solana's command-line tool offers a simple command, `solana transfer`, to +generate, submit, and confirm transfer transactions. By default, this method +will wait and track progress on stderr until the transaction has been finalized +by the cluster. If the transaction fails, it will report any transaction errors. + +```shell +solana transfer --allow-unfunded-recipient --keypair --url http://localhost:8899 +``` + +The [Solana Javascript SDK](https://github.com/solana-labs/solana-web3.js) +offers a similar approach for the JS ecosystem. Use the `SystemProgram` to build +a transfer transaction, and submit it using the `sendAndConfirmTransaction` +method. + +### Asynchronous + +For greater flexibility, you can submit withdrawal transfers asynchronously. In +these cases, it is your responsibility to verify that the transaction succeeded +and was finalized by the cluster. + +**Note:** Each transaction contains a +[recent blockhash](/docs/core/transactions#recent-blockhash) to indicate its +liveness. It is **critical** to wait until this blockhash expires before +retrying a withdrawal transfer that does not appear to have been confirmed or +finalized by the cluster. Otherwise, you risk a double spend. See more on +[blockhash expiration](#blockhash-expiration) below. + +First, get a recent blockhash using the +[`getFees`](/docs/rpc/deprecated/getfees) endpoint or the CLI command: + +```shell +solana fees --url http://localhost:8899 +``` + +In the command-line tool, pass the `--no-wait` argument to send a transfer +asynchronously, and include your recent blockhash with the `--blockhash` +argument: + +```shell +solana transfer --no-wait --allow-unfunded-recipient --blockhash --keypair --url http://localhost:8899 +``` + +You can also build, sign, and serialize the transaction manually, and fire it +off to the cluster using the JSON-RPC +[`sendTransaction`](/docs/rpc/http/sendtransaction) endpoint. + +#### Transaction Confirmations & Finality + +Get the status of a batch of transactions using the +[`getSignatureStatuses`](/docs/rpc/http/getsignaturestatuses) JSON-RPC +endpoint. The `confirmations` field reports how many +[confirmed blocks](/docs/terminology#confirmed-block) have elapsed since the +transaction was processed. If `confirmations: null`, it is +[finalized](/docs/terminology#finality). + +```shell +curl https://api.devnet.solana.com -X POST -H "Content-Type: application/json" -d '{ + "jsonrpc":"2.0", + "id":1, + "method":"getSignatureStatuses", + "params":[ + [ + "5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW", + "5j7s6NiJS3JAkvgkoc18WVAsiSaci2pxB2A6ueCJP4tprA2TFg9wSyTLeYouxPBJEMzJinENTkpA52YStRW5Dia7" + ] + ] +}' +``` + +##### Result + +```json +{ + "jsonrpc": "2.0", + "result": { + "context": { + "slot": 82 + }, + "value": [ + { + "slot": 72, + "confirmations": 10, + "err": null, + "status": { + "Ok": null + } + }, + { + "slot": 48, + "confirmations": null, + "err": null, + "status": { + "Ok": null + } + } + ] + }, + "id": 1 +} +``` + +#### Blockhash Expiration + +You can check whether a particular blockhash is still valid by sending a +[`getFeeCalculatorForBlockhash`](/docs/rpc/deprecated/getfeecalculatorforblockhash) +request with the blockhash as a parameter. 
If the response value is `null`, the +blockhash is expired, and the withdrawal transaction using that blockhash should +never succeed. + +### Validating User-supplied Account Addresses for Withdrawals + +As withdrawals are irreversible, it may be a good practice to validate a +user-supplied account address before authorizing a withdrawal in order to +prevent accidental loss of user funds. + +#### Basic verification + +Solana addresses a 32-byte array, encoded with the bitcoin base58 alphabet. This +results in an ASCII text string matching the following regular expression: + +```text +[1-9A-HJ-NP-Za-km-z]{32,44} +``` + +This check is insufficient on its own as Solana addresses are not checksummed, +so typos cannot be detected. To further validate the user's input, the string +can be decoded and the resulting byte array's length confirmed to be 32. +However, there are some addresses that can decode to 32 bytes despite a typo +such as a single missing character, reversed characters and ignored case + +#### Advanced verification + +Due to the vulnerability to typos described above, it is recommended that the +balance be queried for candidate withdraw addresses and the user prompted to +confirm their intentions if a non-zero balance is discovered. + +#### Valid ed25519 pubkey check + +The address of a normal account in Solana is a Base58-encoded string of a +256-bit ed25519 public key. Not all bit patterns are valid public keys for the +ed25519 curve, so it is possible to ensure user-supplied account addresses are +at least correct ed25519 public keys. + +#### Java + +Here is a Java example of validating a user-supplied address as a valid ed25519 +public key: + +The following code sample assumes you're using the Maven. + +`pom.xml`: + +```xml + + ... + + spring + https://repo.spring.io/libs-release/ + + + +... + + + ... + + io.github.novacrypto + Base58 + 0.1.3 + + + cafe.cryptography + curve25519-elisabeth + 0.1.0 + + +``` + +```java +import io.github.novacrypto.base58.Base58; +import cafe.cryptography.curve25519.CompressedEdwardsY; + +public class PubkeyValidator +{ + public static boolean verifyPubkey(String userProvidedPubkey) + { + try { + return _verifyPubkeyInternal(userProvidedPubkey); + } catch (Exception e) { + return false; + } + } + + public static boolean _verifyPubkeyInternal(String maybePubkey) throws Exception + { + byte[] bytes = Base58.base58Decode(maybePubkey); + return !(new CompressedEdwardsY(bytes)).decompress().isSmallOrder(); + } +} +``` + +## Minimum Deposit & Withdrawal Amounts + +Every deposit and withdrawal of SOL must be greater or equal to the minimum +rent-exempt balance for the account at the wallet address (a basic SOL account +holding no data), currently: 0.000890880 SOL + +Similarly, every deposit account must contain at least this balance. + +```shell +curl https://api.devnet.solana.com -X POST -H "Content-Type: application/json" -d '{ + "jsonrpc": "2.0", + "id": 1, + "method": "getMinimumBalanceForRentExemption", + "params": [0] +}' +``` + +##### Result + +```json +{ "jsonrpc": "2.0", "result": 890880, "id": 1 } +``` + +## Prioritization Fees and Compute Units + +In periods of high demand, it's possible for a transaction to expire before a +validator has included such transactions in their block because they chose other +transactions with higher economic value. Valid Transactions on Solana may be +delayed or dropped if Prioritization Fees are not implemented properly. 
+
+[Prioritization Fees](/docs/terminology#prioritization-fee) are additional
+fees that can be added on top of the
+[base Transaction Fee](/docs/core/fees#transaction-fees) to make a transaction
+more attractive for block inclusion in these situations and to help ensure it
+is delivered.
+
+These priority fees are added to a transaction by including a special Compute
+Budget instruction that sets the desired priority fee to be paid.
+
+
+
+Failure to implement these instructions may result in network disruptions and
+dropped transactions. It is strongly recommended that every exchange supporting
+Solana make use of priority fees to avoid disruption.
+
+
+
+### What is a Prioritization Fee?
+
+Prioritization Fees are priced in micro-lamports per Compute Unit (i.e. very
+small amounts of SOL) and are added to transactions to make them economically
+compelling for validator nodes to include within blocks on the network.
+
+### How much should the Prioritization Fee be?
+
+To set your prioritization fee, query recent prioritization fees and choose a
+fee that is likely to be compelling for the network. Using the
+[`getRecentPrioritizationFees`](/docs/rpc/http/getrecentprioritizationfees) RPC
+method, you can query for the prioritization fees required to land a
+transaction in a recent block.
+
+Pricing strategy for these priority fees will vary based on your use case, and
+there is no canonical way to do it. One strategy is to track your transaction
+success rate, compare it against the fees returned by the recent fees API, and
+adjust your Prioritization Fee accordingly. Pricing for Prioritization Fees is
+dynamic, based on network activity and the bids placed by other participants,
+and is only knowable after the fact.
+
+One challenge with using the `getRecentPrioritizationFees` API call is that it
+may only return the lowest fee for each block. This will often be zero, which
+is not a useful estimate of the Prioritization Fee needed to avoid having your
+transaction dropped by validator nodes.
+
+The `getRecentPrioritizationFees` API takes accounts' pubkeys as parameters,
+and then returns the highest of the minimum prioritization fees for these
+accounts. When no account is specified, the API returns the lowest fee required
+to land a transaction in a block, which is usually zero (unless the block is
+full).
+
+Exchanges and applications should query the RPC endpoint with the accounts that
+a transaction is going to write-lock. The RPC endpoint will return the
+`max(account_1_min_fee, account_2_min_fee, ... account_n_min_fee)`, which should
+be the starting point for setting the prioritization fee for that transaction.
+
+There are different approaches to setting Prioritization Fees, and some
+[third-party APIs](https://docs.helius.dev/solana-rpc-nodes/alpha-priority-fee-api)
+are available to determine the best fee to apply. Given the dynamic nature of
+the network, there will not be a “perfect” way to price your Prioritization
+Fees, and careful analysis should be applied before choosing a path forward.
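+
+As a rough illustration of the guidance above, the following sketch uses the
+legacy `@solana/web3.js` `Connection` API to query recent prioritization fees
+for the accounts a transfer would write-lock and takes the recent maximum as a
+starting bid. The generated keypairs stand in for your real sender and
+recipient, and the exact pricing rule (maximum vs. a percentile) is an
+assumption you should adapt to your own workload.
+
+```typescript
+import { clusterApiUrl, Connection, Keypair } from "@solana/web3.js";
+
+const connection = new Connection(clusterApiUrl("devnet"), "confirmed");
+
+// Stand-ins for the accounts your transaction will write-lock,
+// e.g. the sender and recipient of a withdrawal transfer.
+const sender = Keypair.generate().publicKey;
+const recipient = Keypair.generate().publicKey;
+
+// Returns recent { slot, prioritizationFee } entries (micro-lamports per CU)
+// observed for blocks that write-locked the given accounts.
+const recentFees = await connection.getRecentPrioritizationFees({
+  lockedWritableAccounts: [sender, recipient],
+});
+
+// Bid the recent maximum (or a high percentile) rather than the minimum,
+// which is frequently zero.
+const suggestedMicroLamports = recentFees.reduce(
+  (max, { prioritizationFee }) => Math.max(max, prioritizationFee),
+  0,
+);
+console.log("Suggested priority fee (micro-lamports/CU):", suggestedMicroLamports);
+```
+
+Feeding this estimate into the `setComputeUnitPrice` instruction described in
+the next section turns it into an actual bid.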
+ +### How to Implement Prioritization Fees + +Adding priority fees on a transaction consists of prepending two Compute Budget +instructions on a given transaction: + +- one to set the compute unit price, and +- another to set the compute unit limit + +> Here, you can also find a more detailed developer +> [guide on how to use priority fees](/developers/guides/advanced/how-to-use-priority-fees) +> which includes more information about implementing priority fees. + +Create a `setComputeUnitPrice` instruction to add a Prioritization Fee above the +Base Transaction Fee (5,000 Lamports). + +```typescript +// import { ComputeBudgetProgram } from "@solana/web3.js" +ComputeBudgetProgram.setComputeUnitPrice({ microLamports: number }); +``` + +The value provided in micro-lamports will be multiplied by the Compute Unit (CU) +budget to determine the Prioritization Fee in Lamports. For example, if your CU +budget is 1M CU, and you add `1 microLamport/CU`, the Prioritization Fee will be +1 lamport (1M \* 0. 000001). The total fee will then be 5001 lamports. + +To set a new compute unit budget for the transaction, create a +`setComputeUnitLimit` instruction + +```typescript +// import { ComputeBudgetProgram } from "@solana/web3.js" +ComputeBudgetProgram.setComputeUnitLimit({ units: number }); +``` + +The `units` value provided will replace the Solana runtime's default compute +budget value. + + + +Transactions should request the minimum amount of compute units (CU) required +for execution to maximize throughput and minimize overall fees. + +You can get the CU consumed by a transaction by sending the transaction on a +different Solana cluster, like devnet. For example, a +[simple token transfer](https://explorer.solana.com/tx/5scDyuiiEbLxjLUww3APE9X7i8LE3H63unzonUwMG7s2htpoAGG17sgRsNAhR1zVs6NQAnZeRVemVbkAct5myi17) +takes 300 CU. + + + +```typescript +// import { ... } from "@solana/web3.js" + +const modifyComputeUnits = ComputeBudgetProgram.setComputeUnitLimit({ + // note: set this to be the lowest actual CU consumed by the transaction + units: 300, +}); + +const addPriorityFee = ComputeBudgetProgram.setComputeUnitPrice({ + microLamports: 1, +}); + +const transaction = new Transaction() + .add(modifyComputeUnits) + .add(addPriorityFee) + .add( + SystemProgram.transfer({ + fromPubkey: payer.publicKey, + toPubkey: toAccount, + lamports: 10000000, + }), + ); +``` + +### Prioritization Fees And Durable Nonces + +If your setup uses Durable Nonce Transactions, it is important to properly +implement Prioritization Fees in combination with Durable Transaction Nonces to +ensure successful transactions. Failure to do so will cause intended Durable +Nonce transactions not to be detected as such. + +If you ARE using Durable Transaction Nonces, the `AdvanceNonceAccount` +instruction MUST be specified FIRST in the instructions list, even when the +compute budget instructions are used to specify priority fees. + +You can find a specific code example +[using durable nonces and priority fees together](/developers/guides/advanced/how-to-use-priority-fees#special-considerations) +in this developer guide. + +## Supporting the SPL Token Standard + +[SPL Token](https://spl.solana.com/token) is the standard for wrapped/synthetic +token creation and exchange on the Solana blockchain. + +The SPL Token workflow is similar to that of native SOL tokens, but there are a +few differences which will be discussed in this section. + +### Token Mints + +Each _type_ of SPL Token is declared by creating a _mint_ account. 
This account +stores metadata describing token features like the supply, number of decimals, +and various authorities with control over the mint. Each SPL Token account +references its associated mint and may only interact with SPL Tokens of that +type. + +### Installing the `spl-token` CLI Tool + +SPL Token accounts are queried and modified using the `spl-token` command line +utility. The examples provided in this section depend upon having it installed +on the local system. + +`spl-token` is distributed from Rust +[crates.io](https://crates.io/crates/spl-token) via the Rust `cargo` command +line utility. The latest version of `cargo` can be installed using a handy +one-liner for your platform at [rustup.rs](https://rustup.rs). Once `cargo` is +installed, `spl-token` can be obtained with the following command: + +```shell +cargo install spl-token-cli +``` + +You can then check the installed version to verify + +```shell +spl-token --version +``` + +Which should result in something like + +```text +spl-token-cli 2.0.1 +``` + +### Account Creation + +SPL Token accounts carry additional requirements that native System Program +accounts do not: + +1. SPL Token accounts must be created before an amount of tokens can be + deposited. Token accounts can be created explicitly with the + `spl-token create-account` command, or implicitly by the + `spl-token transfer --fund-recipient ...` command. +1. SPL Token accounts must remain [rent-exempt](/docs/core/fees#rent-exempt) + for the duration of their existence and therefore require a small amount of + native SOL tokens be deposited at account creation. For SPL Token accounts, + this amount is 0.00203928 SOL (2,039,280 lamports). + +#### Command Line + +To create an SPL Token account with the following properties: + +1. Associated with the given mint +1. Owned by the funding account's keypair + +```shell +spl-token create-account +``` + +#### Example + +```shell +spl-token create-account AkUFCWTXb3w9nY2n6SFJvBV6VwvFUCe4KBMCcgLsa2ir +``` + +Giving an output similar to: + +``` +Creating account 6VzWGL51jLebvnDifvcuEDec17sK6Wupi4gYhm5RzfkV +Signature: 4JsqZEPra2eDTHtHpB4FMWSfk3UgcCVmkKkP7zESZeMrKmFFkDkNd91pKP3vPVVZZPiu5XxyJwS73Vi5WsZL88D7 +``` + +Or to create an SPL Token account with a specific keypair: + +```shell +solana-keygen new -o token-account.json + +spl-token create-account AkUFCWTXb3w9nY2n6SFJvBV6VwvFUCe4KBMCcgLsa2ir token-account.json +``` + +Giving an output similar to: + +```shell +Creating account 6VzWGL51jLebvnDifvcuEDec17sK6Wupi4gYhm5RzfkV +Signature: 4JsqZEPra2eDTHtHpB4FMWSfk3UgcCVmkKkP7zESZeMrKmFFkDkNd91pKP3vPVVZZPiu5XxyJwS73Vi5WsZL88D7 +``` + +### Checking an Account's Balance + +#### Command Line + +```shell +spl-token balance +``` + +#### Example + +```shell +solana balance 6VzWGL51jLebvnDifvcuEDec17sK6Wupi4gYhm5RzfkV +``` + +Giving an output similar to: + +``` +0 +``` + +### Token Transfers + +The source account for a transfer is the actual token account that contains the +amount. + +The recipient address however can be a normal wallet account. If an associated +token account for the given mint does not yet exist for that wallet, the +transfer will create it provided that the `--fund-recipient` argument as +provided. 
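+
+If you drive transfers from TypeScript rather than the CLI, a roughly
+equivalent flow is sketched below using `@solana/spl-token`. The keypair and
+recipient here are stand-ins, the mint reuses the example mint address from the
+account-creation example above, and `getOrCreateAssociatedTokenAccount` plays
+the same role as the `--fund-recipient` flag. The CLI commands follow below.
+
+```typescript
+import { clusterApiUrl, Connection, Keypair, PublicKey } from "@solana/web3.js";
+import { getOrCreateAssociatedTokenAccount, transfer } from "@solana/spl-token";
+
+const connection = new Connection(clusterApiUrl("devnet"), "confirmed");
+
+// Stand-ins: use your own funded keypair and the recipient's wallet address.
+const payer = Keypair.generate();
+const recipientWallet = Keypair.generate().publicKey;
+// Example mint from the create-account example above.
+const mint = new PublicKey("AkUFCWTXb3w9nY2n6SFJvBV6VwvFUCe4KBMCcgLsa2ir");
+
+// Source: the sender's associated token account for this mint.
+const source = await getOrCreateAssociatedTokenAccount(
+  connection,
+  payer,
+  mint,
+  payer.publicKey,
+);
+
+// Destination: creates the recipient's associated token account if it does
+// not exist yet, mirroring the CLI's --fund-recipient behavior.
+const destination = await getOrCreateAssociatedTokenAccount(
+  connection,
+  payer,
+  mint,
+  recipientWallet,
+);
+
+const signature = await transfer(
+  connection,
+  payer,
+  source.address,
+  destination.address,
+  payer.publicKey, // owner of the source token account
+  1n, // amount in base units; respect the mint's decimals
+);
+console.log("Transfer signature:", signature);
+```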
+ +#### Command Line + +```shell +spl-token transfer --fund-recipient +``` + +#### Example + +```shell +spl-token transfer 6B199xxzw3PkAm25hGJpjj3Wj3WNYNHzDAnt1tEqg5BN 1 +``` + +Giving an output similar to: + +```shell +6VzWGL51jLebvnDifvcuEDec17sK6Wupi4gYhm5RzfkV +Transfer 1 tokens + Sender: 6B199xxzw3PkAm25hGJpjj3Wj3WNYNHzDAnt1tEqg5BN + Recipient: 6VzWGL51jLebvnDifvcuEDec17sK6Wupi4gYhm5RzfkV +Signature: 3R6tsog17QM8KfzbcbdP4aoMfwgo6hBggJDVy7dZPVmH2xbCWjEj31JKD53NzMrf25ChFjY7Uv2dfCDq4mGFFyAj +``` + +### Depositing + +Since each `(wallet, mint)` pair requires a separate account onchain. It is +recommended that the addresses for these accounts be derived from SOL deposit +wallets using the +[Associated Token Account](https://spl.solana.com/associated-token-account) +(ATA) scheme and that _only_ deposits from ATA addresses be accepted. + +Monitoring for deposit transactions should follow the +[block polling](#poll-for-blocks) method described above. Each new block should +be scanned for successful transactions referencing user token-account derived +addresses. The `preTokenBalance` and `postTokenBalance` fields from the +transaction's metadata must then be used to determine the effective balance +change. These fields will identify the token mint and account owner (main wallet +address) of the affected account. + +Note that if a receiving account is created during the transaction, it will have +no `preTokenBalance` entry as there is no existing account state. In this case, +the initial balance can be assumed to be zero. + +### Withdrawing + +The withdrawal address a user provides must be that of their SOL wallet. + +Before executing a withdrawal [transfer](#token-transfers), the exchange should +check the address as +[described above](#validating-user-supplied-account-addresses-for-withdrawals). +Additionally this address must be owned by the System Program and have no +account data. If the address has no SOL balance, user confirmation should be +obtained before proceeding with the withdrawal. All other withdrawal addresses +must be rejected. + +From the withdrawal address, the +[Associated Token Account](https://spl.solana.com/associated-token-account) +(ATA) for the correct mint is derived and the transfer issued to that account +via a +[TransferChecked](https://github.com/solana-labs/solana-program-library/blob/fc0d6a2db79bd6499f04b9be7ead0c400283845e/token/program/src/instruction.rs#L268) +instruction. Note that it is possible that the ATA address does not yet exist, +at which point the exchange should fund the account on behalf of the user. For +SPL Token accounts, funding the withdrawal account will require 0.00203928 SOL +(2,039,280 lamports). + +Template `spl-token transfer` command for a withdrawal: + +```shell +spl-token transfer --fund-recipient +``` + +### Other Considerations + +#### Freeze Authority + +For regulatory compliance reasons, an SPL Token issuing entity may optionally +choose to hold "Freeze Authority" over all accounts created in association with +its mint. This allows them to +[freeze](https://spl.solana.com/token#freezing-accounts) the assets in a given +account at will, rendering the account unusable until thawed. If this feature is +in use, the freeze authority's pubkey will be registered in the SPL Token's mint +account. + +### Basic Support for the SPL Token-2022 (Token-Extensions) Standard + +[SPL Token-2022](https://spl.solana.com/token-2022) is the newest standard for +wrapped/synthetic token creation and exchange on the Solana blockchain. 
+ +Also known as "Token Extensions", the standard contains many new features that +token creators and account holders may optionally enable. These features include +confidential transfers, fees on transfer, closing mints, metadata, permanent +delegates, immutable ownership, and much more. Please see the +[extension guide](https://spl.solana.com/token-2022/extensions) for more +information. + +If your exchange supports SPL Token, there isn't a lot more work required to +support SPL Token-2022: + +- the CLI tool works seamlessly with both programs starting with version 3.0.0. +- `preTokenBalances` and `postTokenBalances` include SPL Token-2022 balances +- RPC indexes SPL Token-2022 accounts, but they must be queried separately with + program id `TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb` + +The Associated Token Account works the same way, and properly calculates the +required deposit amount of SOL for the new account. + +Because of extensions, however, accounts may be larger than 165 bytes, so they +may require more than 0.00203928 SOL to fund. + +For example, the Associated Token Account program always includes the "immutable +owner" extension, so accounts take a minimum of 170 bytes, which requires +0.00207408 SOL. + +### Extension-Specific Considerations + +The previous section outlines the most basic support for SPL Token-2022. Since +the extensions modify the behavior of tokens, exchanges may need to change how +they handle tokens. + +It is possible to see all extensions on a mint or token account: + +```shell +spl-token display +``` + +#### Transfer Fee + +A token may be configured with a transfer fee, where a portion of transferred +tokens are withheld at the destination for future collection. + +If your exchange transfers these tokens, beware that they may not all arrive at +the destination due to the withheld amount. + +It is possible to specify the expected fee during a transfer to avoid any +surprises: + +```shell +spl-token transfer --expected-fee --fund-recipient +``` + +#### Mint Close Authority + +With this extension, a token creator may close a mint, provided the supply of +tokens is zero. + +When a mint is closed, there may still be empty token accounts in existence, and +they will no longer be associated to a valid mint. + +It is safe to simply close these token accounts: + +```shell +spl-token close --address +``` + +#### Confidential Transfer + +Mints may be configured for confidential transfers, so that token amounts are +encrypted, but the account owners are still public. + +Exchanges may configure token accounts to send and receive confidential +transfers, to hide user amounts. It is not required to enable confidential +transfers on token accounts, so exchanges can force users to send tokens +non-confidentially. + +To enable confidential transfers, the account must be configured for it: + +```shell +spl-token configure-confidential-transfer-account --address +``` + +And to transfer: + +```shell +spl-token transfer --confidential +``` + +During a confidential transfer, the `preTokenBalance` and `postTokenBalance` +fields will show no change. In order to sweep deposit accounts, you must decrypt +the new balance to withdraw the tokens: + +```shell +spl-token apply-pending-balance --address +spl-token withdraw-confidential-tokens --address +``` + +#### Default Account State + +Mints may be configured with a default account state, such that all new token +accounts are frozen by default. 
These token creators may require users to go +through a separate process to thaw the account. + +#### Non-Transferable + +Some tokens are non-transferable, but they may still be burned and the account +can be closed. + +#### Permanent Delegate + +Token creators may designate a permanent delegate for all of their tokens. The +permanent delegate may transfer or burn tokens from any account, potentially +stealing funds. + +This is a legal requirement for stablecoins in certain jurisdictions, or could +be used for token repossession schemes. + +Beware that these tokens may be transferred without your exchange's knowledge. + +#### Transfer Hook + +Tokens may be configured with an additional program that must be called during +transfers, in order to validate the transfer or perform any other logic. + +Since the Solana runtime requires all accounts to be explicitly passed to a +program, and transfer hooks require additional accounts, the exchange needs to +create transfer instructions differently for these tokens. + +The CLI and instruction creators such as +`createTransferCheckedWithTransferHookInstruction` add the extra accounts +automatically, but the additional accounts may also be specified explicitly: + +```shell +spl-token transfer --transfer-hook-account --transfer-hook-account ... +``` + +#### Required Memo on Transfer + +Users may configure their token accounts to require a memo on transfer. + +Exchanges may need to prepend a memo instruction before transferring tokens back +to users, or they may require users to prepend a memo instruction before sending +to the exchange: + +```shell +spl-token transfer --with-memo +``` + +## Testing the Integration + +Be sure to test your complete workflow on Solana devnet and testnet +[clusters](/docs/core/clusters) before moving to production on mainnet-beta. +Devnet is the most open and flexible, and ideal for initial development, while +testnet offers more realistic cluster configuration. Both devnet and testnet +support a faucet, run `solana airdrop 1` to obtain some devnet or testnet SOL +for development and testing. diff --git a/content/docs/more/meta.json b/content/docs/more/meta.json new file mode 100644 index 000000000..24edece23 --- /dev/null +++ b/content/docs/more/meta.json @@ -0,0 +1,5 @@ +{ + "title": "More Information", + "pages": ["exchange"], + "defaultOpen": true +} diff --git a/content/docs/programs/anchor/client-typescript.mdx b/content/docs/programs/anchor/client-typescript.mdx new file mode 100644 index 000000000..087dc6225 --- /dev/null +++ b/content/docs/programs/anchor/client-typescript.mdx @@ -0,0 +1,352 @@ +--- +title: JS/TS Client +description: + Learn how to use Anchor's TypeScript client library to interact with Solana + program +--- + +Anchor provides a Typescript client library +([@coral-xyz/anchor](https://github.com/coral-xyz/anchor/tree/v0.30.1/ts/packages/anchor)) +that simplifies the process of interacting with Solana programs from the client +in JavaScript or TypeScript. + +## Client Program + +To use the client library, first create an instance of a +[`Program`](https://github.com/coral-xyz/anchor/blob/v0.30.1/ts/packages/anchor/src/program/index.ts#L58) +using the [IDL file](/docs/programs/anchor/idl) generated by Anchor. + +Creating an instance of the `Program` requires the program's IDL and an +[`AnchorProvider`](https://github.com/coral-xyz/anchor/blob/v0.30.1/ts/packages/anchor/src/provider.ts#L55). 
+An `AnchorProvider` is an abstraction that combines two things: + +- `Connection` - the connection to a [Solana cluster](/docs/core/clusters) + (i.e. localhost, devnet, mainnet) +- `Wallet` - (optional) a default wallet used to pay and sign transactions + +{/* prettier-ignore */} + + + +When integrating with a frontend using the +[wallet adapter](/developers/guides/wallets/add-solana-wallet-adapter-to-nextjs), +you'll need to set up the `AnchorProvider` and `Program`. + +```ts {9-10, 12-14} +import { Program, AnchorProvider, setProvider } from "@coral-xyz/anchor"; +import { useAnchorWallet, useConnection } from "@solana/wallet-adapter-react"; +import type { HelloAnchor } from "./idlType"; +import idl from "./idl.json"; + +const { connection } = useConnection(); +const wallet = useAnchorWallet(); + +const provider = new AnchorProvider(connection, wallet, {}); +setProvider(provider); + +export const program = new Program(idl as HelloAnchor, { + connection, +}); +``` + +In the code snippet above: + +- `idl.json` is the IDL file generated by Anchor, found at + `/target/idl/.json` in an Anchor project. +- `idlType.ts` is the IDL type (for use with TS), found at + `/target/types/.ts` in an Anchor project. + +Alternatively, you can create an instance of the `Program` using only the IDL +and the `Connection` to a Solana cluster. This means there is no default +`Wallet`, but allows you to use the `Program` to fetch accounts or build +instructions without a connected wallet. + +```ts {8-10} +import { clusterApiUrl, Connection, PublicKey } from "@solana/web3.js"; +import { Program } from "@coral-xyz/anchor"; +import type { HelloAnchor } from "./idlType"; +import idl from "./idl.json"; + +const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + +export const program = new Program(idl as HelloAnchor, { + connection, +}); +``` + + + + +Anchor automatically sets up a `Program` instance in the default test file of +new projects. However, this setup differs from how you'd initialize a `Program` +outside the Anchor workspace, such as in React or Node.js applications. + +```typescript +import * as anchor from "@coral-xyz/anchor"; +import { Program } from "@coral-xyz/anchor"; +import { HelloAnchor } from "../target/types/hello_anchor"; + +describe("hello_anchor", () => { + // Configure the client to use the local cluster. + anchor.setProvider(anchor.AnchorProvider.env()); + + const program = anchor.workspace.HelloAnchor as Program; + + it("Is initialized!", async () => { + // Add your test here. + const tx = await program.methods.initialize().rpc(); + console.log("Your transaction signature", tx); + }); +}); +``` + + + + +## Invoke Instructions + +Once the `Program` is set up using a program IDL, you can use the Anchor +[`MethodsBuilder`](https://github.com/coral-xyz/anchor/blob/v0.30.1/ts/packages/anchor/src/program/namespace/methods.ts#L155) +to: + +- Build individual instructions +- Build transactions +- Build and send transactions + +The basic format looks like the following: + +{/* prettier-ignore */} + + + +`program.methods` - This is the builder API for creating instruction calls from +the program's IDL + +```ts /methods/ {1} +await program.methods + .instructionName(instructionData) + .accounts({}) + .signers([]) + .rpc(); +``` + + + + +Following `.methods`, specify the name of an instruction from the program IDL, +passing in any required arguments as comma-separated values. 
+ +```ts /instructionName/ /instructionData1/ /instructionData2/ {2} +await program.methods + .instructionName(instructionData1, instructionData2) + .accounts({}) + .signers([]) + .rpc(); +``` + + + + +`.accounts` - Pass in the address of the accounts required by the instruction as +specified in the IDL + +```ts /accounts/ {3} +await program.methods + .instructionName(instructionData) + .accounts({}) + .signers([]) + .rpc(); +``` + +Note that certain account addresses don't need to be explicitly provided, as the +Anchor client can automatically resolve them. These typically include: + +- Common accounts (ex. the System Program) +- Accounts where the address is a PDA (Program Derived Address) + + + + +`.signers` - Optionally pass in an array of keypairs required as additional +signers by the instruction. This is commonly used when creating new accounts +where the account address is the public key of a newly generated keypair. + +```ts /signers/ {4} +await program.methods + .instructionName(instructionData) + .accounts({}) + .signers([]) + .rpc(); +``` + +Note that `.signers` should only be used when also using `.rpc()`. When using +`.transaction()` or `.instruction()`, signers should be added to the transaction +before sending. + + + + +Anchor provides multiple methods for building program instructions: + +{/* prettier-ignore */} + + + +The +[`rpc()`](https://github.com/coral-xyz/anchor/blob/v0.30.1/ts/packages/anchor/src/program/namespace/methods.ts#L283) +method +[sends a signed transaction](https://github.com/coral-xyz/anchor/blob/v0.30.1/ts/packages/anchor/src/program/namespace/rpc.ts#L29) +with the specified instruction and returns a `TransactionSignature`. + +When using `.rpc`, the `Wallet` from the `Provider` is automatically included as +a signer. + +```ts {13} +// Generate keypair for the new account +const newAccountKp = new Keypair(); + +const data = new BN(42); +const transactionSignature = await program.methods + .initialize(data) + .accounts({ + newAccount: newAccountKp.publicKey, + signer: wallet.publicKey, + systemProgram: SystemProgram.programId, + }) + .signers([newAccountKp]) + .rpc(); +``` + + + + +The +[`transaction()`](https://github.com/coral-xyz/anchor/blob/v0.30.1/ts/packages/anchor/src/program/namespace/methods.ts#L382) +method +[builds a `Transaction`](https://github.com/coral-xyz/anchor/blob/v0.30.1/ts/packages/anchor/src/program/namespace/transaction.ts#L18-L26) +with the specified instruction without sending the transaction. + +```ts {12} /transaction()/1,2,4 +// Generate keypair for the new account +const newAccountKp = new Keypair(); + +const data = new BN(42); +const transaction = await program.methods + .initialize(data) + .accounts({ + newAccount: newAccountKp.publicKey, + signer: wallet.publicKey, + systemProgram: SystemProgram.programId, + }) + .transaction(); + +const transactionSignature = await connection.sendTransaction(transaction, [ + wallet.payer, + newAccountKp, +]); +``` + + + + +The +[`instruction()`](https://github.com/coral-xyz/anchor/blob/v0.30.1/ts/packages/anchor/src/program/namespace/methods.ts#L348) +method +[builds a `TransactionInstruction`](https://github.com/coral-xyz/anchor/blob/v0.30.1/ts/packages/anchor/src/program/namespace/instruction.ts#L57-L61) +using the specified instruction. This is useful if you want to manually add the +instruction to a transaction and combine it with other instructions. 
+ +```ts {12} /instruction()/ +// Generate keypair for the new account +const newAccountKp = new Keypair(); + +const data = new BN(42); +const instruction = await program.methods + .initialize(data) + .accounts({ + newAccount: newAccountKp.publicKey, + signer: wallet.publicKey, + systemProgram: SystemProgram.programId, + }) + .instruction(); + +const transaction = new Transaction().add(instruction); + +const transactionSignature = await connection.sendTransaction(transaction, [ + wallet.payer, + newAccountKp, +]); +``` + + + + +## Fetch Accounts + +The `Program` client simplifies the process of fetching and deserializing +accounts created by your Anchor program. + +Use `program.account` followed by the name of the account type defined in the +IDL. Anchor provides multiple methods for fetching accounts. + +{/* prettier-ignore */} + + + +Use +[`all()`](https://github.com/coral-xyz/anchor/blob/v0.30.1/ts/packages/anchor/src/program/namespace/account.ts#L251) +to fetch all existing accounts for a specific account type. + +```ts /all/ +const accounts = await program.account.newAccount.all(); +``` + + + + +Use `memcmp` (memory compare) to filter for account data that matches a specific +value at a specific offset. Using `memcmp` requires you to understand the byte +layout of the data field for the account type you are fetching. + +When calculating the offset, remember that the first 8 bytes in accounts created +by an Anchor program are reserved for the account discriminator. + +```ts /memcmp/ +const accounts = await program.account.newAccount.all([ + { + memcmp: { + offset: 8, + bytes: "", + }, + }, +]); +``` + + + + +Use +[`fetch()`](https://github.com/coral-xyz/anchor/blob/v0.30.1/ts/packages/anchor/src/program/namespace/account.ts#L165) +to fetch the account data for a single account + +```ts /fetch/ +const account = await program.account.newAccount.fetch(ACCOUNT_ADDRESS); +``` + + + + +Use +[`fetchMultiple()`](https://github.com/coral-xyz/anchor/blob/v0.30.1/ts/packages/anchor/src/program/namespace/account.ts#L200) +to fetch the account data for multiple accounts by passing in an array of +account addresses + +```ts /fetchMultiple/ +const accounts = await program.account.newAccount.fetchMultiple([ + ACCOUNT_ADDRESS_ONE, + ACCOUNT_ADDRESS_TWO, +]); +``` + + + diff --git a/content/docs/programs/anchor/cpi.mdx b/content/docs/programs/anchor/cpi.mdx new file mode 100644 index 000000000..d49a23f30 --- /dev/null +++ b/content/docs/programs/anchor/cpi.mdx @@ -0,0 +1,549 @@ +--- +title: CPIs with Anchor +description: + Learn how to implement Cross Program Invocations (CPIs) in Anchor programs, + enabling interaction between different programs on Solana +--- + +[Cross Program Invocations (CPI)](/docs/core/cpi) refer to the process of one +program invoking instructions of another program, which enables the +composability of programs on Solana. + +This section will cover the basics of implementing CPIs in an Anchor program, +using a simple SOL transfer instruction as a practical example. Once you +understand the basics of how to implement a CPI, you can apply the same concepts +for any instruction. + +## Cross Program Invocations + +Let's examine a program that implements a CPI to the System Program's transfer +instruction. Here is the example program on +[Solana Playground](https://beta.solpg.io/66df2751cffcf4b13384d35a). + +The `lib.rs` file includes a single `sol_transfer` instruction. 
When the +`sol_transfer` instruction on the Anchor program is invoked, the program +internally invokes the transfer instruction of the System Program. + +```rs title="lib.rs" /sol_transfer/ /transfer/ {23} +use anchor_lang::prelude::*; +use anchor_lang::system_program::{transfer, Transfer}; + +declare_id!("9AvUNHjxscdkiKQ8tUn12QCMXtcnbR9BVGq3ULNzFMRi"); + +#[program] +pub mod cpi { + use super::*; + + pub fn sol_transfer(ctx: Context, amount: u64) -> Result<()> { + let from_pubkey = ctx.accounts.sender.to_account_info(); + let to_pubkey = ctx.accounts.recipient.to_account_info(); + let program_id = ctx.accounts.system_program.to_account_info(); + + let cpi_context = CpiContext::new( + program_id, + Transfer { + from: from_pubkey, + to: to_pubkey, + }, + ); + + transfer(cpi_context, amount)?; + Ok(()) + } +} + +#[derive(Accounts)] +pub struct SolTransfer<'info> { + #[account(mut)] + sender: Signer<'info>, + #[account(mut)] + recipient: SystemAccount<'info>, + system_program: Program<'info, System>, +} +``` + +The `cpi.test.ts` file shows how to invoke the Anchor program's `sol_transfer` +instruction and logs a link to the transaction details on SolanaFM. + +```ts title="cpi.test.ts" +it("SOL Transfer Anchor", async () => { + const transactionSignature = await program.methods + .solTransfer(new BN(transferAmount)) + .accounts({ + sender: sender.publicKey, + recipient: recipient.publicKey, + }) + .rpc(); + + console.log( + `\nTransaction Signature:` + + `https://solana.fm/tx/${transactionSignature}?cluster=devnet-solana`, + ); +}); +``` + +You can build, deploy, and run the test for this example on Playground to view +the transaction details on the [SolanaFM explorer](https://solana.fm/). + +The transaction details will show that the Anchor program was first invoked +(instruction 1), which then invokes the System Program (instruction 1.1), +resulting in a successful SOL transfer. + +![Transaction Details](/assets/docs/core/cpi/transaction-details.png) + +### Example 1 Explanation + +Implementing a CPI follows the same pattern as building an instruction to add to +a transaction. When implementing a CPI, we must specify the program ID, +accounts, and instruction data for the instruction being called. + +The System Program's transfer instruction requires two accounts: + +- `from`: The account sending SOL. +- `to`: The account receiving SOL. + +In the example program, the `SolTransfer` struct specifies the accounts required +by the transfer instruction. The System Program is also included because the CPI +invokes the System Program. + +```rust /sender/ /recipient/ /system_program/ +#[derive(Accounts)] +pub struct SolTransfer<'info> { + #[account(mut)] + sender: Signer<'info>, // from account + #[account(mut)] + recipient: SystemAccount<'info>, // to account + system_program: Program<'info, System>, // program ID +} +``` + +The following tabs present three approaches to implementing Cross Program +Invocations (CPIs), each at a different level of abstraction. All examples are +functionally equivalent. The main purpose is to illustrate the implementation +details of the CPI. + +{/* prettier-ignore */} + + + +The `sol_transfer` instruction included in the example code shows a typical +approach for constructing CPIs using the Anchor framework. 
+ +This approach involves creating a +[`CpiContext`](https://docs.rs/anchor-lang/latest/anchor_lang/context/struct.CpiContext.html), +which includes the `program_id` and accounts required for the instruction being +called, followed by a helper function (`transfer`) to invoke a specific +instruction. + +```rust +use anchor_lang::system_program::{transfer, Transfer}; +``` + +```rust /cpi_context/ {14} +pub fn sol_transfer(ctx: Context, amount: u64) -> Result<()> { + let from_pubkey = ctx.accounts.sender.to_account_info(); + let to_pubkey = ctx.accounts.recipient.to_account_info(); + let program_id = ctx.accounts.system_program.to_account_info(); + + let cpi_context = CpiContext::new( + program_id, + Transfer { + from: from_pubkey, + to: to_pubkey, + }, + ); + + transfer(cpi_context, amount)?; + Ok(()) +} +``` + +The `cpi_context` variable specifies the program ID (System Program) and +accounts (sender and recipient) required by the transfer instruction. + +```rust /program_id/ /from_pubkey/ /to_pubkey/ +let cpi_context = CpiContext::new( + program_id, + Transfer { + from: from_pubkey, + to: to_pubkey, + }, +); +``` + +The `cpi_context` and `amount` are then passed into the `transfer` function to +execute the CPI invoking the transfer instruction of the System Program. + +```rust +transfer(cpi_context, amount)?; +``` + + + + +This example shows a different approach to implementing a CPI using the `invoke` +function and +[`system_instruction::transfer`](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/sdk/program/src/system_instruction.rs#L881), +which is generally seen in native Rust programs. + +Under the hood, the previous example is an abstraction of this implementation. +The example below is functionally equivalent to the previous example. + +```rust +use anchor_lang::solana_program::{program::invoke, system_instruction}; +``` + +```rust /instruction/1,3 {9} +pub fn sol_transfer(ctx: Context, amount: u64) -> Result<()> { + let from_pubkey = ctx.accounts.sender.to_account_info(); + let to_pubkey = ctx.accounts.recipient.to_account_info(); + let program_id = ctx.accounts.system_program.to_account_info(); + + let instruction = + &system_instruction::transfer(&from_pubkey.key(), &to_pubkey.key(), amount); + + invoke(instruction, &[from_pubkey, to_pubkey, program_id])?; + Ok(()) +} +``` + + + + +You can also manually build the instruction to pass into the `invoke()` +function. This is useful when there is no crate available to help build the +instruction you want to invoke. This approach requires you to specify the +`AccountMeta`s for the instruction and correctly create the instruction data +buffer. + +The `sol_transfer` instruction below is a manual implementation of a CPI to the +System Program's transfer instruction. 
+ +```rust /instruction/10,13 {28} +pub fn sol_transfer(ctx: Context, amount: u64) -> Result<()> { + let from_pubkey = ctx.accounts.sender.to_account_info(); + let to_pubkey = ctx.accounts.recipient.to_account_info(); + let program_id = ctx.accounts.system_program.to_account_info(); + + // Prepare instruction AccountMetas + let account_metas = vec![ + AccountMeta::new(from_pubkey.key(), true), + AccountMeta::new(to_pubkey.key(), false), + ]; + + // SOL transfer instruction discriminator + let instruction_discriminator: u32 = 2; + + // Prepare instruction data + let mut instruction_data = Vec::with_capacity(4 + 8); + instruction_data.extend_from_slice(&instruction_discriminator.to_le_bytes()); + instruction_data.extend_from_slice(&amount.to_le_bytes()); + + // Create instruction + let instruction = Instruction { + program_id: program_id.key(), + accounts: account_metas, + data: instruction_data, + }; + + // Invoke instruction + invoke(&instruction, &[from_pubkey, to_pubkey, program_id])?; + Ok(()) +} +``` + +The `sol_transfer` instruction above replicates this +[example](/docs/core/transactions#manual-sol-transfer) of manually building a +SOL transfer instruction. It follows the same pattern as building an +[instruction](/docs/core/transactions#instruction) to add to a transaction. + +When building an instruction in Rust, use the following syntax to specify the +`AccountMeta` for each account: + +```rust +AccountMeta::new(account1_pubkey, true), // writable, signer +AccountMeta::new(account2_pubkey, false), // writable, not signer +AccountMeta::new_readonly(account3_pubkey, false), // not writable, not signer +AccountMeta::new_readonly(account4_pubkey, true), // writable, signer +``` + + + + +Here is a reference program on +[Solana Playground](https://beta.solpg.io/github.com/ZYJLiu/doc-examples/tree/main/cpi) +which includes all 3 examples. + +## Cross Program Invocations with PDA Signers + +Next, let's examine a program that implements a CPI to the System Program's +transfer instruction where the sender is a Program Derived Address (PDA) that +must be "signed" for by the program. Here is the example program on +[Solana Playground](https://beta.solpg.io/66df2bd2cffcf4b13384d35b). + +The `lib.rs` file includes the following program with a single `sol_transfer` +instruction. 
+ +```rust title="lib.rs" +use anchor_lang::prelude::*; +use anchor_lang::system_program::{transfer, Transfer}; + +declare_id!("3455LkCS85a4aYmSeNbRrJsduNQfYRY82A7eCD3yQfyR"); + +#[program] +pub mod cpi { + use super::*; + + pub fn sol_transfer(ctx: Context, amount: u64) -> Result<()> { + let from_pubkey = ctx.accounts.pda_account.to_account_info(); + let to_pubkey = ctx.accounts.recipient.to_account_info(); + let program_id = ctx.accounts.system_program.to_account_info(); + + let seed = to_pubkey.key(); + let bump_seed = ctx.bumps.pda_account; + let signer_seeds: &[&[&[u8]]] = &[&[b"pda", seed.as_ref(), &[bump_seed]]]; + + let cpi_context = CpiContext::new( + program_id, + Transfer { + from: from_pubkey, + to: to_pubkey, + }, + ) + .with_signer(signer_seeds); + + transfer(cpi_context, amount)?; + Ok(()) + } +} + +#[derive(Accounts)] +pub struct SolTransfer<'info> { + #[account( + mut, + seeds = [b"pda", recipient.key().as_ref()], + bump, + )] + pda_account: SystemAccount<'info>, + #[account(mut)] + recipient: SystemAccount<'info>, + system_program: Program<'info, System>, +} +``` + +The `cpi.test.ts` file shows how to invoke the Anchor program's `sol_transfer` +instruction and logs a link to the transaction details on SolanaFM. + +It shows how to derive the PDA using the seeds specified in the program: + +```ts /pda/ /wallet.publicKey/ +const [PDA] = PublicKey.findProgramAddressSync( + [Buffer.from("pda"), wallet.publicKey.toBuffer()], + program.programId, +); +``` + +The first step in this example is to fund the PDA account with a basic SOL +transfer from the Playground wallet. + +```ts title="cpi.test.ts" +it("Fund PDA with SOL", async () => { + const transferInstruction = SystemProgram.transfer({ + fromPubkey: wallet.publicKey, + toPubkey: PDA, + lamports: transferAmount, + }); + + const transaction = new Transaction().add(transferInstruction); + + const transactionSignature = await sendAndConfirmTransaction( + connection, + transaction, + [wallet.payer], // signer + ); + + console.log( + `\nTransaction Signature:` + + `https://solana.fm/tx/${transactionSignature}?cluster=devnet-solana`, + ); +}); +``` + +Once the PDA is funded with SOL, invoke the `sol_transfer` instruction. This +instruction transfers SOL from the PDA account back to the `wallet` account via +a CPI to the System Program, which is "signed" for by the program. + +```ts +it("SOL Transfer with PDA signer", async () => { + const transactionSignature = await program.methods + .solTransfer(new BN(transferAmount)) + .accounts({ + pdaAccount: PDA, + recipient: wallet.publicKey, + }) + .rpc(); + + console.log( + `\nTransaction Signature: https://solana.fm/tx/${transactionSignature}?cluster=devnet-solana`, + ); +}); +``` + +You can build, deploy, and run the test to view the transaction details on the +[SolanaFM explorer](https://solana.fm/). + +The transaction details will show that the custom program was first invoked +(instruction 1), which then invokes the System Program (instruction 1.1), +resulting in a successful SOL transfer. + +![Transaction Details](/assets/docs/core/cpi/transaction-details-pda.png) + +### Example 2 Explanation + +In the example code, the `SolTransfer` struct specifies the accounts required by +the transfer instruction. + +The sender is a PDA that the program must sign for. The `seeds` to derive the +address for the `pda_account` include the hardcoded string "pda" and the address +of the `recipient` account. This means the address for the `pda_account` is +unique for each `recipient`. 
+ +```rust /pda_account/ /recipient/2 /system_program/ +#[derive(Accounts)] +pub struct SolTransfer<'info> { + #[account( + mut, + seeds = [b"pda", recipient.key().as_ref()], + bump, + )] + pda_account: SystemAccount<'info>, + #[account(mut)] + recipient: SystemAccount<'info>, + system_program: Program<'info, System>, +} +``` + +The Javascript equivalent to derive the PDA is included in the test file. + +```ts /pda/ /wallet.publicKey/ +const [PDA] = PublicKey.findProgramAddressSync( + [Buffer.from("pda"), wallet.publicKey.toBuffer()], + program.programId, +); +``` + +The following tabs present two approaches to implementing Cross Program +Invocations (CPIs), each at a different level of abstraction. Both examples are +functionally equivalent. The main purpose is to illustrate the implementation +details of the CPI. + +{/* prettier-ignore */} + + + +The `sol_transfer` instruction included in the example code shows a typical +approach for constructing CPIs using the Anchor framework. + +This approach involves creating a +[`CpiContext`](https://docs.rs/anchor-lang/latest/anchor_lang/context/struct.CpiContext.html), +which includes the `program_id` and accounts required for the instruction being +called, followed by a helper function (`transfer`) to invoke a specific +instruction. + +```rust /cpi_context/ {19} +pub fn sol_transfer(ctx: Context, amount: u64) -> Result<()> { + let from_pubkey = ctx.accounts.pda_account.to_account_info(); + let to_pubkey = ctx.accounts.recipient.to_account_info(); + let program_id = ctx.accounts.system_program.to_account_info(); + + let seed = to_pubkey.key(); + let bump_seed = ctx.bumps.pda_account; + let signer_seeds: &[&[&[u8]]] = &[&[b"pda", seed.as_ref(), &[bump_seed]]]; + + let cpi_context = CpiContext::new( + program_id, + Transfer { + from: from_pubkey, + to: to_pubkey, + }, + ) + .with_signer(signer_seeds); + + transfer(cpi_context, amount)?; + Ok(()) +} +``` + +When signing with PDAs, the seeds and bump seed are included in the +`cpi_context` as `signer_seeds` using `with_signer()`. The bump seed for a PDA +can be accessed using `ctx.bumps` followed by the name of the PDA account. + +```rust /signer_seeds/ /bump_seed/ {3} +let seed = to_pubkey.key(); +let bump_seed = ctx.bumps.pda_account; +let signer_seeds: &[&[&[u8]]] = &[&[b"pda", seed.as_ref(), &[bump_seed]]]; + +let cpi_context = CpiContext::new( + program_id, + Transfer { + from: from_pubkey, + to: to_pubkey, + }, +) +.with_signer(signer_seeds); +``` + +The `cpi_context` and `amount` are then passed into the `transfer` function to +execute the CPI. + +```rust +transfer(cpi_context, amount)?; +``` + +When the CPI is processed, the Solana runtime will validate that the provided +seeds and caller program ID derive a valid PDA. The PDA is then added as a +signer on the invocation. This mechanism allows for programs to sign for PDAs +that are derived from their program ID. + + + + +Under the hood, the previous example is a wrapper around the `invoke_signed()` +function which uses +[`system_instruction::transfer`](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/sdk/program/src/system_instruction.rs#L881) +to build the instruction. + +This example shows how to use the `invoke_signed()` function to make a CPI +signed for by a PDA. 
+ +```rust +use anchor_lang::solana_program::{program::invoke_signed, system_instruction}; +``` + +```rust /instruction/1,3 {13} +pub fn sol_transfer(ctx: Context, amount: u64) -> Result<()> { + let from_pubkey = ctx.accounts.pda_account.to_account_info(); + let to_pubkey = ctx.accounts.recipient.to_account_info(); + let program_id = ctx.accounts.system_program.to_account_info(); + + let seed = to_pubkey.key(); + let bump_seed = ctx.bumps.pda_account; + let signer_seeds: &[&[&[u8]]] = &[&[b"pda", seed.as_ref(), &[bump_seed]]]; + + let instruction = + &system_instruction::transfer(&from_pubkey.key(), &to_pubkey.key(), amount); + + invoke_signed(instruction, &[from_pubkey, to_pubkey, program_id], signer_seeds)?; + Ok(()) +} +``` + +This implementation is functionally equivalent to the previous example. The +`signer_seeds` are passed into the `invoke_signed` function. + + + + +Here is a reference program on +[Solana Playground](https://beta.solpg.io/github.com/ZYJLiu/doc-examples/tree/main/cpi-pda) +which includes both examples. diff --git a/content/docs/programs/anchor/idl.mdx b/content/docs/programs/anchor/idl.mdx new file mode 100644 index 000000000..d5a910c5b --- /dev/null +++ b/content/docs/programs/anchor/idl.mdx @@ -0,0 +1,514 @@ +--- +title: IDL File +description: + Learn about the Interface Definition Language (IDL) file in Anchor, its + purpose, benefits, and how it simplifies program-client interactions +--- + +An Interface Definition Language (IDL) file provides a standardized JSON file +describing the program's instructions and accounts. This file simplifies the +process of integrating your on-chain program with client applications. + +Key Benefits of the IDL: + +- Standardization: Provides a consistent format for describing the program's + instructions and accounts +- Client Generation: Used to generate client code to interact with the program + +The `anchor build` command generates an IDL file located at +`/target/idl/.json`. + +The code snippets below highlights how the program, IDL, and client relate to +each other. + +## Program Instructions + +The `instructions` array in the IDL corresponds directly to the instructions +defined in your program. It specifies the required accounts and parameters for +each instruction. + +{/* prettier-ignore */} + + + +The program below includes an `initialize` instruction, specifying the accounts +and parameters it requires. + +```rust {8-12, 15-22} +use anchor_lang::prelude::*; + +declare_id!("BYFW1vhC1ohxwRbYoLbAWs86STa25i9sD5uEusVjTYNd"); + +#[program] +mod hello_anchor { + use super::*; + pub fn initialize(ctx: Context, data: u64) -> Result<()> { + ctx.accounts.new_account.data = data; + msg!("Changed data to: {}!", data); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Initialize<'info> { + #[account(init, payer = signer, space = 8 + 8)] + pub new_account: Account<'info, NewAccount>, + #[account(mut)] + pub signer: Signer<'info>, + pub system_program: Program<'info, System>, +} + +#[account] +pub struct NewAccount { + data: u64, +} +``` + + + + +The generated IDL file includes the instruction in a standardized JSON format, +including its name, accounts, arguments, and discriminator. 
+ +```json title="JSON" {11-12, 14-27, 30-33} +{ + "address": "BYFW1vhC1ohxwRbYoLbAWs86STa25i9sD5uEusVjTYNd", + "metadata": { + "name": "hello_anchor", + "version": "0.1.0", + "spec": "0.1.0", + "description": "Created with Anchor" + }, + "instructions": [ + { + "name": "initialize", + "discriminator": [175, 175, 109, 31, 13, 152, 155, 237], + "accounts": [ + { + "name": "new_account", + "writable": true, + "signer": true + }, + { + "name": "signer", + "writable": true, + "signer": true + }, + { + "name": "system_program", + "address": "11111111111111111111111111111111" + } + ], + "args": [ + { + "name": "data", + "type": "u64" + } + ] + } + ], + "accounts": [ + { + "name": "NewAccount", + "discriminator": [176, 95, 4, 118, 91, 177, 125, 232] + } + ], + "types": [ + { + "name": "NewAccount", + "type": { + "kind": "struct", + "fields": [ + { + "name": "data", + "type": "u64" + } + ] + } + } + ] +} +``` + + + + +The IDL file is then used to generate a client for interacting with the program, +simplifying the process of invoking the program instruction. + +```ts {19-26} +import * as anchor from "@coral-xyz/anchor"; +import { Program, BN } from "@coral-xyz/anchor"; +import { HelloAnchor } from "../target/types/hello_anchor"; +import { Keypair } from "@solana/web3.js"; +import assert from "assert"; + +describe("hello_anchor", () => { + const provider = anchor.AnchorProvider.env(); + anchor.setProvider(provider); + const wallet = provider.wallet as anchor.Wallet; + const program = anchor.workspace.HelloAnchor as Program; + + it("initialize", async () => { + // Generate keypair for the new account + const newAccountKp = new Keypair(); + + // Send transaction + const data = new BN(42); + const transactionSignature = await program.methods + .initialize(data) + .accounts({ + newAccount: newAccountKp.publicKey, + signer: wallet.publicKey, + }) + .signers([newAccountKp]) + .rpc(); + + // Fetch the created account + const newAccount = await program.account.newAccount.fetch( + newAccountKp.publicKey, + ); + + console.log("Transaction signature: ", transactionSignature); + console.log("On-chain data is:", newAccount.data.toString()); + assert(data.eq(newAccount.data)); + }); +}); +``` + + + + +## Program Accounts + +The `accounts` array in the IDL corresponds to the structs in a program +annotated with the `#[account]` macro. These structs define the data stored in +accounts created by the program. + +{/* prettier-ignore */} + + + +The program below defines a `NewAccount` struct with a single `data` field of +type `u64`. + +```rust {24-27} +use anchor_lang::prelude::*; + +declare_id!("BYFW1vhC1ohxwRbYoLbAWs86STa25i9sD5uEusVjTYNd"); + +#[program] +mod hello_anchor { + use super::*; + pub fn initialize(ctx: Context, data: u64) -> Result<()> { + ctx.accounts.new_account.data = data; + msg!("Changed data to: {}!", data); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Initialize<'info> { + #[account(init, payer = signer, space = 8 + 8)] + pub new_account: Account<'info, NewAccount>, + #[account(mut)] + pub signer: Signer<'info>, + pub system_program: Program<'info, System>, +} + +#[account] +pub struct NewAccount { + data: u64, +} +``` + + + + +The generated IDL file includes the account in a standardized JSON format, +including its name, discriminator, and fields. 
+ +```json title="JSON" {39-40, 45-54} +{ + "address": "BYFW1vhC1ohxwRbYoLbAWs86STa25i9sD5uEusVjTYNd", + "metadata": { + "name": "hello_anchor", + "version": "0.1.0", + "spec": "0.1.0", + "description": "Created with Anchor" + }, + "instructions": [ + { + "name": "initialize", + "discriminator": [175, 175, 109, 31, 13, 152, 155, 237], + "accounts": [ + { + "name": "new_account", + "writable": true, + "signer": true + }, + { + "name": "signer", + "writable": true, + "signer": true + }, + { + "name": "system_program", + "address": "11111111111111111111111111111111" + } + ], + "args": [ + { + "name": "data", + "type": "u64" + } + ] + } + ], + "accounts": [ + { + "name": "NewAccount", + "discriminator": [176, 95, 4, 118, 91, 177, 125, 232] + } + ], + "types": [ + { + "name": "NewAccount", + "type": { + "kind": "struct", + "fields": [ + { + "name": "data", + "type": "u64" + } + ] + } + } + ] +} +``` + + + + +The IDL file is then used to generate a client for interacting with the program, +simplifying the process of fetching and deserializing account data. + +```ts {29-31} +import * as anchor from "@coral-xyz/anchor"; +import { Program, BN } from "@coral-xyz/anchor"; +import { HelloAnchor } from "../target/types/hello_anchor"; +import { Keypair } from "@solana/web3.js"; +import assert from "assert"; + +describe("hello_anchor", () => { + const provider = anchor.AnchorProvider.env(); + anchor.setProvider(provider); + const wallet = provider.wallet as anchor.Wallet; + const program = anchor.workspace.HelloAnchor as Program; + + it("initialize", async () => { + // Generate keypair for the new account + const newAccountKp = new Keypair(); + + // Send transaction + const data = new BN(42); + const transactionSignature = await program.methods + .initialize(data) + .accounts({ + newAccount: newAccountKp.publicKey, + signer: wallet.publicKey, + }) + .signers([newAccountKp]) + .rpc(); + + // Fetch the created account + const newAccount = await program.account.newAccount.fetch( + newAccountKp.publicKey, + ); + + console.log("Transaction signature: ", transactionSignature); + console.log("On-chain data is:", newAccount.data.toString()); + assert(data.eq(newAccount.data)); + }); +}); +``` + + + + +## Discriminators + +Anchor assigns a unique 8 byte discriminator to each instruction and account +type in a program. These discriminators serve as identifiers to distinguish +between different instructions or account types. + +The discriminator is generated using the first 8 bytes of the Sha256 hash of a +prefix combined with the instruction or account name. As of Anchor v0.30, these +discriminators are included in the IDL file. + +Note that when working with Anchor, you typically won't need to interact +directly with these discriminators. This section is primarily to provide context +on how the discriminator is generated and used. + +{/* prettier-ignore */} + + + +The instruction discriminator is used by the program to determine which specific +instruction to execute when called. + +When an Anchor program instruction is invoked, the discriminator is included as +the first 8 bytes of the instruction data. This is done automatically by the +Anchor client. + +```json title="IDL" {4} + "instructions": [ + { + "name": "initialize", + "discriminator": [175, 175, 109, 31, 13, 152, 155, 237], + ... + } + ] +``` + +The discriminator for an instruction is the first 8 bytes of the Sha256 hash of +the prefix `global` plus the instruction name. 
+ +For example: + +``` +sha256("global:initialize") +``` + +Hexadecimal output: + +``` +af af 6d 1f 0d 98 9b ed d4 6a 95 07 32 81 ad c2 1b b5 e0 e1 d7 73 b2 fb bd 7a b5 04 cd d4 aa 30 +``` + +The first 8 bytes are used as the discriminator for the instruction. + +``` +af = 175 +af = 175 +6d = 109 +1f = 31 +0d = 13 +98 = 152 +9b = 155 +ed = 237 +``` + +You can find the implementation of the discriminator generation in the Anchor +codebase +[here](https://github.com/coral-xyz/anchor/blob/v0.30.1/lang/syn/src/codegen/program/common.rs#L5-L19), +which is used +[here](https://github.com/coral-xyz/anchor/blob/v0.30.1/lang/syn/src/codegen/program/instruction.rs#L27). + + + + +The account discriminator is used to identify the specific account type when +deserializing on-chain data and is set when the account is created. + +```json title="IDL" {4} + "accounts": [ + { + "name": "NewAccount", + "discriminator": [176, 95, 4, 118, 91, 177, 125, 232] + } + ] +``` + +The discriminator for an account is the first 8 bytes of the Sha256 hash of the +prefix `account` plus the account name. + +For example: + +``` +sha256("account:NewAccount") +``` + +Hexadecimal output: + +``` +b0 5f 04 76 5b b1 7d e8 a1 93 57 2a d3 5e b1 ae e5 f0 69 e2 09 7e 5c d2 64 56 55 2a cb 4a e9 57 +``` + +The first 8 bytes are used as the discriminator for the account. + +``` +b0 = 176 +5f = 95 +04 = 4 +76 = 118 +5b = 91 +b1 = 177 +7d = 125 +e8 = 232 +``` + +You can find the implementation of the discriminator generation in the Anchor +codebase +[here](https://github.com/coral-xyz/anchor/blob/v0.30.1/lang/attribute/account/src/lib.rs#L101-L117). + +Note that different programs using identical account names will generate the +same discriminator. When deserializing account data, Anchor programs will also +check an account is owned by the expected program for a specified account type. + + + + +The event discriminator is used to identify the specific event type when +deserializing on-chain data on event emission. + +```json title="IDL" {4} + "events": [ + { + "name": "NewEvent", + "discriminator": [113, 21, 185, 70, 164, 99, 232, 201] + } + ] +``` + +The discriminator for an event is the first 8 bytes of the Sha256 hash of the +prefix `event` plus the event name. + +For example: + +``` +sha256("event:NewEvent") +``` + +Hexadecimal output: + +``` +71 15 b9 46 a4 63 e8 c9 2a 3c 4d 83 87 16 cd 9b 66 28 cb e2 cb 7c 5d 70 59 f3 42 2b dc 35 03 53 +``` + +The first 8 bytes are used as the discriminator for the account. + +Hex to decimal gives us: + +``` +71 = 113 +15 = 21 +b9 = 185 +46 = 70 +a4 = 164 +63 = 99 +e8 = 232 +c9 = 201 +``` + +You can find the implementation of the discriminator generation in the Anchor +codebase +[here](https://github.com/coral-xyz/anchor/blob/v0.30.1/lang/attribute/event/src/lib.rs#L23-L27). + +Note that different programs using identical event names will generate the same +discriminator. When deserializing event data, Anchor programs will also check an +event is owned by the expected program for a specified event type. + + + diff --git a/content/docs/programs/anchor/index.mdx b/content/docs/programs/anchor/index.mdx new file mode 100644 index 000000000..de46bffa2 --- /dev/null +++ b/content/docs/programs/anchor/index.mdx @@ -0,0 +1,402 @@ +--- +title: Anchor Framework +description: + Learn how to build Solana programs using the Anchor framework. This + comprehensive guide covers creating, building, testing, and deploying Solana + smart contracts with Anchor. 
+altRoutes: + - /docs/programs/debugging + - /docs/programs/lang-c + - /docs/programs/overview +h1: Getting Started with Anchor +--- + +The Anchor framework is a tool that simplifies the process of building Solana +programs. Whether you're new to blockchain development or an experienced +programmer, Anchor simplifies the process of writing, testing, and deploying +Solana programs. + +In this section, we'll walk through: + +- Creating a new Anchor project +- Building and testing your program +- Deploying to Solana clusters +- Understanding the project file structure + +## Prerequisites + +For detailed installation instructions, visit the +[installation](/docs/intro/installation) page. + +Before you begin, ensure you have the following installed: + +- Rust: The programming language for building Solana programs. +- Solana CLI: Command-line tool for Solana development. +- Anchor CLI: Command-line tool for the Anchor framework. + +To verify Anchor CLI installation, open your terminal and run: + +```shell title="Terminal" +anchor --version +``` + +Expected output: + +```shell title="Terminal" +anchor-cli 0.30.1 +``` + +## Getting Started + +This section covers the basic steps to create, build, and test your first local +Anchor program. + + + + + +### Create a new Project + +To start a new project, use the `anchor init` command followed by your project's +name. This command creates a new directory with the specified name and sets up a +default program and test file. + +```shell title="Terminal" +anchor init my-project +``` + +Navigate to the new project directory and open it in your code editor. + +```shell title="Terminal" copy +cd my-project +``` + +The default Anchor program is located at `programs/my-project/src/lib.rs`. + + + + +The value in the `declare_id!` macro is the program ID, a unique identifier for +your program. + +By default, it is the public key of the keypair generated in +`/target/deploy/my_project-keypair.json`. + +```rs title="lib.rs" +use anchor_lang::prelude::*; + +declare_id!("3ynNB373Q3VAzKp7m4x238po36hjAGFXFJB4ybN2iTyg"); + +#[program] +pub mod my_project { + use super::*; + + pub fn initialize(ctx: Context) -> Result<()> { + msg!("Greetings from: {:?}", ctx.program_id); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Initialize {} +``` + + + + +The default Typescript test file is located at `/tests/my-project.ts`. + + + + +This file demonstrates how to invoke the default program's `initialize` +instruction in Typescript. + +```ts title="my-project.ts" +import * as anchor from "@coral-xyz/anchor"; +import { Program } from "@coral-xyz/anchor"; +import { MyProject } from "../target/types/my_project"; + +describe("my-project", () => { + // Configure the client to use the local cluster. + anchor.setProvider(anchor.AnchorProvider.env()); + + const program = anchor.workspace.MyProject as Program; + + it("Is initialized!", async () => { + // Add your test here. + const tx = await program.methods.initialize().rpc(); + console.log("Your transaction signature", tx); + }); +}); +``` + + + + +If you prefer Rust for testing, initialize your project with the +`--test-template rust` flag. + +```shell +anchor init --test-template rust my-project +``` + +The Rust test file will be at `/tests/src/test_initialize.rs`. 
+ + + + +```rust title="test_initialize.rs" +use std::str::FromStr; + +use anchor_client::{ + solana_sdk::{ + commitment_config::CommitmentConfig, pubkey::Pubkey, signature::read_keypair_file, + }, + Client, Cluster, +}; + +#[test] +fn test_initialize() { + let program_id = "3ynNB373Q3VAzKp7m4x238po36hjAGFXFJB4ybN2iTyg"; + let anchor_wallet = std::env::var("ANCHOR_WALLET").unwrap(); + let payer = read_keypair_file(&anchor_wallet).unwrap(); + + let client = Client::new_with_options(Cluster::Localnet, &payer, CommitmentConfig::confirmed()); + let program_id = Pubkey::from_str(program_id).unwrap(); + let program = client.program(program_id).unwrap(); + + let tx = program + .request() + .accounts(my_program::accounts::Initialize {}) + .args(my_program::instruction::Initialize {}) + .send() + .expect(""); + + println!("Your transaction signature {}", tx); +} +``` + + + + + + + +### Build the Program + +Build the program by running `anchor build`. + +```shell title="Terminal" copy +anchor build +``` + +The compiled program will be at `/target/deploy/my_project.so`. The content of +this file is what gets stored on the Solana network (as an executable account) +when you deploy your program. + + + + +### Test the Program + +To test the program, run `anchor test`. + +```shell title="Terminal" copy +anchor test +``` + +By default, the `Anchor.toml` config file specifies the `localnet` cluster. When +developing on `localnet`, `anchor test` will automatically: + +1. Start a local Solana validator +2. Build and deploy your program to the local cluster +3. Run the tests in the `tests` folder +4. Stop the local Solana validator + +Alternatively, you can manually start a local Solana validator and run tests +against it. This is useful if you want to keep the validator running while you +iterate on your program. It allows you to inspect accounts and transaction logs +on the [Solana Explorer](https://explorer.solana.com/?cluster=custom) while +developing locally. + +Open a new terminal and start a local Solana validator by running the +`solana-test-validator` command. + +```shell title="Terminal" copy +solana-test-validator +``` + +In a separate terminal, run the tests against the local cluster. Use the +`--skip-local-validator` flag to skip starting the local validator since it's +already running. + +```shell title="Terminal" copy +anchor test --skip-local-validator +``` + + + + +### Deploy to Devnet + +By default, the `Anchor.toml` config file in an Anchor project specifies the +localnet cluster. + +```toml title="Anchor.toml" {14} +[toolchain] + +[features] +resolution = true +skip-lint = false + +[programs.localnet] +my_program = "3ynNB373Q3VAzKp7m4x238po36hjAGFXFJB4ybN2iTyg" + +[registry] +url = "https://api.apr.dev" + +[provider] +cluster = "Localnet" +wallet = "~/.config/solana/id.json" + +[scripts] +test = "yarn run ts-mocha -p ./tsconfig.json -t 1000000 tests/**/*.ts" +``` + +To deploy your program to devnet, change the `cluster` value to `Devnet`. Note +that this requires your wallet to have enough SOL on Devnet to cover deployment +cost. + +```diff +-cluster = "Localnet" ++cluster = "Devnet" +``` + +```toml title="Anchor.toml" +[provider] +cluster = "Devnet" +wallet = "~/.config/solana/id.json" +``` + +Now when you run `anchor deploy`, your program will be deployed to the devnet +cluster. The `anchor test` command will also use the cluster specified in the +`Anchor.toml` file. 
```shell
anchor deploy
```

To deploy to mainnet, simply update the `Anchor.toml` file to specify the
mainnet cluster.

```toml title="Anchor.toml"
[provider]
cluster = "Mainnet"
wallet = "~/.config/solana/id.json"
```

### Update the Program

Solana programs can be updated by redeploying the program to the same program
ID.

To update a program, simply make changes to your program's code and run the
`anchor build` command to generate an updated `.so` file.

```shell
anchor build
```

Then run the `anchor deploy` command to redeploy the updated program.

```shell
anchor deploy
```

### Close the Program

To reclaim the SOL allocated to a program account, you can close your Solana
program.

To close a program, use the `solana program close <PROGRAM_ID>` command. For
example:

```shell
solana program close 3ynNB373Q3VAzKp7m4x238po36hjAGFXFJB4ybN2iTyg --bypass-warning
```

Note that once a program is closed, the program ID cannot be reused to deploy a
new program.

## Project File Structure

Below is an overview of the default file structure in an Anchor workspace:

```
.
├── .anchor
│   └── program-logs
├── app
├── migrations
├── programs
│   └── [project-name]
│       └── src
│           ├── lib.rs
│           ├── Cargo.toml
│           └── Xargo.toml
├── target
│   ├── deploy
│   │   └── [project-name]-keypair.json
│   ├── idl
│   │   └── [project-name].json
│   └── types
│       └── [project-name].ts
├── tests
│   └── [project-name].ts
├── Anchor.toml
├── Cargo.toml
└── package.json
```

### Programs Folder

The `/programs` folder contains your project's Anchor programs. A single
workspace can contain multiple programs.

### Tests Folder

The `/tests` folder contains test files for your project. A default test file is
created for you when you create your project.

### Target Folder

The `/target` folder contains build outputs. The main subfolders include:

- `/deploy`: Contains the keypair and program binary for your programs.
- `/idl`: Contains the JSON IDL for your programs.
- `/types`: Contains the TypeScript types generated from the IDL.

### Anchor.toml File

The `Anchor.toml` file configures workspace settings for your project.

### .anchor Folder

Includes a `program-logs` file that contains transaction logs from the last run
of test files.

### App Folder

The `/app` folder is an empty folder that can be optionally used for your
frontend code.
diff --git a/content/docs/programs/anchor/meta.json b/content/docs/programs/anchor/meta.json
new file mode 100644
index 000000000..8b849ba21
--- /dev/null
+++ b/content/docs/programs/anchor/meta.json
@@ -0,0 +1,4 @@
{
  "title": "Anchor Framework",
  "pages": ["program-structure", "idl", "client-typescript", "pda", "cpi"]
}
diff --git a/content/docs/programs/anchor/pda.mdx b/content/docs/programs/anchor/pda.mdx
new file mode 100644
index 000000000..1cd0ca84e
--- /dev/null
+++ b/content/docs/programs/anchor/pda.mdx
@@ -0,0 +1,323 @@
---
title: PDAs with Anchor
description:
  Learn how to use Program Derived Addresses (PDAs) in Anchor programs, using
  constraints and implementing common PDA patterns
---

[Program Derived Addresses (PDAs)](/docs/core/pda) refer to a feature of Solana
development that allows you to create a unique address derived deterministically
from pre-defined inputs (seeds) and a program ID.

This section will cover basic examples of how to use PDAs in an Anchor program.
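As a quick point of reference before diving into the constraints, the sketch
below shows how a client can derive a PDA off-chain from a seed list and a
program ID using the legacy `@solana/web3.js` API. The program ID and seeds are
placeholders borrowed from the examples later on this page, not values you must
use.

```ts
import { PublicKey, Keypair } from "@solana/web3.js";

// Placeholder program ID and user key; any program ID and seed list work the same way.
const programId = new PublicKey("BZLiJ62bzRryYp9mRobz47uA66WDgtfTXhhgM25tJyx5");
const user = Keypair.generate().publicKey;

// Deterministically derive the PDA and its bump seed from the seeds + program ID.
const [pda, bump] = PublicKey.findProgramAddressSync(
  [Buffer.from("hello_world"), user.toBuffer()],
  programId,
);

console.log("PDA:", pda.toBase58(), "bump:", bump);
```

The same derivation is what Anchor performs for you when you use the `seeds`
and `bump` constraints described next.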
+ +## Anchor PDA Constraints + +When using PDAs in an Anchor program, you generally use Anchor's account +constraints to define the seeds used to derive the PDA. These constraints serve +as security checks to ensure that the correct address is derived. + +The constraints used to define the PDA seeds include: + +- `seeds`: An array of optional seeds used to derive the PDA. Seeds can be + static values or dynamic references to account data. +- `bump`: The bump seed used to derive the PDA. Used to ensure the address falls + off the Ed25519 curve and is a valid PDA. +- `seeds::program` - (Optional) The program ID used to derive the PDA address. + This constraint is only used to derive a PDA where the program ID is not the + current program. + +The `seeds` and `bump` constraints are required to be used together. + +### Usage Examples + +Below are examples demonstrating how to use PDA constraints in an Anchor +program. + +{/* prettier-ignore */} + + + +The `seeds` constraint specifies the optional values used to derive the PDA. + +#### No Optional Seeds + +- Use an empty array `[]` to define a PDA without optional seeds. + +```rs +#[derive(Accounts)] +pub struct InstructionAccounts<'info> { + #[account( + seeds = [], + bump, + )] + pub pda_account: SystemAccount<'info>, +} +``` + +#### Single Static Seed + +- Specify optional seeds in the `seeds` constraint. + +```rs +#[derive(Accounts)] +pub struct InstructionAccounts<'info> { + #[account( + seeds = [b"hello_world"], + bump, + )] + pub pda_account: SystemAccount<'info>, +} +``` + +#### Multiple Seeds and Account References + +- Multiple seeds can be specified in the `seeds` constraint. The `seeds` + constraint can also reference other account addresses or account data. + +```rs +#[derive(Accounts)] +pub struct InstructionAccounts<'info> { + pub signer: Signer<'info>, + #[account( + seeds = [b"hello_world", signer.key().as_ref()], + bump, + )] + pub pda_account: SystemAccount<'info>, +} +``` + +The example above uses both a static seed (`b"hello_world"`) and a dynamic seed +(the signer's public key). + + + + +The `bump` constraint specifies the bump seed used to derive the PDA. + +#### Automatic Bump Calculation + +When using the `bump` constraint without a value, the bump is automatically +calculated each time the instruction is invoked. + +```rs +#[derive(Accounts)] +pub struct InstructionAccounts<'info> { + #[account( + seeds = [b"hello_world"], + bump, + )] + pub pda_account: SystemAccount<'info>, +} +``` + +#### Specify Bump Value + +You can explicitly provide the bump value, which is useful for optimizing +compute unit usage. This assumes that the PDA account has been created and the +bump seed is stored as a field on an existing account. + +```rs +#[derive(Accounts)] +pub struct InstructionAccounts<'info> { + #[account( + seeds = [b"hello_world"], + bump = pda_account.bump_seed, + )] + pub pda_account: Account<'info, CustomAccount>, +} + +#[account] +pub struct CustomAccount { + pub bump_seed: u8, +} +``` + +By storing the bump value in the account's data, the program doesn't need to +recalculate it, saving compute units. The saved bump value can be stored on the +account itself or another account. + + + + +The `seeds::program` constraint specifies the program ID used to derive the PDA. +This constraint is only used when deriving a PDA from a different program. + +Use this constraint when your instruction needs to interact with PDA accounts +created by another program. 
+ +```rs +#[derive(Accounts)] +pub struct InstructionAccounts<'info> { + #[account( + seeds = [b"hello_world"], + bump, + seeds::program = other_program.key(), + )] + pub pda_account: SystemAccount<'info>, + pub other_program: Program<'info, OtherProgram>, +} +``` + + + + +The `init` constraint is commonly used with `seeds` and `bump` to create a new +account with an address that is a PDA. Under the hood, the `init` constraint +invokes the System Program to create the account. + +```rs +#[derive(Accounts)] +pub struct InstructionAccounts<'info> { + #[account(mut)] + pub signer: Signer<'info>, + #[account( + init, + seeds = [b"hello_world", signer.key().as_ref()], + bump, + payer = signer, + space = 8 + 1, + )] + pub pda_account: Account<'info, CustomAccount>, + pub system_program: Program<'info, System>, +} + +#[account] +pub struct CustomAccount { + pub bump_seed: u8, +} +``` + + + + +## PDA seeds in the IDL + +Program Derived Address (PDA) seeds defined in the `seeds` constraint are +included in the program's IDL file. This allows the Anchor client to +automatically resolve accounts using these seeds when constructing instructions. + +This example below shows the relationship between the program, IDL, and client. + +{/* prettier-ignore */} + + + +The program below defines a `pda_account` using a static seed (`b"hello_world"`) +and the signer's public key as a dynamic seed. + +```rs {18} /signer/ +use anchor_lang::prelude::*; + +declare_id!("BZLiJ62bzRryYp9mRobz47uA66WDgtfTXhhgM25tJyx5"); + +#[program] +mod hello_anchor { + use super::*; + pub fn test_instruction(ctx: Context) -> Result<()> { + msg!("PDA: {}", ctx.accounts.pda_account.key()); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct InstructionAccounts<'info> { + pub signer: Signer<'info>, + #[account( + seeds = [b"hello_world", signer.key().as_ref()], + bump, + )] + pub pda_account: SystemAccount<'info>, +} +``` + + + + +The program's IDL file includes the PDA seeds defined in the `seeds` constraint. + +- The static seed `b"hello_world"` is converted to byte values. +- The dynamic seed is included as reference to the signer account. + +```json {22-29} +{ + "address": "BZLiJ62bzRryYp9mRobz47uA66WDgtfTXhhgM25tJyx5", + "metadata": { + "name": "hello_anchor", + "version": "0.1.0", + "spec": "0.1.0", + "description": "Created with Anchor" + }, + "instructions": [ + { + "name": "test_instruction", + "discriminator": [33, 223, 61, 208, 32, 193, 201, 79], + "accounts": [ + { + "name": "signer", + "signer": true + }, + { + "name": "pda_account", + "pda": { + "seeds": [ + { + "kind": "const", + "value": [104, 101, 108, 108, 111, 95, 119, 111, 114, 108, 100] + }, + { + "kind": "account", + "path": "signer" + } + ] + } + } + ], + "args": [] + } + ] +} +``` + + + + +The Anchor client can automatically resolve the PDA address using the IDL file. + +In the example below, Anchor automatically resolves the PDA address using the +provider wallet as the signer, and its public key as the dynamic seed for PDA +derivation. This removes the need to explicitly derive the PDA when building the +instruction. + +```ts {13} +import * as anchor from "@coral-xyz/anchor"; +import { Program } from "@coral-xyz/anchor"; +import { HelloAnchor } from "../target/types/hello_anchor"; + +describe("hello_anchor", () => { + // Configure the client to use the local cluster. + anchor.setProvider(anchor.AnchorProvider.env()); + + const program = anchor.workspace.HelloAnchor as Program; + + it("Is initialized!", async () => { + // Add your test here. 
+ const tx = await program.methods.testInstruction().rpc(); + console.log("Your transaction signature", tx); + }); +}); +``` + +When the instruction is invoked, the PDA is printed to program logs as defined +in the program instruction. + +```{3} +Program BZLiJ62bzRryYp9mRobz47uA66WDgtfTXhhgM25tJyx5 invoke [1] +Program log: Instruction: TestInstruction +Program log: PDA: 3Hikt5mpKaSS4UNA5Du1TZJ8tp4o8VC8YWW6X9vtfVnJ +Program BZLiJ62bzRryYp9mRobz47uA66WDgtfTXhhgM25tJyx5 consumed 18505 of 200000 compute units +Program BZLiJ62bzRryYp9mRobz47uA66WDgtfTXhhgM25tJyx5 success +``` + + + diff --git a/content/docs/programs/anchor/program-structure.mdx b/content/docs/programs/anchor/program-structure.mdx new file mode 100644 index 000000000..4c5342fd9 --- /dev/null +++ b/content/docs/programs/anchor/program-structure.mdx @@ -0,0 +1,398 @@ +--- +title: Program Structure +description: + Learn about the structure of Anchor programs, including key macros and their + roles in simplifying Solana program development +h1: Anchor Program Structure +--- + +The [Anchor framework](https://www.anchor-lang.com/) uses +[Rust macros](https://doc.rust-lang.org/book/ch19-06-macros.html) to reduce +boilerplate code and simplify the implementation of common security checks +required for writing Solana programs. + +The main macros found in an Anchor program include: + +- [`declare_id`](#declare-id-macro): Specifies the program's on-chain address +- [`#[program]`](#program-macro): Specifies the module containing the program’s + instruction logic +- [`#[derive(Accounts)]`](#derive-accounts-macro): Applied to structs to + indicate a list of accounts required by an instruction +- [`#[account]`](#account-macro): Applied to structs to create custom account + types for the program + +## Example Program + +Let's examine a simple program that demonstrates the usage of the macros +mentioned above to understand the basic structure of an Anchor program. + +The example program below creates a new account (`NewAccount`) that stores a +`u64` value passed to the `initialize` instruction. + +```rust title="lib.rs" +use anchor_lang::prelude::*; + +declare_id!("11111111111111111111111111111111"); + +#[program] +mod hello_anchor { + use super::*; + pub fn initialize(ctx: Context, data: u64) -> Result<()> { + ctx.accounts.new_account.data = data; + msg!("Changed data to: {}!", data); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Initialize<'info> { + #[account(init, payer = signer, space = 8 + 8)] + pub new_account: Account<'info, NewAccount>, + #[account(mut)] + pub signer: Signer<'info>, + pub system_program: Program<'info, System>, +} + +#[account] +pub struct NewAccount { + data: u64, +} +``` + +## declare_id! macro + +The +[`declare_id`](https://github.com/coral-xyz/anchor/blob/v0.30.1/lang/attribute/account/src/lib.rs#L430) +macro specifies the on-chain address of the program, known as the program ID. + +```rust title="lib.rs" {3} +use anchor_lang::prelude::*; + +declare_id!("11111111111111111111111111111111"); +``` + +By default, the program ID is the public key of the keypair generated at +`/target/deploy/your_program_name.json`. 
To update the value of the program ID in the `declare_id` macro with the public
key of the keypair in the `/target/deploy/your_program_name.json` file, run the
following command:

```shell title="Terminal"
anchor keys sync
```

The `anchor keys sync` command is useful to run when cloning a repository where
the value of the program ID in a cloned repo's `declare_id` macro won't match
the one generated when you run `anchor build` locally.

## #[program] macro

The
[`#[program]`](https://github.com/coral-xyz/anchor/blob/v0.30.1/lang/attribute/program/src/lib.rs#L12)
macro defines the module that contains all the instruction handlers for your
program. Each public function within this module corresponds to an instruction
that can be invoked.

```rust title="lib.rs" {5, 8-12}
use anchor_lang::prelude::*;

declare_id!("11111111111111111111111111111111");

#[program]
mod hello_anchor {
    use super::*;
    pub fn initialize(ctx: Context<Initialize>, data: u64) -> Result<()> {
        ctx.accounts.new_account.data = data;
        msg!("Changed data to: {}!", data);
        Ok(())
    }
}

#[derive(Accounts)]
pub struct Initialize<'info> {
    #[account(init, payer = signer, space = 8 + 8)]
    pub new_account: Account<'info, NewAccount>,
    #[account(mut)]
    pub signer: Signer<'info>,
    pub system_program: Program<'info, System>,
}

#[account]
pub struct NewAccount {
    data: u64,
}
```

### Instruction Context

Instruction handlers are functions that define the logic executed when an
instruction is invoked. The first parameter of each handler is a `Context<T>`
type, where `T` is a struct implementing the `Accounts` trait and specifies the
accounts the instruction requires.

The
[`Context`](https://github.com/coral-xyz/anchor/blob/v0.30.1/lang/src/context.rs#L24)
type provides the instruction with access to the following non-argument inputs:

```rust
pub struct Context<'a, 'b, 'c, 'info, T> {
    /// Currently executing program id.
    pub program_id: &'a Pubkey,
    /// Deserialized accounts.
    pub accounts: &'b mut T,
    /// Remaining accounts given but not deserialized or validated.
    /// Be very careful when using this directly.
    pub remaining_accounts: &'c [AccountInfo<'info>],
    /// Bump seeds found during constraint validation. This is provided as a
    /// convenience so that handlers don't have to recalculate bump seeds or
    /// pass them in as arguments.
    pub bumps: BTreeMap<String, u8>,
}
```

The `Context` fields can be accessed in an instruction using dot notation:

- `ctx.accounts`: The accounts required for the instruction
- `ctx.program_id`: The program's public key (address)
- `ctx.remaining_accounts`: Additional accounts not specified in the `Accounts`
  struct.
- `ctx.bumps`: Bump seeds for any
  [Program Derived Address (PDA)](/docs/core/pda) accounts specified in the
  `Accounts` struct

Additional parameters are optional and can be included to specify arguments that
must be provided when the instruction is invoked.

```rust title="lib.rs" /Context/ /data/1
pub fn initialize(ctx: Context<Initialize>, data: u64) -> Result<()> {
    ctx.accounts.new_account.data = data;
    msg!("Changed data to: {}!", data);
    Ok(())
}
```

In this example, the `Initialize` struct implements the `Accounts` trait where
each field in the struct represents an account required by the `initialize`
instruction.
+ +```rust title="lib.rs" /Initialize/ /Accounts/ +#[program] +mod hello_anchor { + use super::*; + pub fn initialize(ctx: Context, data: u64) -> Result<()> { + ctx.accounts.new_account.data = data; + msg!("Changed data to: {}!", data); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Initialize<'info> { + #[account(init, payer = signer, space = 8 + 8)] + pub new_account: Account<'info, NewAccount>, + #[account(mut)] + pub signer: Signer<'info>, + pub system_program: Program<'info, System>, +} +``` + +## #[derive(Accounts)] macro + +The +[`#[derive(Accounts)]`](https://github.com/coral-xyz/anchor/blob/v0.30.1/lang/derive/accounts/src/lib.rs#L630) +macro is applied to a struct to specify the accounts that must be provided when +an instruction is invoked. This macro implements the +[`Accounts`](https://github.com/coral-xyz/anchor/blob/v0.30.1/lang/src/lib.rs#L105) +trait, which simplifies account validation and serialization and deserialization +of account data. + +```rust /Accounts/ {1} +#[derive(Accounts)] +pub struct Initialize<'info> { + #[account(init, payer = signer, space = 8 + 8)] + pub new_account: Account<'info, NewAccount>, + #[account(mut)] + pub signer: Signer<'info>, + pub system_program: Program<'info, System>, +} +``` + +Each field in the struct represents an account required by an instruction. The +naming of each field is arbitrary, but it is recommended to use a descriptive +name that indicates the purpose of the account. + +```rust /signer/2 /new_account/ /system_program/ +#[derive(Accounts)] +pub struct Initialize<'info> { + #[account(init, payer = signer, space = 8 + 8)] + pub new_account: Account<'info, NewAccount>, + #[account(mut)] + pub signer: Signer<'info>, + pub system_program: Program<'info, System>, +} +``` + +### Account Validation + +To prevent security vulnerabilities, it's important to verify that accounts +provided to an instruction are the expected accounts. Accounts are validated in +Anchor programs in two ways that are generally used together: + +- [Account Constraints](https://www.anchor-lang.com/docs/account-constraints): + Constraints define additional conditions that an account must satisfy to be + considered valid for the instruction. Constraints are applied using the + `#[account(..)]` attribute, which is placed above a field in a struct that + implements the `Accounts` trait. + + You can find the implementation of the constraints + [here](https://github.com/coral-xyz/anchor/blob/v0.30.1/lang/syn/src/parser/accounts/constraints.rs). + + ```rust {3, 5} + #[derive(Accounts)] + pub struct Initialize<'info> { + #[account(init, payer = signer, space = 8 + 8)] + pub new_account: Account<'info, NewAccount>, + #[account(mut)] + pub signer: Signer<'info>, + pub system_program: Program<'info, System>, + } + ``` + +- [Account Types](https://www.anchor-lang.com/docs/account-types): Anchor + provides various account types to help ensure that the account provided by the + client matches what the program expects. + + You can find the implementation of the account types + [here](https://github.com/coral-xyz/anchor/blob/v0.30.1/lang/src/accounts). 
+ + ```rust /Account/2 /Signer/ /Program/ + #[derive(Accounts)] + pub struct Initialize<'info> { + #[account(init, payer = signer, space = 8 + 8)] + pub new_account: Account<'info, NewAccount>, + #[account(mut)] + pub signer: Signer<'info>, + pub system_program: Program<'info, System>, + } + ``` + +When an instruction in an Anchor program is invoked, the program first validates +the accounts provided before executing the instruction's logic. After +validation, these accounts can be accessed within the instruction using the +`ctx.accounts` syntax. + +```rust title="lib.rs" /ctx.accounts.new_account/ /new_account/ /Initialize/ +use anchor_lang::prelude::*; + +declare_id!("11111111111111111111111111111111"); + +#[program] +mod hello_anchor { + use super::*; + pub fn initialize(ctx: Context, data: u64) -> Result<()> { + ctx.accounts.new_account.data = data; + msg!("Changed data to: {}!", data); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Initialize<'info> { + #[account(init, payer = signer, space = 8 + 8)] + pub new_account: Account<'info, NewAccount>, + #[account(mut)] + pub signer: Signer<'info>, + pub system_program: Program<'info, System>, +} + +#[account] +pub struct NewAccount { + data: u64, +} +``` + +## #[account] macro + +The +[`#[account]`](https://github.com/coral-xyz/anchor/blob/v0.30.1/lang/attribute/account/src/lib.rs#L66) +macro is applied to structs that define the data stored in custom accounts +created by your program. + +```rust +#[account] +pub struct NewAccount { + data: u64, +} +``` + +This macro implements various traits +[detailed here](https://docs.rs/anchor-lang/latest/anchor_lang/attr.account.html). +The key functionalities of the `#[account]` macro include: + +- [Assign Program Owner](https://github.com/coral-xyz/anchor/blob/v0.30.1/lang/attribute/account/src/lib.rs#L119-L132): + When creating an account, the program owner of the account is automatically + set to the program specified in `declare_id`. +- [Set Discriminator](https://github.com/coral-xyz/anchor/blob/v0.30.1/lang/attribute/account/src/lib.rs#L101-L117): + A unique 8 byte discriminator, specific to the account type, is added as the + first 8 bytes of account data during its initialization. This helps in + differentiating account types and is used for account validation. +- [Data Serialization and Deserialization](https://github.com/coral-xyz/anchor/blob/v0.30.1/lang/attribute/account/src/lib.rs#L202-L246): + Account data is automatically serialized and deserialized as the account type. + +```rust title="lib.rs" /data/2,6 /NewAccount/ {24-27} +use anchor_lang::prelude::*; + +declare_id!("11111111111111111111111111111111"); + +#[program] +mod hello_anchor { + use super::*; + pub fn initialize(ctx: Context, data: u64) -> Result<()> { + ctx.accounts.new_account.data = data; + msg!("Changed data to: {}!", data); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Initialize<'info> { + #[account(init, payer = signer, space = 8 + 8)] + pub new_account: Account<'info, NewAccount>, + #[account(mut)] + pub signer: Signer<'info>, + pub system_program: Program<'info, System>, +} + +#[account] +pub struct NewAccount { + data: u64, +} +``` + +### Account Discriminator + +An account discriminator in an Anchor program refers to an 8 byte identifier +unique to each account type. It's derived from the first 8 bytes of the SHA256 +hash of the string `account:`. This discriminator is stored as the +first 8 bytes of account data when an account is created. 
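As an illustration (not something you need to do yourself when using Anchor),
the discriminator for the `NewAccount` type above can be reproduced off-chain
with a few lines of TypeScript, assuming a Node.js environment for the
`node:crypto` import:

```ts
import { createHash } from "node:crypto";

// Anchor hashes the prefix "account:" followed by the account type's name.
const preimage = "account:NewAccount";

// The first 8 bytes of the SHA256 hash form the discriminator.
const hash = createHash("sha256").update(preimage).digest();
const discriminator = Array.from(hash.subarray(0, 8));

console.log(discriminator); // [176, 95, 4, 118, 91, 177, 125, 232]
```

The printed bytes match the `NewAccount` discriminator shown in the IDL
examples earlier in these docs.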
When creating an account in an Anchor program, 8 bytes must be allocated for the
discriminator.

```rust /8/1
#[account(init, payer = signer, space = 8 + 8)]
pub new_account: Account<'info, NewAccount>,
```

The discriminator is used during the following two scenarios:

- Initialization: When an account is created, the discriminator is set as the
  first 8 bytes of the account's data.
- Deserialization: When account data is deserialized, the first 8 bytes of
  account data is checked against the discriminator of the expected account
  type.

If there's a mismatch, it indicates that the client has provided an unexpected
account. This mechanism serves as an account validation check in Anchor
programs.
diff --git a/content/docs/programs/deploying.mdx b/content/docs/programs/deploying.mdx
new file mode 100644
index 000000000..05212a7dd
--- /dev/null
+++ b/content/docs/programs/deploying.mdx
@@ -0,0 +1,330 @@
---
title: "Deploying Programs"
description:
  Deploying onchain programs can be done using the Solana CLI and the
  Upgradeable BPF loader to upload the compiled byte-code to the Solana
  blockchain.
---

Solana programs are stored in "executable" accounts on the network. These
accounts contain the program's compiled bytecode that defines the instructions
users invoke to interact with the program.

## CLI Commands

This section is intended as a reference for the basic CLI commands for building
and deploying Solana programs. For a step-by-step guide on creating your first
program, start with [Developing Programs in Rust](/docs/programs/rust).

### Build Program

To build your program, use the `cargo build-sbf` command.

```shell
cargo build-sbf
```

This command will:

1. Compile your program
2. Create a `target/deploy` directory
3. Generate a `<program-name>.so` file, where `<program-name>` matches your
   program's name in `Cargo.toml`

The output `.so` file contains your program's compiled bytecode that will be
stored in a Solana account when you deploy your program.

### Deploy Program

To deploy your program, use the `solana program deploy` command followed by the
path to the `.so` file created by the `cargo build-sbf` command.

```shell
solana program deploy ./target/deploy/your_program.so
```

During times of congestion, there are a few additional flags you can use to help
with program deployment.

- `--with-compute-unit-price`: Set compute unit price for transaction, in
  increments of 0.000001 lamports (micro-lamports) per compute unit.
- `--max-sign-attempts`: Maximum number of attempts to sign or resign
  transactions after blockhash expiration. If any transactions sent during the
  program deploy are still unconfirmed after the initially chosen recent
  blockhash expires, those transactions will be resigned with a new recent
  blockhash and resent. Use this setting to adjust the maximum number of
  transaction signing iterations. Each blockhash is valid for about 60 seconds,
  which means using the default value of 5 will lead to sending transactions for
  at least 5 minutes or until all transactions are confirmed, whichever comes
  first. [default: 5]
- `--use-rpc`: Send write transactions to the configured RPC instead of
  validator TPUs. This flag requires a stake-weighted RPC connection.

You can use the flags individually or combine them together.
For example:

```shell
solana program deploy ./target/deploy/your_program.so --with-compute-unit-price 10000 --max-sign-attempts 1000 --use-rpc
```

- Use the
  [Priority Fee API by Helius](https://docs.helius.dev/guides/priority-fee-api)
  to get an estimate of the priority fee to set with the
  `--with-compute-unit-price` flag.

- Get a [stake-weighted](/developers/guides/advanced/stake-weighted-qos) RPC
  connection from [Helius](https://www.helius.dev/) or
  [Triton](https://triton.one/) to use with the `--use-rpc` flag. The
  `--use-rpc` flag should only be used with a stake-weighted RPC connection.

To update your default RPC URL with a custom RPC endpoint, use the
`solana config set` command.

```shell
solana config set --url <RPC_URL>
```

You can view the list of programs you've deployed using the `program show`
subcommand:

```shell
solana program show --programs
```

Example output:

```
Program Id | Slot | Authority | Balance
2w3sK6CW7Hy1Ljnz2uqPrQsg4KjNZxD4bDerXDkSX3Q1 | 133132 | 4kh6HxYZiAebF8HWLsUWod2EaQQ6iWHpHYCz8UcmFbM1 | 0.57821592 SOL
```

### Update Program

A program's update authority can modify an existing Solana program by deploying
a new `.so` file to the same program ID.

To update an existing Solana program:

- Make changes to your program source code
- Run `cargo build-sbf` to generate an updated `.so` file
- Run `solana program deploy ./target/deploy/your_program.so` to deploy the
  updated `.so` file

The update authority can be changed using the `set-upgrade-authority` subcommand
as follows:

```shell
solana program set-upgrade-authority <PROGRAM_ADDRESS> --new-upgrade-authority <NEW_UPGRADE_AUTHORITY>
```

### Immutable Program

A program can be made immutable by removing its update authority. This is an
irreversible action.

```shell
solana program set-upgrade-authority <PROGRAM_ADDRESS> --final
```

You can specify that a program should be immutable on deployment by setting the
`--final` flag when deploying the program.

```shell
solana program deploy ./target/deploy/your_program.so --final
```

### Close Program

You can close your Solana program to reclaim the SOL allocated to the account.
Closing a program is irreversible, so it should be done with caution. To close a
program, use the `program close` subcommand. For example:

```shell title="Terminal"
solana program close 4Ujf5fXfLx2PAwRqcECCLtgDxHKPznoJpa43jUBxFfMz --bypass-warning
```

Example output:

```
Closed Program Id 4Ujf5fXfLx2PAwRqcECCLtgDxHKPznoJpa43jUBxFfMz, 0.1350588 SOL
reclaimed
```

Note that once a program is closed, its program ID cannot be reused. Attempting
to deploy a program with a previously closed program ID will result in an error.

```
Error: Program 4Ujf5fXfLx2PAwRqcECCLtgDxHKPznoJpa43jUBxFfMz has been closed, use
a new Program Id
```

If you need to redeploy a program after closing it, you must generate a new
program ID. To generate a new keypair for the program, run the following
command:

```shell title="Terminal"
solana-keygen new -o ./target/deploy/your_program-keypair.json --force
```

Alternatively, you can delete the existing keypair file and run
`cargo build-sbf` again, which will generate a new keypair file.

### Program Buffer Accounts

Deploying a program requires multiple transactions due to the 1232 byte limit
for transactions on Solana. An intermediate step of the deploy process involves
writing the program's byte-code to a temporary "buffer account".
+ +This buffer account is automatically closed after successful program deployment. +However, if the deployment fails, the buffer account remains and you can either: + +- Continue the deployment using the existing buffer account +- Close the buffer account to reclaim the allocated SOL (rent) + +You can check if you have any open buffer accounts by using the `program show` +subcommand as follows: + +```shell +solana program show --buffers +``` + +Example output: + +``` +Buffer Address | Authority | Balance +5TRm1DxYcXLbSEbbxWcQbEUCce7L4tVgaC6e2V4G82pM | 4kh6HxYZiAebF8HWLsUWod2EaQQ6iWHpHYCz8UcmFbM1 | 0.57821592 SOL +``` + +You can continue to the deployment using the `program deploy` subcommand as +follows: + +```shell +solana program deploy --buffer 5TRm1DxYcXLbSEbbxWcQbEUCce7L4tVgaC6e2V4G82pM +``` + +Expected output on successful deployment: + +``` +Program Id: 2w3sK6CW7Hy1Ljnz2uqPrQsg4KjNZxD4bDerXDkSX3Q1 + +Signature: 3fsttJFskUmvbdL5F9y8g43rgNea5tYZeVXbimfx2Up5viJnYehWe3yx45rQJc8Kjkr6nY8D4DP4V2eiSPqvWRNL +``` + +To close buffer accounts, use the `program close` subcommand as follows: + +```shell +solana program close --buffers +``` + +### ELF Dump + +The SBF shared object internals can be dumped to a text file to gain more +insight into a program's composition and what it may be doing at runtime. The +dump will contain both the ELF information as well as a list of all the symbols +and the instructions that implement them. Some of the BPF loader's error log +messages will reference specific instruction numbers where the error occurred. +These references can be looked up in the ELF dump to identify the offending +instruction and its context. + +```shell +cargo build-bpf --dump +``` + +The file will be output to `/target/deploy/your_program-dump.txt`. + +## Program Deployment Process + +Deploying a program on Solana requires multiple transactions, due to the max +size limit of 1232 bytes for Solana transactions. The Solana CLI sends these +transactions with the `solana program deploy` subcommand. The process can be +broken down into the following 3 phases: + +1. [Buffer initialization](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/cli/src/program.rs#L2113): + First, the CLI sends a transaction which + [creates a buffer account](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/cli/src/program.rs#L1903) + large enough for the byte-code being deployed. It also invokes the + [initialize buffer instruction](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/programs/bpf_loader/src/lib.rs#L320) + to set the buffer authority to restrict writes to the deployer's chosen + address. +2. [Buffer writes](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/cli/src/program.rs#L2129): + Once the buffer account is initialized, the CLI + [breaks up the program byte-code](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/cli/src/program.rs#L1940) + into ~1KB chunks and + [sends transactions at a rate of 100 transactions per second](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/client/src/tpu_client.rs#L133) + to write each chunk with + [the write buffer instruction](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/programs/bpf_loader/src/lib.rs#L334). 
   These transactions are sent directly to the current leader's transaction
   processing (TPU) port and are processed in parallel with each other. Once all
   transactions have been sent, the CLI
   [polls the RPC API with batches of transaction signatures](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/client/src/tpu_client.rs#L216)
   to ensure that every write was successful and confirmed.
3. [Finalization](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/cli/src/program.rs#L1807):
   Once writes are completed, the CLI
   [sends a final transaction](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/cli/src/program.rs#L2150)
   to either
   [deploy a new program](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/programs/bpf_loader/src/lib.rs#L362)
   or
   [upgrade an existing program](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/programs/bpf_loader/src/lib.rs#L513).
   In either case, the byte-code written to the buffer account will be copied
   into a program data account and verified.

## Upgradeable BPF Loader Program

The BPF loader program is the program that "owns" all executable accounts on
Solana. When you deploy a program, the owner of the program account is set to
the BPF loader program.

### State accounts

The Upgradeable BPF loader program supports three different types of state
accounts:

1. [Program account](https://github.com/solana-labs/solana/blob/master/sdk/program/src/bpf_loader_upgradeable.rs#L34):
   This is the main account of an on-chain program and its address is commonly
   referred to as a "program id." Program ids are what transaction instructions
   reference in order to invoke a program. Program accounts are immutable once
   deployed, so you can think of them as a proxy account to the byte-code and
   state stored in other accounts.
2. [Program data account](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/sdk/program/src/bpf_loader_upgradeable.rs#L39):
   This account is what stores the executable byte-code of an on-chain program.
   When a program is upgraded, this account's data is updated with new
   byte-code. In addition to byte-code, program data accounts are also
   responsible for storing the slot when it was last modified and the address of
   the sole account authorized to modify the account (this address can be
   cleared to make a program immutable).
3. [Buffer accounts](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/sdk/program/src/bpf_loader_upgradeable.rs#L27):
   These accounts temporarily store byte-code while a program is being actively
   deployed through a series of transactions. They also each store the address
   of the sole account which is authorized to do writes.

### Instructions

The state accounts listed above can only be modified with one of the following
instructions supported by the Upgradeable BPF Loader program:

1. [Initialize buffer](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/sdk/program/src/loader_upgradeable_instruction.rs#L21):
   Creates a buffer account and stores an authority address which is allowed to
   modify the buffer.
2.
[Write](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/sdk/program/src/loader_upgradeable_instruction.rs#L28): + Writes byte-code at a specified byte offset inside a buffer account. Writes + are processed in small chunks due to a limitation of Solana transactions + having a maximum serialized size of 1232 bytes. +3. [Deploy](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/sdk/program/src/loader_upgradeable_instruction.rs#L77): + Creates both a program account and a program data account. It fills the + program data account by copying the byte-code stored in a buffer account. If + the byte-code is valid, the program account will be set as executable, + allowing it to be invoked. If the byte-code is invalid, the instruction will + fail and all changes are reverted. +4. [Upgrade](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/sdk/program/src/loader_upgradeable_instruction.rs#L102): + Fills an existing program data account by copying executable byte-code from a + buffer account. Similar to the deploy instruction, it will only succeed if + the byte-code is valid. +5. [Set authority](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/sdk/program/src/loader_upgradeable_instruction.rs#L114): + Updates the authority of a program data or buffer account if the account's + current authority has signed the transaction being processed. If the + authority is deleted without replacement, it can never be set to a new + address and the account can never be closed. +6. [Close](https://github.com/solana-labs/solana/blob/7409d9d2687fba21078a745842c25df805cdf105/sdk/program/src/loader_upgradeable_instruction.rs#L127): + Clears the data of a program data account or buffer account and reclaims the + SOL used for the rent exemption deposit. diff --git a/content/docs/programs/examples.mdx b/content/docs/programs/examples.mdx new file mode 100644 index 000000000..74a942731 --- /dev/null +++ b/content/docs/programs/examples.mdx @@ -0,0 +1,156 @@ +--- +title: "Program Examples" +description: + "A list of Solana program examples in different languages and frameworks, + which can help you learn and use as reference for your own projects." +tags: + - quickstart + - program + - anchor + - javascript + - native + - rust + - token22 + - token extensions +keywords: + - rust + - cargo + - toml + - program + - tutorial + - intro to solana development + - blockchain developer + - blockchain tutorial + - web3 developer + - anchor +--- + +The +[Solana Program Examples](https://github.com/solana-developers/program-examples) +repository on GitHub offers several subfolders, each containing code examples +for different Solana programming paradigms and languages, designed to help +developers learn and experiment with Solana blockchain development. + +You can find the examples in the `solana-developers/program-examples` together +with README files that explain you how to run the different examples. Most +examples are self-contained and are available in native Rust (ie, with no +framework) and [Anchor](https://www.anchor-lang.com/docs/installation). It also +contains a list of examples that we would love to +[see as contributions](https://github.com/solana-developers/program-examples?tab=readme-ov-file#examples-wed-love-to-see). 
+ +Within the repo you will find the following subfolder, each with assorted +example programs within them: + +- [Basics](#basics) +- [Compression](#compression) +- [Oracles](#oracles) +- [Tokens](#tokens) +- [Token 2022 (Token Extensions)](#token-2022-token-extensions) +- [Break](#break) + - [Build and Run](#build-and-run) + +## Basics + +Contains a series of examples that demonstrate the foundational steps for +building Solana programs using native Rust libraries. These examples are +designed to help developers understand the core concepts of Solana programming. + +| Example Name | Description | Language | +| ----------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------ | ------------------------- | +| [Account Data](https://github.com/solana-developers/program-examples/tree/main/basics/account-data) | Saving an address with name, house number, street and city in an account. | Native, Anchor | +| [Checking Accounts](https://github.com/solana-developers/program-examples/tree/main/basics/checking-accounts) | Security lessons that shows how to do account checks | Native, Anchor | +| [Close Account](https://github.com/solana-developers/program-examples/tree/main/basics/close-account) | Show you how to close accounts to get its rent back. | Native, Anchor | +| [Counter](https://github.com/solana-developers/program-examples/tree/main/basics/counter) | A simple counter program in all the different architectures. | Native, Anchor, mpl-stack | +| [Create Account](https://github.com/solana-developers/program-examples/tree/main/basics/create-account) | How to create a system account within a program. | Native, Anchor | +| [Cross Program Invocation](https://github.com/solana-developers/program-examples/tree/main/basics/cross-program-invocation) | Using a hand and lever analogy this shows you how to call another program from within a program. | Native, Anchor | +| [hello solana](https://github.com/solana-developers/program-examples/tree/main/basics/hello-solana) | Hello world example which just prints hello world in the transaction logs. | Native, Anchor | +| [Pda Rent payer](https://github.com/solana-developers/program-examples/tree/main/basics/pda-rent-payer) | Shows you how you can use the lamports from a PDA to pay for a new account. | Native, Anchor | +| [Processing Instructions](https://github.com/solana-developers/program-examples/tree/main/basics/processing-instructions) | Shows you how to handle instruction data string and u32. | Native, Anchor | +| [Program Derived Addresses](https://github.com/solana-developers/program-examples/tree/main/basics/program-derived-addresses) | Shows how to use seeds to refer to a PDA and save data in it. | Native, Anchor | +| [Realloc](https://github.com/solana-developers/program-examples/tree/main/basics/realloc) | Shows you how to increase and decrease the size of an existing account. | Native, Anchor | +| [Rent](https://github.com/solana-developers/program-examples/tree/main/basics/rent) | Here you will learn how to calculate rent requirements within a program. | Native, Anchor | +| [Repository Layout](https://github.com/solana-developers/program-examples/tree/main/basics/repository-layout) | Recommendations on how to structure your program layout. 
| Native, Anchor | +| [Transfer SOL](https://github.com/solana-developers/program-examples/tree/main/basics/transfer-sol) | Different methods of transferring SOL for system accounts and PDAs. | Native, Anchor, Seahorse | + +## Compression + +Contains a series of examples that demonstrate how to use +[state compression](/docs/advanced/state-compression) on Solana. Mainly +focused on compressed NFTs (cNFTs). + +| Example Name | Description | Language | +| ----------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------- | -------- | +| [cNFT-burn](https://github.com/solana-developers/program-examples/tree/main/compression/cnft-burn) | To destroy a cNFT it can be burnt. This examples shows how to do that in a program. | Anchor | +| [cNFT-Vault](https://github.com/solana-developers/program-examples/tree/main/compression/cnft-vault/anchor) | How to custody a cNFT in a program and send it out again. | Anchor | +| [cutils](https://github.com/solana-developers/program-examples/tree/main/compression/cutils) | A suite utils to for example mint and verify cNFTs in a program. | Anchor | + +## Oracles + +Oracles allow to use off chain data in programs. + +| Example Name | Description | Language | +| ------------------------------------------------------------------------------------ | --------------------------------------------------------------- | -------- | +| [Pyth](https://github.com/solana-developers/program-examples/tree/main/oracles/pyth) | Pyth makes price data of tokens available in on chain programs. | Anchor | + +## Tokens + +Most tokens on Solana use the Solana Program Library (SPL) token standard. Here +you can find many examples on how to mint, transfer, burn tokens and even how to +interact with them in programs. + +| Example Name | Description | Language | +| --------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------- | -------------- | +| [Create Token](https://github.com/solana-developers/program-examples/tree/main/tokens/create-token) | How to create a token and add metaplex metadata to it. | Anchor, Native | +| [NFT Minter](https://github.com/solana-developers/program-examples/tree/main/tokens/nft-minter) | Minting only one amount of a token and then removing the mint authority. | Anchor, Native | +| [PDA Mint Authority](https://github.com/solana-developers/program-examples/tree/main/tokens/pda-mint-authority) | Shows you how to change the mint authority of a mint, to mint tokens from within a program. | Anchor, Native | +| [SPL Token Minter](https://github.com/solana-developers/program-examples/tree/main/tokens/spl-token-minter) | Explains how to use Associated Token Accounts to be able to keep track of token accounts. | Anchor, Native | +| [Token Swap](https://github.com/solana-developers/program-examples/tree/main/tokens/token-swap) | Extensive example that shows you how to build a AMM (automated market maker) pool for SPL tokens. | Anchor | +| [Transfer Tokens](https://github.com/solana-developers/program-examples/tree/main/tokens/transfer-tokens) | Shows how to transfer SPL token using CPIs into the token program. | Anchor, Native | +| [Token-2022](https://github.com/solana-developers/program-examples/tree/main/tokens/token-2022) | See Token 2022 (Token extensions). 
| Anchor, Native | + +## Token 2022 (Token Extensions) + +Token 2022 is a new standard for tokens on Solana. It is a more flexible and +lets you add 16 different extensions to a token mint to add more functionality +to it. A full list of the extensions can be found in the +[Getting Started Guide](/developers/guides/token-extensions/getting-started) + +| Example Name | Description | Language | +| --------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------- | -------- | +| [Basics](https://github.com/solana-developers/program-examples/tree/main/tokens/token-2022/basics/anchor) | How to create a token, mint and transfer it. | Anchor | +| [Default account state](https://github.com/solana-developers/program-examples/tree/main/tokens/token-2022/default-account-state/native) | This extension lets you create token accounts with a certain state, for example frozen. | Native | +| [Mint Close Authority](https://github.com/solana-developers/program-examples/tree/main/tokens/token-2022/mint-close-authority) | With the old token program it was not possible to close a mint. Now it is. | Native | +| [Multiple Extensions](https://github.com/solana-developers/program-examples/tree/main/tokens/token-2022/multiple-extensions) | Shows you how you can add multiple extensions to a single mint | Native | +| [NFT Metadata pointer](https://github.com/solana-developers/program-examples/tree/main/tokens/token-2022/nft-meta-data-pointer) | It is possible to use the metadata extension to create NFTs and add dynamic on chain metadata. | Anchor | +| [Not Transferable](https://github.com/solana-developers/program-examples/tree/main/tokens/token-2022/non-transferable/native) | Useful for example for achievements, referral programs or any soul bound tokens. | Native | +| [Transfer fee](https://github.com/solana-developers/program-examples/tree/main/tokens/token-2022/transfer-fees) | Every transfer of the tokens hold some tokens back in the token account which can then be collected. | Native | +| [Transfer Hook](https://github.com/solana-developers/program-examples/tree/main/tokens/token-2022/transfer-hook) | Four examples to add additional functionality to your token using a CPI from the token program into your program. | Anchor | + +## Break + +[Break](https://break.solana.com/) is a React app that gives users a visceral +feeling for just how fast and high-performance the Solana network really is. Can +you _break_ the Solana blockchain? During a 15 second play-through, each click +of a button or keystroke sends a new transaction to the cluster. Smash the +keyboard as fast as you can and watch your transactions get finalized in +real-time while the network takes it all in stride! + +Break can be played on our Devnet, Testnet and Mainnet Beta networks. Plays are +free on Devnet and Testnet, where the session is funded by a network faucet. On +Mainnet Beta, users pay to play 0.08 SOL per game. The session account can be +funded by a local keystore wallet or by scanning a QR code from Trust Wallet to +transfer the tokens. 
+ +[Click here to play Break](https://break.solana.com/) + +### Build and Run + +First fetch the latest version of the example code: + +```shell +git clone https://github.com/solana-labs/break.git +cd break +``` + +Next, follow the steps in the git repository's +[README](https://github.com/solana-labs/break/blob/main/README.md). diff --git a/content/docs/programs/faq.mdx b/content/docs/programs/faq.mdx new file mode 100644 index 000000000..f4db1bf42 --- /dev/null +++ b/content/docs/programs/faq.mdx @@ -0,0 +1,193 @@ +--- +title: "FAQ" +--- + +Post your questions on +[StackExchange](https://solana.stackexchange.com/questions/ask). + +## Berkeley Packet Filter (BPF) + +Solana onchain programs are compiled via the +[LLVM compiler infrastructure](https://llvm.org/) to an +[Executable and Linkable Format (ELF)](https://en.wikipedia.org/wiki/Executable_and_Linkable_Format) +containing a variation of the +[Berkeley Packet Filter (BPF)](https://en.wikipedia.org/wiki/Berkeley_Packet_Filter) +bytecode. + +Because Solana uses the LLVM compiler infrastructure, a program may be written +in any programming language that can target the LLVM's BPF backend. + +BPF provides an efficient +[instruction set](https://github.com/iovisor/bpf-docs/blob/master/eBPF.md) that +can be executed in an interpreted virtual machine or as efficient just-in-time +compiled native instructions. + +## Memory map + +The virtual address memory map used by Solana SBF programs is fixed and laid out +as follows + +- Program code starts at 0x100000000 +- Stack data starts at 0x200000000 +- Heap data starts at 0x300000000 +- Program input parameters start at 0x400000000 + +The above virtual addresses are start addresses but programs are given access to +a subset of the memory map. The program will panic if it attempts to read or +write to a virtual address that it was not granted access to, and an +`AccessViolation` error will be returned that contains the address and size of +the attempted violation. + +## InvalidAccountData + +This program error can happen for a lot of reasons. Usually, it's caused by +passing an account to the program that the program is not expecting, either in +the wrong position in the instruction or an account not compatible with the +instruction being executed. + +An implementation of a program might also cause this error when performing a +cross-program instruction and forgetting to provide the account for the program +that you are calling. + +## InvalidInstructionData + +This program error can occur while trying to deserialize the instruction, check +that the structure passed in matches exactly the instruction. There may be some +padding between fields. If the program implements the Rust `Pack` trait then try +packing and unpacking the instruction type `T` to determine the exact encoding +the program expects. + +## MissingRequiredSignature + +Some instructions require the account to be a signer; this error is returned if +an account is expected to be signed but is not. + +An implementation of a program might also cause this error when performing a +[cross-program invocation](/docs/core/cpi) that requires a signed program +address, but the passed signer seeds passed to `invoke_signed` don't match the +signer seeds used to create the program address +[`create_program_address`](/docs/core/pda#createprogramaddress). + +## Stack + +SBF uses stack frames instead of a variable stack pointer. Each stack frame is +4KB in size. 
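+
+As a purely illustrative sketch (not part of the original FAQ text), the usual
+way to hit this limit is a large local value; moving it to the heap keeps the
+stack frame small:
+
+```rust
+// Hypothetical example: a 16 KiB array as a local variable would not fit in a
+// 4 KiB stack frame, but a heap allocation of the same size is fine because it
+// comes out of the program's heap region instead of the stack.
+fn build_scratch_buffer() -> Vec<u8> {
+    vec![0u8; 16 * 1024]
+}
+```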
+ +If a program violates that stack frame size, the compiler will report the +overrun as a warning. + +For example: + +```text +Error: Function _ZN16curve25519_dalek7edwards21EdwardsBasepointTable6create17h178b3d2411f7f082E Stack offset of -30728 exceeded max offset of -4096 by 26632 bytes, please minimize large stack variables +``` + +The message identifies which symbol is exceeding its stack frame, but the name +might be mangled. + +> To demangle a Rust symbol use [rustfilt](https://github.com/luser/rustfilt). + +The above warning came from a Rust program, so the demangled symbol name is: + +```shell +rustfilt _ZN16curve25519_dalek7edwards21EdwardsBasepointTable6create17h178b3d2411f7f082E +curve25519_dalek::edwards::EdwardsBasepointTable::create +``` + +The reason a warning is reported rather than an error is because some dependent +crates may include functionality that violates the stack frame restrictions even +if the program doesn't use that functionality. If the program violates the stack +size at runtime, an `AccessViolation` error will be reported. + +SBF stack frames occupy a virtual address range starting at `0x200000000`. + +## Heap size + +Programs have access to a runtime heap via the Rust `alloc` APIs. To facilitate +fast allocations, a simple 32KB bump heap is utilized. The heap does not support +`free` or `realloc`. + +Internally, programs have access to the 32KB memory region starting at virtual +address 0x300000000 and may implement a custom heap based on the program's +specific needs. + +Rust programs implement the heap directly by defining a custom +[`global_allocator`](https://github.com/solana-labs/solana/blob/d9b0fc0e3eec67dfe4a97d9298b15969b2804fab/sdk/program/src/entrypoint.rs#L72) + +## Loaders + +Programs are deployed with and executed by runtime loaders, currently there are +two supported loaders +[BPF Loader](https://github.com/solana-labs/solana/blob/7ddf10e602d2ed87a9e3737aa8c32f1db9f909d8/sdk/program/src/bpf_loader.rs#L17) +and +[BPF loader deprecated](https://github.com/solana-labs/solana/blob/7ddf10e602d2ed87a9e3737aa8c32f1db9f909d8/sdk/program/src/bpf_loader_deprecated.rs#L14) + +Loaders may support different application binary interfaces so developers must +write their programs for and deploy them to the same loader. If a program +written for one loader is deployed to a different one the result is usually a +`AccessViolation` error due to mismatched deserialization of the program's input +parameters. + +For all practical purposes program should always be written to target the latest +BPF loader and the latest loader is the default for the command-line interface +and the javascript APIs. + +- [Rust program entrypoints](/docs/programs/lang-rust#program-entrypoint) + +### Deployment + +SBF program deployment is the process of uploading a BPF shared object into a +program account's data and marking the account executable. A client breaks the +SBF shared object into smaller pieces and sends them as the instruction data of +[`Write`](https://github.com/solana-labs/solana/blob/bc7133d7526a041d1aaee807b80922baa89b6f90/sdk/program/src/loader_instruction.rs#L13) +instructions to the loader where loader writes that data into the program's +account data. 
Once all the pieces are received, the client sends a
+[`Finalize`](https://github.com/solana-labs/solana/blob/bc7133d7526a041d1aaee807b80922baa89b6f90/sdk/program/src/loader_instruction.rs#L30)
+instruction to the loader. The loader then validates that the SBF data is valid
+and marks the program account as _executable_. Once the program account is
+marked executable, subsequent transactions may issue instructions for that
+program to process.
+
+When an instruction is directed at an executable SBF program, the loader
+configures the program's execution environment, serializes the program's input
+parameters, calls the program's entrypoint, and reports any errors encountered.
+
+For further information, see [deploying programs](/docs/programs/deploying).
+
+### Input Parameter Serialization
+
+SBF loaders serialize the program input parameters into a byte array that is
+then passed to the program's entrypoint, where the program is responsible for
+deserializing it on-chain. One of the changes between the deprecated loader and
+the current loader is that the input parameters are serialized in a way that
+results in various parameters falling on aligned offsets within the aligned byte
+array. This allows deserialization implementations to directly reference the
+byte array and provide aligned pointers to the program.
+
+- [Rust program parameter deserialization](/docs/programs/lang-rust#parameter-deserialization)
+
+The latest loader serializes the program input parameters as follows (all
+encoding is little endian):
+
+- 8 bytes unsigned number of accounts
+- For each account
+  - 1 byte indicating if this is a duplicate account, if not a duplicate then
+    the value is 0xff, otherwise the value is the index of the account it is a
+    duplicate of.
+  - If duplicate: 7 bytes of padding
+  - If not duplicate:
+    - 1 byte boolean, true if account is a signer
+    - 1 byte boolean, true if account is writable
+    - 1 byte boolean, true if account is executable
+    - 4 bytes of padding
+    - 32 bytes of the account public key
+    - 32 bytes of the account's owner public key
+    - 8 bytes unsigned number of lamports owned by the account
+    - 8 bytes unsigned number of bytes of account data
+    - x bytes of account data
+    - 10k bytes of padding, used for realloc
+    - enough padding to align the offset to 8 bytes.
+    - 8 bytes rent epoch
+- 8 bytes unsigned number of instruction data bytes
+- x bytes of instruction data
+- 32 bytes of the program id
diff --git a/content/docs/programs/limitations.mdx b/content/docs/programs/limitations.mdx
new file mode 100644
index 000000000..1cdcf0372
--- /dev/null
+++ b/content/docs/programs/limitations.mdx
@@ -0,0 +1,111 @@
+---
+title: "Limitations"
+---
+
+Developing programs on the Solana blockchain comes with some inherent
+limitations. Below is a list of common limitations that you may run into.
+
+## Rust libraries
+
+Since Rust-based onchain programs must be deterministic while running in a
+resource-constrained, single-threaded environment, they have some limitations on
+various libraries.
+
+On-chain Rust programs support most of Rust's libstd, libcore, and liballoc, as
+well as many 3rd party crates.
+ +There are some limitations since these programs run in a resource-constrained, +single-threaded environment, as well as being deterministic: + +- No access to + - `rand` + - `std::fs` + - `std::net` + - `std::future` + - `std::process` + - `std::sync` + - `std::task` + - `std::thread` + - `std::time` +- Limited access to: + - `std::hash` + - `std::os` +- Bincode is extremely computationally expensive in both cycles and call depth + and should be avoided +- String formatting should be avoided since it is also computationally + expensive. +- No support for `println!`, `print!`, use the + [`msg!`](https://github.com/solana-labs/solana/blob/d9b0fc0e3eec67dfe4a97d9298b15969b2804fab/sdk/program/src/log.rs#L33) + macro instead. +- The runtime enforces a limit on the number of instructions a program can + execute during the processing of one instruction. See + [computation budget](/docs/core/fees#compute-budget) for more information. + +## Compute budget + +To prevent abuse of the blockchain's computational resources, each transaction +is allocated a [compute budget](/docs/terminology#compute-budget). Exceeding +this compute budget will result in the transaction failing. + +See the [computational constraints](/docs/core/fees#compute-budget) +documentation for more specific details. + +## Call stack depth - `CallDepthExceeded` error + +Solana programs are constrained to run quickly, and to facilitate this, the +program's call stack is limited to a max depth of **64 frames**. + +When a program exceeds the allowed call stack depth limit, it will receive the +`CallDepthExceeded` error. + +## CPI call depth - `CallDepth` error + +Cross-program invocations allow programs to invoke other programs directly, but +the depth is constrained currently to `4`. + +When a program exceeds the allowed +[cross-program invocation call depth](/docs/core/cpi), it will receive a +`CallDepth` error + +## Float Rust types support + +Programs support a limited subset of Rust's float operations. If a program +attempts to use a float operation that is not supported, the runtime will report +an unresolved symbol error. + +Float operations are performed via software libraries, specifically LLVM's float +built-ins. Due to the software emulated, they consume more compute units than +integer operations. In general, fixed point operations are recommended where +possible. + +The +[Solana Program Library math](https://github.com/solana-labs/solana-program-library/tree/master/libraries/math) +tests will report the performance of some math operations. To run the test, sync +the repo and run: + +```shell +cargo test-sbf -- --nocapture --test-threads=1 +``` + +Recent results show the float operations take more instructions compared to +integers equivalents. Fixed point implementations may vary but will also be less +than the float equivalents: + +```text + u64 f32 +Multiply 8 176 +Divide 9 219 +``` + +## Static writable data + +Program shared objects do not support writable shared data. Programs are shared +between multiple parallel executions using the same shared read-only code and +data. This means that developers should not include any static writable or +global variables in programs. In the future a copy-on-write mechanism could be +added to support writable data. + +## Signed division + +The SBF instruction set does not support signed division. 
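+
+As a purely illustrative sketch (this helper is hypothetical and not part of
+the original docs), a signed quotient can be emulated on top of unsigned
+division when needed; it assumes a non-zero divisor and ignores the
+`i64::MIN / -1` overflow edge case:
+
+```rust
+/// Hypothetical helper: emulate signed division using only unsigned division.
+/// Assumes `b != 0` and ignores the `i64::MIN / -1` overflow edge case.
+fn signed_div(a: i64, b: i64) -> i64 {
+    let magnitude = (a.unsigned_abs() / b.unsigned_abs()) as i64;
+    if (a < 0) != (b < 0) {
+        -magnitude
+    } else {
+        magnitude
+    }
+}
+```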
diff --git a/content/docs/programs/meta.json b/content/docs/programs/meta.json new file mode 100644 index 000000000..7f13a0e3f --- /dev/null +++ b/content/docs/programs/meta.json @@ -0,0 +1,13 @@ +{ + "title": "Developing Programs", + "pages": [ + "anchor", + "rust", + "deploying", + "examples", + "testing", + "limitations", + "faq" + ], + "defaultOpen": true +} diff --git a/content/docs/programs/rust/index.mdx b/content/docs/programs/rust/index.mdx new file mode 100644 index 000000000..57722b70a --- /dev/null +++ b/content/docs/programs/rust/index.mdx @@ -0,0 +1,495 @@ +--- +title: Rust Programs +description: + Learn how to develop Solana programs using Rust, including step-by-step + instructions for creating, building, testing, and deploying smart contracts on + the Solana blockchain. +altRoutes: + - /docs/programs/lang-rust +h1: Developing Programs in Rust +--- + +Solana programs are primarily developed using the Rust programming language. +This page focuses on writing Solana programs in Rust without using the Anchor +framework, an approach often referred to as writing "native Rust" programs. + +Native Rust development provides developers with direct control over their +Solana programs. However, this approach requires more manual setup and +boilerplate code compared to using the Anchor framework. This method is +recommended for developers who: + +- Seek granular control over program logic and optimizations +- Want to learn the underlying concepts before moving to higher-level frameworks + +For beginners, we recommend starting with the Anchor framework. See the +[Anchor](/docs/programs/anchor) section for more information. + +## Prerequisites + +For detailed installation instructions, visit the +[installation](/docs/intro/installation) page. + +Before you begin, ensure you have the following installed: + +- Rust: The programming language for building Solana programs. +- Solana CLI: Command-line tool for Solana development. + +## Getting Started + +The example below covers the basic steps to create your first Solana program +written in Rust. We'll create a minimal program that prints "Hello, world!" to +the program log. + + + + + +### Create a new Program + +First, create a new Rust project using the standard `cargo init` command with +the `--lib` flag. + +```shell title="Terminal" +cargo init hello_world --lib +``` + +Navigate to the project directory. You should see the default `src/lib.rs` and +`Cargo.toml` files + +```shell title="Terminal" +cd hello_world +``` + +Next, add the `solana-program` dependency. This is the minimum dependency +required to build a Solana program. + +```shell title="Terminal" +cargo add solana-program@1.18.26 +``` + +Next, add the following snippet to `Cargo.toml`. If you don't include this +config, the `target/deploy` directory will not be generated when you build the +program. + +```toml title="Cargo.toml" +[lib] +crate-type = ["cdylib", "lib"] +``` + +Your `Cargo.toml` file should look like the following: + +```toml title="Cargo.toml" +[package] +name = "hello_world" +version = "0.1.0" +edition = "2021" + +[lib] +crate-type = ["cdylib", "lib"] + +[dependencies] +solana-program = "1.18.26" +``` + +Next, replace the contents of `src/lib.rs` with the following code. This is a +minimal Solana program that prints "Hello, world!" to the program log when the +program is invoked. + +The `msg!` macro is used in Solana programs to print a message to the program +log. 
+ +```rs title="lib.rs" +use solana_program::{ + account_info::AccountInfo, entrypoint, entrypoint::ProgramResult, msg, pubkey::Pubkey, +}; + +entrypoint!(process_instruction); + +pub fn process_instruction( + _program_id: &Pubkey, + _accounts: &[AccountInfo], + _instruction_data: &[u8], +) -> ProgramResult { + msg!("Hello, world!"); + Ok(()) +} +``` + + + + +### Build the Program + +Next, build the program using the `cargo build-sbf` command. + +```shell title="Terminal" +cargo build-sbf +``` + +This command generates a `target/deploy` directory containing two important +files: + +1. A `.so` file (e.g., `hello_world.so`): This is the compiled Solana program + that will be deployed to the network as a "smart contract". +2. A keypair file (e.g., `hello_world-keypair.json`): The public key of this + keypair is used as the program ID when deploying the program. + +To view the program ID, run the following command in your terminal. This command +prints the public key of the keypair at the specified file path: + +```shell title="Terminal" +solana address -k ./target/deploy/hello_world-keypair.json +``` + +Example output: + +``` +4Ujf5fXfLx2PAwRqcECCLtgDxHKPznoJpa43jUBxFfMz +``` + + + + +### Test the Program + +Next, test the program using the `solana-program-test` crate. Add the following +dependencies to `Cargo.toml`. + +```shell title="Terminal" +cargo add solana-program-test@1.18.26 --dev +cargo add solana-sdk@1.18.26 --dev +cargo add tokio --dev +``` + +Add the following test to `src/lib.rs`, below the program code. This is a test +module that invokes the hello world program. + +```rs title="lib.rs" +#[cfg(test)] +mod test { + use super::*; + use solana_program_test::*; + use solana_sdk::{signature::Signer, transaction::Transaction}; + + #[tokio::test] + async fn test_hello_world() { + let program_id = Pubkey::new_unique(); + let (mut banks_client, payer, recent_blockhash) = + ProgramTest::new("hello_world", program_id, processor!(process_instruction)) + .start() + .await; + + // Create the instruction to invoke the program + let instruction = + solana_program::instruction::Instruction::new_with_borsh(program_id, &(), vec![]); + + // Add the instruction to a new transaction + let mut transaction = Transaction::new_with_payer(&[instruction], Some(&payer.pubkey())); + transaction.sign(&[&payer], recent_blockhash); + + // Process the transaction + let transaction_result = banks_client.process_transaction(transaction).await; + assert!(transaction_result.is_ok()); + } +} +``` + +Run the test using the `cargo test-sbf` command. The program log will display +"Hello, world!". + +```shell title="Terminal" +cargo test-sbf +``` + +Example output: + +```shell title="Terminal" {4} /Program log: Hello, world!/ +running 1 test +[2024-10-18T21:24:54.889570000Z INFO solana_program_test] "hello_world" SBF program from /hello_world/target/deploy/hello_world.so, modified 35 seconds, 828 ms, 268 µs and 398 ns ago +[2024-10-18T21:24:54.974294000Z DEBUG solana_runtime::message_processor::stable_log] Program 1111111QLbz7JHiBTspS962RLKV8GndWFwiEaqKM invoke [1] +[2024-10-18T21:24:54.974814000Z DEBUG solana_runtime::message_processor::stable_log] Program log: Hello, world! +[2024-10-18T21:24:54.976848000Z DEBUG solana_runtime::message_processor::stable_log] Program 1111111QLbz7JHiBTspS962RLKV8GndWFwiEaqKM consumed 140 of 200000 compute units +[2024-10-18T21:24:54.976868000Z DEBUG solana_runtime::message_processor::stable_log] Program 1111111QLbz7JHiBTspS962RLKV8GndWFwiEaqKM success +test test::test_hello_world ... 
ok
+
+test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.13s
+```
+
+
+
+### Deploy the Program
+
+Next, deploy the program. When developing locally, we can use the
+`solana-test-validator`.
+
+First, configure the Solana CLI to use the local Solana cluster.
+
+```shell title="Terminal"
+solana config set -ul
+```
+
+Example output:
+
+```
+Config File: /.config/solana/cli/config.yml
+RPC URL: http://localhost:8899
+WebSocket URL: ws://localhost:8900/ (computed)
+Keypair Path: /.config/solana/id.json
+Commitment: confirmed
+```
+
+Open a new terminal and run the `solana-test-validator` command to start the
+local validator.
+
+```shell title="Terminal"
+solana-test-validator
+```
+
+While the test validator is running, run the `solana program deploy` command in
+a separate terminal to deploy the program to the local validator.
+
+```shell title="Terminal"
+solana program deploy ./target/deploy/hello_world.so
+```
+
+Example output:
+
+```
+Program Id: 4Ujf5fXfLx2PAwRqcECCLtgDxHKPznoJpa43jUBxFfMz
+Signature:
+5osMiNMiDZGM7L1e2tPHxU8wdB8gwG8fDnXLg5G7SbhwFz4dHshYgAijk4wSQL5cXiu8z1MMou5kLadAQuHp7ybH
+```
+
+You can inspect the program ID and transaction signature on
+[Solana Explorer](https://explorer.solana.com/?cluster=custom&customUrl=http%3A%2F%2Flocalhost%3A8899).
+Note that the cluster on Solana Explorer must also be localhost. The "Custom RPC
+URL" option on Solana Explorer defaults to `http://localhost:8899`.
+
+
+
+### Invoke the Program
+
+Next, we'll demonstrate how to invoke the program using a Rust client.
+
+First, create an `examples` directory and a `client.rs` file.
+
+```shell title="Terminal"
+mkdir -p examples
+touch examples/client.rs
+```
+
+Add the following to `Cargo.toml`.
+
+```toml title="Cargo.toml"
+[[example]]
+name = "client"
+path = "examples/client.rs"
+```
+
+Add the `solana-client` dependency.
+
+```shell title="Terminal"
+cargo add solana-client@1.18.26 --dev
+```
+
+Add the following code to `examples/client.rs`. This is a Rust client script
+that funds a new keypair to pay for transaction fees and then invokes the hello
+world program.
+ +```rs title="example/client.rs" +use solana_client::rpc_client::RpcClient; +use solana_sdk::{ + commitment_config::CommitmentConfig, + instruction::Instruction, + pubkey::Pubkey, + signature::{Keypair, Signer}, + transaction::Transaction, +}; +use std::str::FromStr; + +#[tokio::main] +async fn main() { + // Program ID (replace with your actual program ID) + let program_id = Pubkey::from_str("4Ujf5fXfLx2PAwRqcECCLtgDxHKPznoJpa43jUBxFfMz").unwrap(); + + // Connect to the Solana devnet + let rpc_url = String::from("http://127.0.0.1:8899"); + let client = RpcClient::new_with_commitment(rpc_url, CommitmentConfig::confirmed()); + + // Generate a new keypair for the payer + let payer = Keypair::new(); + + // Request airdrop + let airdrop_amount = 1_000_000_000; // 1 SOL + let signature = client + .request_airdrop(&payer.pubkey(), airdrop_amount) + .expect("Failed to request airdrop"); + + // Wait for airdrop confirmation + loop { + let confirmed = client.confirm_transaction(&signature).unwrap(); + if confirmed { + break; + } + } + + // Create the instruction + let instruction = Instruction::new_with_borsh( + program_id, + &(), // Empty instruction data + vec![], // No accounts needed + ); + + // Add the instruction to new transaction + let mut transaction = Transaction::new_with_payer(&[instruction], Some(&payer.pubkey())); + transaction.sign(&[&payer], client.get_latest_blockhash().unwrap()); + + // Send and confirm the transaction + match client.send_and_confirm_transaction(&transaction) { + Ok(signature) => println!("Transaction Signature: {}", signature), + Err(err) => eprintln!("Error sending transaction: {}", err), + } +} +``` + +Before running the script, replace the program ID in the code snippet above with +the one for your program. + +You can get your program ID by running the following command. + +```shell title="Terminal" +solana address -k ./target/deploy/hello_world-keypair.json +``` + +```diff +#[tokio::main] +async fn main() { +- let program_id = Pubkey::from_str("4Ujf5fXfLx2PAwRqcECCLtgDxHKPznoJpa43jUBxFfMz").unwrap(); ++ let program_id = Pubkey::from_str("YOUR_PROGRAM_ID).unwrap(); + } +} +``` + +Run the client script with the following command. + +```shell title="Terminal" +cargo run --example client +``` + +Example output: + +``` +Transaction Signature: 54TWxKi3Jsi3UTeZbhLGUFX6JQH7TspRJjRRFZ8NFnwG5BXM9udxiX77bAACjKAS9fGnVeEazrXL4SfKrW7xZFYV +``` + +You can inspect the transaction signature on +[Solana Explorer](https://explorer.solana.com/?cluster=custom&customUrl=http%3A%2F%2Flocalhost%3A8899) +(local cluster) to see "Hello, world!" in the program log. + + + + +### Update the Program + +Solana programs can be updated by redeploying to the same program ID. Update the +program in `src/lib.rs` to print "Hello, Solana!" instead of "Hello, world!". + +```diff title="lib.rs" +pub fn process_instruction( + _program_id: &Pubkey, + _accounts: &[AccountInfo], + _instruction_data: &[u8], +) -> ProgramResult { +- msg!("Hello, world!"); ++ msg!("Hello, Solana!"); + Ok(()) +} +``` + +Test the updated program by running the `cargo test-sbf` command. + +```shell title="Terminal" +cargo test-sbf +``` + +You should see "Hello, Solana!" in the program log. 
+ +```shell title="Terminal" {4} +running 1 test +[2024-10-23T19:28:28.842639000Z INFO solana_program_test] "hello_world" SBF program from /code/misc/delete/hello_world/target/deploy/hello_world.so, modified 4 minutes, 31 seconds, 435 ms, 566 µs and 766 ns ago +[2024-10-23T19:28:28.934854000Z DEBUG solana_runtime::message_processor::stable_log] Program 1111111QLbz7JHiBTspS962RLKV8GndWFwiEaqKM invoke [1] +[2024-10-23T19:28:28.936735000Z DEBUG solana_runtime::message_processor::stable_log] Program log: Hello, Solana! +[2024-10-23T19:28:28.938774000Z DEBUG solana_runtime::message_processor::stable_log] Program 1111111QLbz7JHiBTspS962RLKV8GndWFwiEaqKM consumed 140 of 200000 compute units +[2024-10-23T19:28:28.938793000Z DEBUG solana_runtime::message_processor::stable_log] Program 1111111QLbz7JHiBTspS962RLKV8GndWFwiEaqKM success +test test::test_hello_world ... ok + +test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.14s +``` + +Run the `cargo build-sbf` command to generate an updated `.so` file. + +```shell title="Terminal" +cargo build-sbf +``` + +Redeploy the program using the `solana program deploy` command. + +```shell title="Terminal" +solana program deploy ./target/deploy/hello_world.so +``` + +Run the client code again and inspect the transaction signature on Solana +Explorer to see "Hello, Solana!" in the program log. + +```shell title="Terminal" +cargo run --example client +``` + + + + +### Close the Program + +You can close your Solana program to reclaim the SOL allocated to the account. +Closing a program is irreversible, so it should be done with caution. + +To close a program, use the `solana program close ` command. For +example: + +```shell title="Terminal" +solana program close 4Ujf5fXfLx2PAwRqcECCLtgDxHKPznoJpa43jUBxFfMz +--bypass-warning +``` + +Example output: + +``` +Closed Program Id 4Ujf5fXfLx2PAwRqcECCLtgDxHKPznoJpa43jUBxFfMz, 0.1350588 SOL +reclaimed +``` + +Note that once a program is closed, its program ID cannot be reused. Attempting +to deploy a program with a previously closed program ID will result in an error. + +``` +Error: Program 4Ujf5fXfLx2PAwRqcECCLtgDxHKPznoJpa43jUBxFfMz has been closed, use +a new Program Id +``` + +If you need to redeploy a program with the same source code after closing a +program, you must generate a new program ID. To generate a new keypair for the +program, run the following command: + +```shell title="Terminal" +solana-keygen new -o ./target/deploy/hello_world-keypair.json --force +``` + +Alternatively, you can delete the existing keypair file (e.g. +`./target/deploy/hello_world-keypair.json`) and run `cargo build-sbf` again, +which will generate a new keypair file. + + + + diff --git a/content/docs/programs/rust/meta.json b/content/docs/programs/rust/meta.json new file mode 100644 index 000000000..fad25cd35 --- /dev/null +++ b/content/docs/programs/rust/meta.json @@ -0,0 +1,4 @@ +{ + "title": "Rust Programs", + "pages": ["program-structure"] +} diff --git a/content/docs/programs/rust/program-structure.mdx b/content/docs/programs/rust/program-structure.mdx new file mode 100644 index 000000000..58905cd3c --- /dev/null +++ b/content/docs/programs/rust/program-structure.mdx @@ -0,0 +1,1425 @@ +--- +title: Program Structure +description: + Learn how to structure Solana programs in Rust, including entrypoints, state + management, instruction handling, and testing. 
+h1: Rust Program Structure +--- + +Solana programs written in Rust have minimal structural requirements, allowing +for flexibility in how code is organized. The only requirement is that a program +must have an `entrypoint`, which defines where the execution of a program +begins. + +## Program Structure + +While there are no strict rules for file structure, Solana programs typically +follow a common pattern: + +- `entrypoint.rs`: Defines the entrypoint that routes incoming instructions. +- `state.rs`: Define program-specific state (account data). +- `instructions.rs`: Defines the instructions that the program can execute. +- `processor.rs`: Defines the instruction handlers (functions) that implement + the business logic for each instruction. +- `error.rs`: Defines custom errors that the program can return. + +You can find examples in the +[Solana Program Library](https://github.com/solana-labs/solana-program-library/tree/master/token/program/src). + +## Example Program + +To demonstrate how to build a native Rust program with multiple instructions, +we'll walk through a simple counter program that implements two instructions: + +1. `InitializeCounter`: Creates and initializes a new account with an initial + value. +2. `IncrementCounter`: Increments the value stored in an existing account. + +For simplicity, the program will be implemented in a single `lib.rs` file, +though in practice you may want to split larger programs into multiple files. + + + + +```rs title="lib.rs" +use borsh::{BorshDeserialize, BorshSerialize}; +use solana_program::{ + account_info::{next_account_info, AccountInfo}, + entrypoint, + entrypoint::ProgramResult, + msg, + program::invoke, + program_error::ProgramError, + pubkey::Pubkey, + system_instruction, + sysvar::{rent::Rent, Sysvar}, +}; + +// Program entrypoint +entrypoint!(process_instruction); + +// Function to route instructions to the correct handler +pub fn process_instruction( + program_id: &Pubkey, + accounts: &[AccountInfo], + instruction_data: &[u8], +) -> ProgramResult { + // Unpack instruction data + let instruction = CounterInstruction::unpack(instruction_data)?; + + // Match instruction type + match instruction { + CounterInstruction::InitializeCounter { initial_value } => { + process_initialize_counter(program_id, accounts, initial_value)? 
+ } + CounterInstruction::IncrementCounter => process_increment_counter(program_id, accounts)?, + }; + Ok(()) +} + +// Instructions that our program can execute +#[derive(BorshSerialize, BorshDeserialize, Debug)] +pub enum CounterInstruction { + InitializeCounter { initial_value: u64 }, // variant 0 + IncrementCounter, // variant 1 +} + +impl CounterInstruction { + pub fn unpack(input: &[u8]) -> Result { + // Get the instruction variant from the first byte + let (&variant, rest) = input + .split_first() + .ok_or(ProgramError::InvalidInstructionData)?; + + // Match instruction type and parse the remaining bytes based on the variant + match variant { + 0 => { + // For InitializeCounter, parse a u64 from the remaining bytes + let initial_value = u64::from_le_bytes( + rest.try_into() + .map_err(|_| ProgramError::InvalidInstructionData)?, + ); + Ok(Self::InitializeCounter { initial_value }) + } + 1 => Ok(Self::IncrementCounter), // No additional data needed + _ => Err(ProgramError::InvalidInstructionData), + } + } +} + +// Initialize a new counter account +fn process_initialize_counter( + program_id: &Pubkey, + accounts: &[AccountInfo], + initial_value: u64, +) -> ProgramResult { + let accounts_iter = &mut accounts.iter(); + + let counter_account = next_account_info(accounts_iter)?; + let payer_account = next_account_info(accounts_iter)?; + let system_program = next_account_info(accounts_iter)?; + + // Size of our counter account + let account_space = 8; // Size in bytes to store a u64 + + // Calculate minimum balance for rent exemption + let rent = Rent::get()?; + let required_lamports = rent.minimum_balance(account_space); + + // Create the counter account + invoke( + &system_instruction::create_account( + payer_account.key, // Account paying for the new account + counter_account.key, // Account to be created + required_lamports, // Amount of lamports to transfer to the new account + account_space as u64, // Size in bytes to allocate for the data field + program_id, // Set program owner to our program + ), + &[ + payer_account.clone(), + counter_account.clone(), + system_program.clone(), + ], + )?; + + // Create a new CounterAccount struct with the initial value + let counter_data = CounterAccount { + count: initial_value, + }; + + // Get a mutable reference to the counter account's data + let mut account_data = &mut counter_account.data.borrow_mut()[..]; + + // Serialize the CounterAccount struct into the account's data + counter_data.serialize(&mut account_data)?; + + msg!("Counter initialized with value: {}", initial_value); + + Ok(()) +} + +// Update an existing counter's value +fn process_increment_counter(program_id: &Pubkey, accounts: &[AccountInfo]) -> ProgramResult { + let accounts_iter = &mut accounts.iter(); + let counter_account = next_account_info(accounts_iter)?; + + // Verify account ownership + if counter_account.owner != program_id { + return Err(ProgramError::IncorrectProgramId); + } + + // Mutable borrow the account data + let mut data = counter_account.data.borrow_mut(); + + // Deserialize the account data into our CounterAccount struct + let mut counter_data: CounterAccount = CounterAccount::try_from_slice(&data)?; + + // Increment the counter value + counter_data.count = counter_data + .count + .checked_add(1) + .ok_or(ProgramError::InvalidAccountData)?; + + // Serialize the updated counter data back into the account + counter_data.serialize(&mut &mut data[..])?; + + msg!("Counter incremented to: {}", counter_data.count); + Ok(()) +} + +// Struct representing our counter 
account's data +#[derive(BorshSerialize, BorshDeserialize, Debug)] +pub struct CounterAccount { + count: u64, +} + +#[cfg(test)] +mod test { + use super::*; + use solana_program_test::*; + use solana_sdk::{ + instruction::{AccountMeta, Instruction}, + signature::{Keypair, Signer}, + system_program, + transaction::Transaction, + }; + + #[tokio::test] + async fn test_counter_program() { + let program_id = Pubkey::new_unique(); + let (mut banks_client, payer, recent_blockhash) = ProgramTest::new( + "counter_program", + program_id, + processor!(process_instruction), + ) + .start() + .await; + + // Create a new keypair to use as the address for our counter account + let counter_keypair = Keypair::new(); + let initial_value: u64 = 42; + + // Step 1: Initialize the counter + println!("Testing counter initialization..."); + + // Create initialization instruction + let mut init_instruction_data = vec![0]; // 0 = initialize instruction + init_instruction_data.extend_from_slice(&initial_value.to_le_bytes()); + + let initialize_instruction = Instruction::new_with_bytes( + program_id, + &init_instruction_data, + vec![ + AccountMeta::new(counter_keypair.pubkey(), true), + AccountMeta::new(payer.pubkey(), true), + AccountMeta::new_readonly(system_program::id(), false), + ], + ); + + // Send transaction with initialize instruction + let mut transaction = + Transaction::new_with_payer(&[initialize_instruction], Some(&payer.pubkey())); + transaction.sign(&[&payer, &counter_keypair], recent_blockhash); + banks_client.process_transaction(transaction).await.unwrap(); + + // Check account data + let account = banks_client + .get_account(counter_keypair.pubkey()) + .await + .expect("Failed to get counter account"); + + if let Some(account_data) = account { + let counter: CounterAccount = CounterAccount::try_from_slice(&account_data.data) + .expect("Failed to deserialize counter data"); + assert_eq!(counter.count, 42); + println!( + "✅ Counter initialized successfully with value: {}", + counter.count + ); + } + + // Step 2: Increment the counter + println!("Testing counter increment..."); + + // Create increment instruction + let increment_instruction = Instruction::new_with_bytes( + program_id, + &[1], // 1 = increment instruction + vec![AccountMeta::new(counter_keypair.pubkey(), true)], + ); + + // Send transaction with increment instruction + let mut transaction = + Transaction::new_with_payer(&[increment_instruction], Some(&payer.pubkey())); + transaction.sign(&[&payer, &counter_keypair], recent_blockhash); + banks_client.process_transaction(transaction).await.unwrap(); + + // Check account data + let account = banks_client + .get_account(counter_keypair.pubkey()) + .await + .expect("Failed to get counter account"); + + if let Some(account_data) = account { + let counter: CounterAccount = CounterAccount::try_from_slice(&account_data.data) + .expect("Failed to deserialize counter data"); + assert_eq!(counter.count, 43); + println!("✅ Counter incremented successfully to: {}", counter.count); + } + } +} +``` + +```toml title="Cargo.toml" +[package] +name = "counter_program" +version = "0.1.0" +edition = "2021" + +[lib] +crate-type = ["cdylib", "lib"] + +[dependencies] +borsh = "1.5.1" +solana-program = "1.18.26" + +[dev-dependencies] +solana-program-test = "1.18.26" +solana-sdk = "1.18.26" +tokio = "1.41.0" +``` + + + + + + + + +### Create a new Program + +First, create a new Rust project using the standard `cargo init` command with +the `--lib` flag. 
+ +```shell title="Terminal" +cargo init counter_program --lib +``` + +Navigate to the project directory. You should see the default `src/lib.rs` and +`Cargo.toml` files + +```shell title="Terminal" +cd counter_program +``` + +Next, add the `solana-program` dependency. This is the minimum dependency +required to build a Solana program. + +```shell title="Terminal" +cargo add solana-program@1.18.26 +``` + +Next, add the following snippet to `Cargo.toml`. If you don't include this +config, the `target/deploy` directory will not be generated when you build the +program. + +```toml title="Cargo.toml" +[lib] +crate-type = ["cdylib", "lib"] +``` + +Your `Cargo.toml` file should look like the following: + +```toml title="Cargo.toml" +[package] +name = "counter_program" +version = "0.1.0" +edition = "2021" + +[lib] +crate-type = ["cdylib", "lib"] + +[dependencies] +solana-program = "1.18.26" +``` + + + + +### Program Entrypoint + +A Solana program entrypoint is the function that gets called when a program is +invoked. The entrypoint has the following raw definition and developers are free +to create their own implementation of the entrypoint function. + +For simplicity, use the +[`entrypoint!`](https://github.com/solana-labs/solana/blob/v2.0/sdk/program/src/entrypoint.rs#L124-L140) +macro from the `solana_program` crate to define the entrypoint in your program. + +```rs +#[no_mangle] +pub unsafe extern "C" fn entrypoint(input: *mut u8) -> u64; +``` + +Replace the default code in `lib.rs` with the following code. This snippet: + +1. Imports the required dependencies from `solana_program` +2. Defines the program entrypoint using the `entrypoint!` macro +3. Implements the `process_instruction` function that will route instructions to + the appropriate handler functions + +```rs title="lib.rs" {13} /process_instruction/ +use solana_program::{ + account_info::{next_account_info, AccountInfo}, + entrypoint, + entrypoint::ProgramResult, + msg, + program::invoke, + program_error::ProgramError, + pubkey::Pubkey, + system_instruction, + sysvar::{rent::Rent, Sysvar}, +}; + +entrypoint!(process_instruction); + +pub fn process_instruction( + program_id: &Pubkey, + accounts: &[AccountInfo], + instruction_data: &[u8], +) -> ProgramResult { + // Your program logic + Ok(()) +} +``` + +The `entrypoint!` macro requires a function with the the following +[type signature](https://github.com/solana-labs/solana/blob/v2.0/sdk/program/src/entrypoint.rs#L28-L29) +as an argument: + +```rs +pub type ProcessInstruction = + fn(program_id: &Pubkey, accounts: &[AccountInfo], instruction_data: &[u8]) -> ProgramResult; +``` + +When a Solana program is invoked, the entrypoint +[deserializes](https://github.com/solana-labs/solana/blob/v2.0/sdk/program/src/entrypoint.rs#L277) +the +[input data](https://github.com/solana-labs/solana/blob/v2.0/sdk/program/src/entrypoint.rs#L129-L131) +(provided as bytes) into three values and passes them to the +[`process_instruction`](https://github.com/solana-labs/solana/blob/v2.0/sdk/program/src/entrypoint.rs#L132) +function: + +- `program_id`: The public key of the program being invoked (current program) +- `accounts`: The `AccountInfo` for accounts required by the instruction being + invoked +- `instruction_data`: Additional data passed to the program which specifies the + instruction to execute and its required arguments + +These three parameters directly correspond to the data that clients must provide +when building an instruction to invoke a program. 
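+
+As a rough illustration (the addresses and the single instruction byte below
+are placeholders, not part of the counter program built in this guide), here is
+how those same three values appear on the client side when constructing an
+instruction with the `solana_sdk` types used later in the testing section:
+
+```rs
+use solana_sdk::{
+    instruction::{AccountMeta, Instruction},
+    pubkey::Pubkey,
+};
+
+fn main() {
+    // Placeholder addresses, for illustration only.
+    let program_id = Pubkey::new_unique(); // received by the entrypoint as `program_id`
+    let some_account = Pubkey::new_unique();
+
+    // The instruction bundles the same three pieces of data that the
+    // entrypoint later deserializes.
+    let instruction = Instruction::new_with_bytes(
+        program_id,
+        &[0],                                        // received as `instruction_data`
+        vec![AccountMeta::new(some_account, false)], // received as `accounts`
+    );
+
+    println!("{:?}", instruction);
+}
+```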
+ + + + +### Define Program State + +When building a Solana program, you'll typically start by defining your +program's state - the data that will be stored in accounts created and owned by +your program. + +Program state is defined using Rust structs that represent the data layout of +your program's accounts. You can define multiple structs to represent different +types of accounts for your program. + +When working with accounts, you need a way to convert your program's data types +to and from the raw bytes stored in an account's data field: + +- Serialization: Converting your data types into bytes to store in an account's + data field +- Deserialization: Converting the bytes stored in an account back into your data + types + +While you can use any serialization format for Solana program development, +[Borsh](https://borsh.io/) is commonly used. To use Borsh in your Solana +program: + +1. Add the `borsh` crate as a dependency to your `Cargo.toml`: + +```shell title="Terminal" +cargo add borsh +``` + +2. Import the Borsh traits and use the derive macro to implement the traits for + your structs: + +```rust +use borsh::{BorshSerialize, BorshDeserialize}; + +// Define struct representing our counter account's data +#[derive(BorshSerialize, BorshDeserialize, Debug)] +pub struct CounterAccount { + count: u64, +} +``` + +Add the `CounterAccount` struct to `lib.rs` to define the program state. This +struct will be used in both the initialization and increment instructions. + +```rs title="lib.rs" {12} {25-29} +use solana_program::{ + account_info::{next_account_info, AccountInfo}, + entrypoint, + entrypoint::ProgramResult, + msg, + program::invoke, + program_error::ProgramError, + pubkey::Pubkey, + system_instruction, + sysvar::{rent::Rent, Sysvar}, +}; +use borsh::{BorshSerialize, BorshDeserialize}; + +entrypoint!(process_instruction); + +pub fn process_instruction( + program_id: &Pubkey, + accounts: &[AccountInfo], + instruction_data: &[u8], +) -> ProgramResult { + // Your program logic + Ok(()) +} + +#[derive(BorshSerialize, BorshDeserialize, Debug)] +pub struct CounterAccount { + count: u64, +} +``` + + + + +### Define Instructions + +Instructions refer to the different operations that your Solana program can +perform. Think of them as public APIs for your program - they define what +actions users can take when interacting with your program. + +Instructions are typically defined using a Rust enum where: + +- Each enum variant represents a different instruction +- The variant's payload represents the instruction's parameters + +Note that Rust enum variants are implicitly numbered starting from 0. + +Below is an example of an enum defining two instructions: + +```rust +#[derive(BorshSerialize, BorshDeserialize, Debug)] +pub enum CounterInstruction { + InitializeCounter { initial_value: u64 }, // variant 0 + IncrementCounter, // variant 1 +} +``` + +When a client invokes your program, they must provide instruction data (as a +buffer of bytes) where: + +- The first byte identifies which instruction variant to execute (0, 1, etc.) +- The remaining bytes contain the serialized instruction parameters (if + required) + +To convert the instruction data (bytes) into a variant of the enum, it is common +to implement a helper method. This method: + +1. Splits the first byte to get the instruction variant +2. Matches on the variant and parses any additional parameters from the + remaining bytes +3. 
Returns the corresponding enum variant + +For example, the `unpack` method for the `CounterInstruction` enum: + +```rust +impl CounterInstruction { + pub fn unpack(input: &[u8]) -> Result { + // Get the instruction variant from the first byte + let (&variant, rest) = input + .split_first() + .ok_or(ProgramError::InvalidInstructionData)?; + + // Match instruction type and parse the remaining bytes based on the variant + match variant { + 0 => { + // For InitializeCounter, parse a u64 from the remaining bytes + let initial_value = u64::from_le_bytes( + rest.try_into() + .map_err(|_| ProgramError::InvalidInstructionData)? + ); + Ok(Self::InitializeCounter { initial_value }) + } + 1 => Ok(Self::IncrementCounter), // No additional data needed + _ => Err(ProgramError::InvalidInstructionData), + } + } +} +``` + +Add the following code to `lib.rs` to define the instructions for the counter +program. + +```rs title="lib.rs" {18-46} +use borsh::{BorshDeserialize, BorshSerialize}; +use solana_program::{ + account_info::AccountInfo, entrypoint, entrypoint::ProgramResult, msg, + program_error::ProgramError, pubkey::Pubkey, +}; + +entrypoint!(process_instruction); + +pub fn process_instruction( + program_id: &Pubkey, + accounts: &[AccountInfo], + instruction_data: &[u8], +) -> ProgramResult { + // Your program logic + Ok(()) +} + +#[derive(BorshSerialize, BorshDeserialize, Debug)] +pub enum CounterInstruction { + InitializeCounter { initial_value: u64 }, // variant 0 + IncrementCounter, // variant 1 +} + +impl CounterInstruction { + pub fn unpack(input: &[u8]) -> Result { + // Get the instruction variant from the first byte + let (&variant, rest) = input + .split_first() + .ok_or(ProgramError::InvalidInstructionData)?; + + // Match instruction type and parse the remaining bytes based on the variant + match variant { + 0 => { + // For InitializeCounter, parse a u64 from the remaining bytes + let initial_value = u64::from_le_bytes( + rest.try_into() + .map_err(|_| ProgramError::InvalidInstructionData)?, + ); + Ok(Self::InitializeCounter { initial_value }) + } + 1 => Ok(Self::IncrementCounter), // No additional data needed + _ => Err(ProgramError::InvalidInstructionData), + } + } +} +``` + + + + +### Instruction Handlers + +Instruction handlers refer to the functions that contain the business logic for +each instruction. It's common to name handler functions as +`process_`, but you're free to choose any naming convention. + +Add the following code to `lib.rs`. This code uses the `CounterInstruction` enum +and `unpack` method defined in the previous step to route incoming instructions +to the appropriate handler functions: + +```rs title="lib.rs" {8-17} {20-32} /process_initialize_counter/1 /process_increment_counter/1 +entrypoint!(process_instruction); + +pub fn process_instruction( + program_id: &Pubkey, + accounts: &[AccountInfo], + instruction_data: &[u8], +) -> ProgramResult { + // Unpack instruction data + let instruction = CounterInstruction::unpack(instruction_data)?; + + // Match instruction type + match instruction { + CounterInstruction::InitializeCounter { initial_value } => { + process_initialize_counter(program_id, accounts, initial_value)? + } + CounterInstruction::IncrementCounter => process_increment_counter(program_id, accounts)?, + }; +} + +fn process_initialize_counter( + program_id: &Pubkey, + accounts: &[AccountInfo], + initial_value: u64, +) -> ProgramResult { + // Implementation details... 
+ Ok(()) +} + +fn process_increment_counter(program_id: &Pubkey, accounts: &[AccountInfo]) -> ProgramResult { + // Implementation details... + Ok(()) +} +``` + +Next, add the implementation of the `process_initialize_counter` function. This +instruction handler: + +1. Creates and allocates space for a new account to store the counter data +2. Initializing the account data with `initial_value` passed to the instruction + + + + +The `process_initialize_counter` function requires three accounts: + +1. The counter account that will be created and initialized +2. The payer account that will fund the new account creation +3. The System Program that we invoke to create the new account + +To define the accounts required by the instruction, we create an iterator over +the `accounts` slice and use the `next_account_info` function to get each +account. The number of accounts you define are the accounts required by the +instruction. + +The order of accounts is important - when building the instruction on the client +side, accounts must be provided in the same order as it is defined in the +program for the instruction to execute successfully. + +While the variable names for the accounts have no effect on the program's +functionality, using descriptive names is recommended. + +```rs title="lib.rs" {6-10} +fn process_initialize_counter( + program_id: &Pubkey, + accounts: &[AccountInfo], + initial_value: u64, +) -> ProgramResult { + let accounts_iter = &mut accounts.iter(); + + let counter_account = next_account_info(accounts_iter)?; + let payer_account = next_account_info(accounts_iter)?; + let system_program = next_account_info(accounts_iter)?; + + Ok(()) +} +``` + +Before creating an account, we need to: + +1. Specify the space (in bytes) to allocate to the account's data field. Since + we're storing a u64 value (`count`), we need 8 bytes. + +2. Calculate the minimum "rent" balance required. On Solana, accounts must + maintain a minimum balance of lamports (rent) based on amount of data stored + on the account. + +```rs title="lib.rs" {12-17} +fn process_initialize_counter( + program_id: &Pubkey, + accounts: &[AccountInfo], + initial_value: u64, +) -> ProgramResult { + let accounts_iter = &mut accounts.iter(); + + let counter_account = next_account_info(accounts_iter)?; + let payer_account = next_account_info(accounts_iter)?; + let system_program = next_account_info(accounts_iter)?; + + // Size of our counter account + let account_space = 8; // Size in bytes to store a u64 + + // Calculate minimum balance for rent exemption + let rent = Rent::get()?; + let required_lamports = rent.minimum_balance(account_space); + + Ok(()) +} +``` + +Once the space is defined and rent is calculated, create the account by invoking +the System Program's `create_account` instruction. + +On Solana, new accounts can only be created by the System Program. When creating +an account, we specify the amount of bytes to allocate and the program owner of +the new account. The System Program: + +1. Creates the new account +2. Allocates the specified space for the account's data field +3. Transfers ownership to the specified program + +This ownership transfer is important because only the program owner of an +account can modify an account's data. In this case, we set our program as the +owner, which will allow us to modify the account's data to store the counter +value. + +To invoke the System Program from our program's instruction, we make a Cross +Program Invocation (CPI) via the `invoke` function. 
A CPI allows one program to +call instructions on other programs - in this case, the System Program's +`create_account` instruction. + +```rs title="lib.rs" {19-33} +fn process_initialize_counter( + program_id: &Pubkey, + accounts: &[AccountInfo], + initial_value: u64, +) -> ProgramResult { + let accounts_iter = &mut accounts.iter(); + + let counter_account = next_account_info(accounts_iter)?; + let payer_account = next_account_info(accounts_iter)?; + let system_program = next_account_info(accounts_iter)?; + + // Size of our counter account + let account_space = 8; // Size in bytes to store a u64 + + // Calculate minimum balance for rent exemption + let rent = Rent::get()?; + let required_lamports = rent.minimum_balance(account_space); + + // Create the counter account + invoke( + &system_instruction::create_account( + payer_account.key, // Account paying for the new account + counter_account.key, // Account to be created + required_lamports, // Amount of lamports to transfer to the new account + account_space as u64, // Size in bytes to allocate for the data field + program_id, // Set program owner to our program + ), + &[ + payer_account.clone(), + counter_account.clone(), + system_program.clone(), + ], + )?; + + Ok(()) +} +``` + +Once the account is created, we initialize the account data by: + +1. Creating a new `CounterAccount` struct with the `initial_value` provided to + the instruction. +2. Getting a mutable reference to the new account's data field. +3. Serializing the `CounterAccount` struct into the account's data field, + effectively storing the `initial_value` on the account. + +```rs title="lib.rs" {35-44} /initial_value/ +fn process_initialize_counter( + program_id: &Pubkey, + accounts: &[AccountInfo], + initial_value: u64, +) -> ProgramResult { + let accounts_iter = &mut accounts.iter(); + + let counter_account = next_account_info(accounts_iter)?; + let payer_account = next_account_info(accounts_iter)?; + let system_program = next_account_info(accounts_iter)?; + + // Size of our counter account + let account_space = 8; // Size in bytes to store a u64 + + // Calculate minimum balance for rent exemption + let rent = Rent::get()?; + let required_lamports = rent.minimum_balance(account_space); + + // Create the counter account + invoke( + &system_instruction::create_account( + payer_account.key, // Account paying for the new account + counter_account.key, // Account to be created + required_lamports, // Amount of lamports to transfer to the new account + account_space as u64, // Size in bytes to allocate for the data field + program_id, // Set program owner to our program + ), + &[ + payer_account.clone(), + counter_account.clone(), + system_program.clone(), + ], + )?; + + // Create a new CounterAccount struct with the initial value + let counter_data = CounterAccount { + count: initial_value, + }; + + // Get a mutable reference to the counter account's data + let mut account_data = &mut counter_account.data.borrow_mut()[..]; + + // Serialize the CounterAccount struct into the account's data + counter_data.serialize(&mut account_data)?; + + msg!("Counter initialized with value: {}", initial_value); + + Ok(()) +} +``` + + + + +```rs title="lib.rs" +// Initialize a new counter account +fn process_initialize_counter( + program_id: &Pubkey, + accounts: &[AccountInfo], + initial_value: u64, +) -> ProgramResult { + let accounts_iter = &mut accounts.iter(); + + let counter_account = next_account_info(accounts_iter)?; + let payer_account = next_account_info(accounts_iter)?; + let 
system_program = next_account_info(accounts_iter)?; + + // Size of our counter account + let account_space = 8; // Size in bytes to store a u64 + + // Calculate minimum balance for rent exemption + let rent = Rent::get()?; + let required_lamports = rent.minimum_balance(account_space); + + // Create the counter account + invoke( + &system_instruction::create_account( + payer_account.key, // Account paying for the new account + counter_account.key, // Account to be created + required_lamports, // Amount of lamports to transfer to the new account + account_space as u64, // Size in bytes to allocate for the data field + program_id, // Set program owner to our program + ), + &[ + payer_account.clone(), + counter_account.clone(), + system_program.clone(), + ], + )?; + + // Create a new CounterAccount struct with the initial value + let counter_data = CounterAccount { + count: initial_value, + }; + + // Get a mutable reference to the counter account's data + let mut account_data = &mut counter_account.data.borrow_mut()[..]; + + // Serialize the CounterAccount struct into the account's data + counter_data.serialize(&mut account_data)?; + + msg!("Counter initialized with value: {}", initial_value); + + Ok(()) +} +``` + +Next, add the implementation of the `process_increment_counter` function. This +instruction increments the value of an existing counter account. + + + + +Just like the `process_initialize_counter` function, we start by creating an +iterator over the accounts. In this case, we are only expecting one account, +which is the account to be updated. + +Note that in practice, a developer must implement various security checks to +validate the accounts passed to the program. Since all accounts are provided by +the caller of the instruction, there is no guarantee that the accounts provided +are the ones the program expects. Missing account validation checks are a common +source of program vulnerabilities. + +The example below includes a check to ensure the account we're referring to as +the `counter_account` is owned by the executing program. 
+ +```rs title="lib.rs" {6-9} +// Update an existing counter's value +fn process_increment_counter(program_id: &Pubkey, accounts: &[AccountInfo]) -> ProgramResult { + let accounts_iter = &mut accounts.iter(); + let counter_account = next_account_info(accounts_iter)?; + + // Verify account ownership + if counter_account.owner != program_id { + return Err(ProgramError::IncorrectProgramId); + } + + Ok(()) +} +``` + +To update the account data, we: + +- Mutably borrow the existing account's data field +- Deserialize the raw bytes into our `CounterAccount` struct +- Update the `count` value +- Serialize the modified struct back into the account's data field + +```rs title="lib.rs" {11-24} +// Update an existing counter's value +fn process_increment_counter(program_id: &Pubkey, accounts: &[AccountInfo]) -> ProgramResult { + let accounts_iter = &mut accounts.iter(); + let counter_account = next_account_info(accounts_iter)?; + + // Verify account ownership + if counter_account.owner != program_id { + return Err(ProgramError::IncorrectProgramId); + } + + // Mutable borrow the account data + let mut data = counter_account.data.borrow_mut(); + + // Deserialize the account data into our CounterAccount struct + let mut counter_data: CounterAccount = CounterAccount::try_from_slice(&data)?; + + // Increment the counter value + counter_data.count = counter_data + .count + .checked_add(1) + .ok_or(ProgramError::InvalidAccountData)?; + + // Serialize the updated counter data back into the account + counter_data.serialize(&mut &mut data[..])?; + + msg!("Counter incremented to: {}", counter_data.count); + Ok(()) +} +``` + + + + +```rs title="lib.rs" +// Update an existing counter's value +fn process_increment_counter(program_id: &Pubkey, accounts: &[AccountInfo]) -> ProgramResult { + let accounts_iter = &mut accounts.iter(); + let counter_account = next_account_info(accounts_iter)?; + + // Verify account ownership + if counter_account.owner != program_id { + return Err(ProgramError::IncorrectProgramId); + } + + // Mutable borrow the account data + let mut data = counter_account.data.borrow_mut(); + + // Deserialize the account data into our CounterAccount struct + let mut counter_data: CounterAccount = CounterAccount::try_from_slice(&data)?; + + // Increment the counter value + counter_data.count = counter_data + .count + .checked_add(1) + .ok_or(ProgramError::InvalidAccountData)?; + + // Serialize the updated counter data back into the account + counter_data.serialize(&mut &mut data[..])?; + + msg!("Counter incremented to: {}", counter_data.count); + Ok(()) +} +``` + + + + +### Instruction Testing + +To test the program instructions, add the following dependencies to +`Cargo.toml`. + +```shell title="Terminal" +cargo add solana-program-test@1.18.26 --dev +cargo add solana-sdk@1.18.26 --dev +cargo add tokio --dev +``` + +Then add the following test module to `lib.rs` and run `cargo test-sbf` to +execute the tests. Optionally, use the `--nocapture` flag to see the print +statements in the output. + +```shell title="Terminal" +cargo test-sbf -- --nocapture +``` + + + + +First, set up the test module and import required dependencies: + +```rs title="lib.rs" +#[cfg(test)] +mod test { + use super::*; + use solana_program_test::*; + use solana_sdk::{ + instruction::{AccountMeta, Instruction}, + signature::{Keypair, Signer}, + system_program, + transaction::Transaction, + }; + + #[tokio::test] + async fn test_counter_program() { + // Test code will go here + } +} +``` + +Next, set up the test using `ProgramTest`. 
Then create a new keypair to use as
the address for the counter account we'll initialize, and define an initial
value to set for the counter.

```rs title="lib.rs"
#[cfg(test)]
mod test {
    use super::*;
    use solana_program_test::*;
    use solana_sdk::{
        instruction::{AccountMeta, Instruction},
        signature::{Keypair, Signer},
        system_program,
        transaction::Transaction,
    };

    #[tokio::test]
    async fn test_counter_program() {
        let program_id = Pubkey::new_unique();
        let (mut banks_client, payer, recent_blockhash) = ProgramTest::new(
            "counter_program",
            program_id,
            processor!(process_instruction),
        )
        .start()
        .await;

        // Create a new keypair to use as the address for our counter account
        let counter_keypair = Keypair::new();
        let initial_value: u64 = 42;
    }
}
```

When building an instruction, each account must be provided as an
[`AccountMeta`](https://github.com/solana-labs/solana/blob/v2.0/sdk/program/src/instruction.rs#L539-L545),
which specifies:

- The account's public key (`Pubkey`)
- `is_writable`: Whether the account data will be modified
- `is_signer`: Whether the account must sign the transaction

```rs
AccountMeta::new(account1_pubkey, true), // writable, signer
AccountMeta::new(account2_pubkey, false), // writable, not signer
AccountMeta::new_readonly(account3_pubkey, false), // not writable, not signer
AccountMeta::new_readonly(account4_pubkey, true), // not writable, signer
```

To test the initialize instruction:

- Create instruction data with variant 0 (`InitializeCounter`) and the initial
  value
- Build the instruction with the program ID, instruction data, and required
  accounts
- Send a transaction with the initialize instruction
- Check the account was created with the correct initial value

```rs title="lib.rs" {16-53}
    #[tokio::test]
    async fn test_counter_program() {
        let program_id = Pubkey::new_unique();
        let (mut banks_client, payer, recent_blockhash) = ProgramTest::new(
            "counter_program",
            program_id,
            processor!(process_instruction),
        )
        .start()
        .await;

        // Create a new keypair to use as the address for our counter account
        let counter_keypair = Keypair::new();
        let initial_value: u64 = 42;

        // Step 1: Initialize the counter
        println!("Testing counter initialization...");

        // Create initialization instruction
        let mut init_instruction_data = vec![0]; // 0 = initialize instruction
        init_instruction_data.extend_from_slice(&initial_value.to_le_bytes());

        let initialize_instruction = Instruction::new_with_bytes(
            program_id,
            &init_instruction_data,
            vec![
                AccountMeta::new(counter_keypair.pubkey(), true),
                AccountMeta::new(payer.pubkey(), true),
                AccountMeta::new_readonly(system_program::id(), false),
            ],
        );

        // Send transaction with initialize instruction
        let mut transaction =
            Transaction::new_with_payer(&[initialize_instruction], Some(&payer.pubkey()));
        transaction.sign(&[&payer, &counter_keypair], recent_blockhash);
        banks_client.process_transaction(transaction).await.unwrap();

        // Check account data
        let account = banks_client
            .get_account(counter_keypair.pubkey())
            .await
            .expect("Failed to get counter account");

        if let Some(account_data) = account {
            let counter: CounterAccount = CounterAccount::try_from_slice(&account_data.data)
                .expect("Failed to deserialize counter data");
            assert_eq!(counter.count, 42);
            println!(
                "✅ Counter initialized successfully with value: {}",
                counter.count
            );
        }
    }
```

To test the increment instruction:

- 
Build the instruction with the program ID, instruction data, and required + accounts +- Send a transaction with the increment instruction +- Check the account was incremented to the correct value + +Note that the instruction data for the increment instruction is `[1]`, which +corresponds to variant 1 (`IncrementCounter`). Since there are no additional +parameters to the increment instruction, the data is simply the instruction +variant. + +```rs title="lib.rs" {55-82} + #[tokio::test] + async fn test_counter_program() { + let program_id = Pubkey::new_unique(); + let (mut banks_client, payer, recent_blockhash) = ProgramTest::new( + "counter_program", + program_id, + processor!(process_instruction), + ) + .start() + .await; + + // Create a new keypair to use as the address for our counter account + let counter_keypair = Keypair::new(); + let initial_value: u64 = 42; + + // Step 1: Initialize the counter + println!("Testing counter initialization..."); + + // Create initialization instruction + let mut init_instruction_data = vec![0]; // 0 = initialize instruction + init_instruction_data.extend_from_slice(&initial_value.to_le_bytes()); + + let initialize_instruction = Instruction::new_with_bytes( + program_id, + &init_instruction_data, + vec![ + AccountMeta::new(counter_keypair.pubkey(), true), + AccountMeta::new(payer.pubkey(), true), + AccountMeta::new_readonly(system_program::id(), false), + ], + ); + + // Send transaction with initialize instruction + let mut transaction = + Transaction::new_with_payer(&[initialize_instruction], Some(&payer.pubkey())); + transaction.sign(&[&payer, &counter_keypair], recent_blockhash); + banks_client.process_transaction(transaction).await.unwrap(); + + // Check account data + let account = banks_client + .get_account(counter_keypair.pubkey()) + .await + .expect("Failed to get counter account"); + + if let Some(account_data) = account { + let counter: CounterAccount = CounterAccount::try_from_slice(&account_data.data) + .expect("Failed to deserialize counter data"); + assert_eq!(counter.count, 42); + println!( + "✅ Counter initialized successfully with value: {}", + counter.count + ); + } + + // Step 2: Increment the counter + println!("Testing counter increment..."); + + // Create increment instruction + let increment_instruction = Instruction::new_with_bytes( + program_id, + &[1], // 1 = increment instruction + vec![AccountMeta::new(counter_keypair.pubkey(), true)], + ); + + // Send transaction with increment instruction + let mut transaction = + Transaction::new_with_payer(&[increment_instruction], Some(&payer.pubkey())); + transaction.sign(&[&payer, &counter_keypair], recent_blockhash); + banks_client.process_transaction(transaction).await.unwrap(); + + // Check account data + let account = banks_client + .get_account(counter_keypair.pubkey()) + .await + .expect("Failed to get counter account"); + + if let Some(account_data) = account { + let counter: CounterAccount = CounterAccount::try_from_slice(&account_data.data) + .expect("Failed to deserialize counter data"); + assert_eq!(counter.count, 43); + println!("✅ Counter incremented successfully to: {}", counter.count); + } + } +``` + + + + +```rs title="lib.rs" +#[cfg(test)] +mod test { + use super::*; + use solana_program_test::*; + use solana_sdk::{ + instruction::{AccountMeta, Instruction}, + signature::{Keypair, Signer}, + system_program, + transaction::Transaction, + }; + + #[tokio::test] + async fn test_counter_program() { + let program_id = Pubkey::new_unique(); + let (mut banks_client, payer, 
recent_blockhash) = ProgramTest::new( + "counter_program", + program_id, + processor!(process_instruction), + ) + .start() + .await; + + // Create a new keypair to use as the address for our counter account + let counter_keypair = Keypair::new(); + let initial_value: u64 = 42; + + // Step 1: Initialize the counter + println!("Testing counter initialization..."); + + // Create initialization instruction + let mut init_instruction_data = vec![0]; // 0 = initialize instruction + init_instruction_data.extend_from_slice(&initial_value.to_le_bytes()); + + let initialize_instruction = Instruction::new_with_bytes( + program_id, + &init_instruction_data, + vec![ + AccountMeta::new(counter_keypair.pubkey(), true), + AccountMeta::new(payer.pubkey(), true), + AccountMeta::new_readonly(system_program::id(), false), + ], + ); + + // Send transaction with initialize instruction + let mut transaction = + Transaction::new_with_payer(&[initialize_instruction], Some(&payer.pubkey())); + transaction.sign(&[&payer, &counter_keypair], recent_blockhash); + banks_client.process_transaction(transaction).await.unwrap(); + + // Check account data + let account = banks_client + .get_account(counter_keypair.pubkey()) + .await + .expect("Failed to get counter account"); + + if let Some(account_data) = account { + let counter: CounterAccount = CounterAccount::try_from_slice(&account_data.data) + .expect("Failed to deserialize counter data"); + assert_eq!(counter.count, 42); + println!( + "✅ Counter initialized successfully with value: {}", + counter.count + ); + } + + // Step 2: Increment the counter + println!("Testing counter increment..."); + + // Create increment instruction + let increment_instruction = Instruction::new_with_bytes( + program_id, + &[1], // 1 = increment instruction + vec![AccountMeta::new(counter_keypair.pubkey(), true)], + ); + + // Send transaction with increment instruction + let mut transaction = + Transaction::new_with_payer(&[increment_instruction], Some(&payer.pubkey())); + transaction.sign(&[&payer, &counter_keypair], recent_blockhash); + banks_client.process_transaction(transaction).await.unwrap(); + + // Check account data + let account = banks_client + .get_account(counter_keypair.pubkey()) + .await + .expect("Failed to get counter account"); + + if let Some(account_data) = account { + let counter: CounterAccount = CounterAccount::try_from_slice(&account_data.data) + .expect("Failed to deserialize counter data"); + assert_eq!(counter.count, 43); + println!("✅ Counter incremented successfully to: {}", counter.count); + } + } +} +``` + +Example output: + +```shell title="Terminal" {6} {10} +running 1 test +[2024-10-29T20:51:13.783708000Z INFO solana_program_test] "counter_program" SBF program from /counter_program/target/deploy/counter_program.so, modified 2 seconds, 169 ms, 153 µs and 461 ns ago +[2024-10-29T20:51:13.855204000Z DEBUG solana_runtime::message_processor::stable_log] Program 1111111QLbz7JHiBTspS962RLKV8GndWFwiEaqKM invoke [1] +[2024-10-29T20:51:13.856052000Z DEBUG solana_runtime::message_processor::stable_log] Program 11111111111111111111111111111111 invoke [2] +[2024-10-29T20:51:13.856135000Z DEBUG solana_runtime::message_processor::stable_log] Program 11111111111111111111111111111111 success +[2024-10-29T20:51:13.856242000Z DEBUG solana_runtime::message_processor::stable_log] Program log: Counter initialized with value: 42 +[2024-10-29T20:51:13.856285000Z DEBUG solana_runtime::message_processor::stable_log] Program 1111111QLbz7JHiBTspS962RLKV8GndWFwiEaqKM consumed 3791 of 
200000 compute units +[2024-10-29T20:51:13.856307000Z DEBUG solana_runtime::message_processor::stable_log] Program 1111111QLbz7JHiBTspS962RLKV8GndWFwiEaqKM success +[2024-10-29T20:51:13.860038000Z DEBUG solana_runtime::message_processor::stable_log] Program 1111111QLbz7JHiBTspS962RLKV8GndWFwiEaqKM invoke [1] +[2024-10-29T20:51:13.860333000Z DEBUG solana_runtime::message_processor::stable_log] Program log: Counter incremented to: 43 +[2024-10-29T20:51:13.860355000Z DEBUG solana_runtime::message_processor::stable_log] Program 1111111QLbz7JHiBTspS962RLKV8GndWFwiEaqKM consumed 756 of 200000 compute units +[2024-10-29T20:51:13.860375000Z DEBUG solana_runtime::message_processor::stable_log] Program 1111111QLbz7JHiBTspS962RLKV8GndWFwiEaqKM success +test test::test_counter_program ... ok + +test result: ok. 1 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; finished in 0.08s +``` + + + + diff --git a/content/docs/programs/testing.mdx b/content/docs/programs/testing.mdx new file mode 100644 index 000000000..dfa8c9167 --- /dev/null +++ b/content/docs/programs/testing.mdx @@ -0,0 +1,234 @@ +--- +title: "Testing with NodeJS" +description: "Testing native solana programs written with rust using NodeJS" +--- + +When developing programs on Solana, ensuring their correctness and reliability +is crucial. Until now devs have been using `solana-test-validator` for testing. +This document covers testing your Solana program with Node.js +using `solana-bankrun`. + +## Overview + +There are two ways to test programs on Solana: + +1. [solana-test-validator](https://docs.anza.xyz/cli/examples/test-validator): + That spins up a local emulator of the Solana Blockchain on your local machine + which receives the transactions to be processed by the validator. +2. The various + [BanksClient-based](https://docs.rs/solana-banks-client/latest/solana_banks_client/) + test frameworks for SBF (Solana Bytecode Format) programs: Bankrun is a + framework that simulates a Solana bank's operations, enabling developers to + deploy, interact with, and assess the behavior of programs under test + conditions that mimic the mainnet. It helps set up the test environment and + offers tools for detailed transaction insights, enhancing debugging and + verification. With the client, we can load programs, and simulate and process + transactions seamlessly. + [solana-program-test](https://docs.rs/solana-program-test) (Rust), + [solana-bankrun](https://github.com/kevinheavey/solana-bankrun) (Rust, + JavaScript), [anchor-bankrun](https://www.npmjs.com/package/anchor-bankrun) + (Anchor, JavaScript), + [solders.bankrun](https://kevinheavey.github.io/solders/api_reference/bankrun.html) + (Python) are examples of the BanksClient-based testing framework. + +> [`pnpm create solana-program`](https://github.com/solana-program/create-solana-program) +> can help you generate JS and Rust clients including tests. Anchor is not yet +> supported. + +In this guide, we are using Solana Bankrun. `Bankrun` is a superfast, powerful, +and lightweight framework for testing Solana programs in Node.js. + +- The biggest advantage of using Solana Bankrun is that you don't have to set + up + an environment to test programs like you'd have to do while using the + `solana-test-validator`. Instead, you can do that with a piece of code, + inside + the tests. +- It also dynamically sets time and account data, which isn't possible with + `solana-test-validator` + +## Installation + +Add `solana-bankrun` as a dev dependency to your node project. 
If your Solana
program is not a node project yet, you can initialize it using `npm init`.

```bash
npm i -D solana-bankrun
```

## Usage

### Program Directory

First, the program's `.so` file must be present in one of the following
directories:

- `./tests/fixtures` (create this directory if it doesn't exist already).
- Your current working directory.
- A directory you define in the `BPF_OUT_DIR` or `SBF_OUT_DIR` environment
  variables, e.g. `export BPF_OUT_DIR='/path/to/binary'`.
- The output directory you specify when building your program, so that the
  library can pick the file up by program name, e.g.
  `cargo build-sbf --manifest-path=./program/Cargo.toml --sbf-out-dir=./tests/fixtures`

### Testing Framework

solana-bankrun is used in JavaScript or TypeScript with testing frameworks such
as [ts-mocha](https://www.npmjs.com/package/ts-mocha),
[ava](https://github.com/avajs/ava), or [Jest](https://jestjs.io/). Set up
whichever of these you prefer before continuing.

Add an [npm script](https://docs.npmjs.com/cli/v9/using-npm/scripts) to test
your program and create your `test.ts` file inside the `tests` folder.

```json
{
  "scripts": {
    "test": "pnpm ts-mocha -p ./tsconfig.json -t 1000000 ./tests/test.ts"
  }
}
```

### Start

The `start` function from `solana-bankrun` spins up a BanksServer and a
BanksClient, deploys the programs, and adds the accounts you pass to it.

```typescript
import { start } from "solana-bankrun";
import { PublicKey } from "@solana/web3.js";

test("testing program instruction", async () => {
  const programId = PublicKey.unique();
  const context = await start([{ name: "program_name", programId }], []);

  const client = context.banksClient;
  const payer = context.payer;
  // write tests
});
```

### Bankrun `context`

- We get access to the Bankrun `context` from the `start` function. The context
  contains a BanksClient, a recent blockhash, and a funded payer keypair.
- `context` has a `payer`, which is a funded keypair that can be used to sign
  transactions.
- `context` also has `context.lastBlockhash` and `context.getLatestBlockhash`
  to make fetching a [Blockhash](/docs/terminology#blockhash) convenient during
  tests.
- `context.banksClient` is used to send transactions and query account data
  from the ledger state. For example, the [Rent](/docs/terminology#rent) (in
  lamports) is sometimes needed to build a transaction, such as when using the
  System Program's `createAccount()` instruction. You can fetch it with the
  BanksClient:

  ```typescript
  const rent = await client.getRent();

  const Ix: TransactionInstruction = SystemProgram.createAccount({
    // ...
    lamports: Number(rent.minimumBalance(BigInt(ACCOUNT_SIZE))),
    // ...
  });
  ```

- You can read account data from the BanksClient using the `getAccount`
  function:

  ```typescript
  // `counter` is the address (PublicKey) of the account to read
  const accountInfo = await client.getAccount(counter);
  ```

### Process Transaction

The `processTransaction()` function executes the transaction with the programs
and accounts loaded by `start` and returns the result of the transaction.
+ +```typescript +let transaction = await client.processTransaction(tx); +``` + +## Example + +Here's an example to write test for +a [hello world program](https://github.com/solana-developers/program-examples/tree/main/basics/hello-solana/native) : + +```typescript +import { + PublicKey, + Transaction, + TransactionInstruction, +} from "@solana/web3.js"; +import { start } from "solana-bankrun"; +import { describe, test } from "node:test"; +import { assert } from "chai"; + +describe("hello-solana", async () => { + // load program in solana-bankrun + const PROGRAM_ID = PublicKey.unique(); + const context = await start( + [{ name: "hello_solana_program", programId: PROGRAM_ID }], + [], + ); + const client = context.banksClient; + const payer = context.payer; + + test("Say hello!", async () => { + const blockhash = context.lastBlockhash; + // We set up our instruction first. + let ix = new TransactionInstruction({ + // using payer keypair from context to sign the txn + keys: [{ pubkey: payer.publicKey, isSigner: true, isWritable: true }], + programId: PROGRAM_ID, + data: Buffer.alloc(0), // No data + }); + + const tx = new Transaction(); + tx.recentBlockhash = blockhash; + // using payer keypair from context to sign the txn + tx.add(ix).sign(payer); + + // Now we process the transaction + let transaction = await client.processTransaction(tx); + + assert(transaction.logMessages[0].startsWith("Program " + PROGRAM_ID)); + assert(transaction.logMessages[1] === "Program log: Hello, Solana!"); + assert( + transaction.logMessages[2] === + "Program log: Our program's Program ID: " + PROGRAM_ID, + ); + assert( + transaction.logMessages[3].startsWith( + "Program " + PROGRAM_ID + " consumed", + ), + ); + assert(transaction.logMessages[4] === "Program " + PROGRAM_ID + " success"); + assert(transaction.logMessages.length == 5); + }); +}); +``` + +This is how the output looks like after running the tests for +[hello world program](https://github.com/solana-developers/program-examples/tree/main/basics/hello-solana/native). + +```text +[2024-06-04T12:57:36.188822000Z INFO solana_program_test] "hello_solana_program" SBF program from tests/fixtures/hello_solana_program.so, modified 3 seconds, 20 ms, 687 µs and 246 ns ago +[2024-06-04T12:57:36.246838000Z DEBUG solana_runtime::message_processor::stable_log] Program 11111111111111111111111111111112 invoke [1] +[2024-06-04T12:57:36.246892000Z DEBUG solana_runtime::message_processor::stable_log] Program log: Hello, Solana! +[2024-06-04T12:57:36.246917000Z DEBUG solana_runtime::message_processor::stable_log] Program log: Our program's Program ID: 11111111111111111111111111111112 +[2024-06-04T12:57:36.246932000Z DEBUG solana_runtime::message_processor::stable_log] Program 11111111111111111111111111111112 consumed 2905 of 200000 compute units +[2024-06-04T12:57:36.246937000Z DEBUG solana_runtime::message_processor::stable_log] Program 11111111111111111111111111111112 success +▶ hello-solana + ✔ Say hello! 
(5.667917ms) +▶ hello-solana (7.047667ms) + +ℹ tests 1 +ℹ suites 1 +ℹ pass 1 +ℹ fail 0 +ℹ cancelled 0 +ℹ skipped 0 +ℹ todo 0 +ℹ duration_ms 63.52616 +``` diff --git a/content/docs/rpc/deprecated/confirmTransaction.mdx b/content/docs/rpc/deprecated/confirmTransaction.mdx new file mode 100644 index 000000000..9ba54ef9c --- /dev/null +++ b/content/docs/rpc/deprecated/confirmTransaction.mdx @@ -0,0 +1,13 @@ +--- +title: confirmTransaction +hideTableOfContents: true +h1: confirmTransaction RPC Method +--- + +Fetch the current status of a transaction signature (processed, confirmed, +finalized). + + + This method is expected to be removed in `solana-core` v2.0. Please use + [getSignatureStatuses](/docs/rpc/http/getsignaturestatuses) instead. + diff --git a/content/docs/rpc/deprecated/getConfirmedBlock.mdx b/content/docs/rpc/deprecated/getConfirmedBlock.mdx new file mode 100644 index 000000000..4512b4aaa --- /dev/null +++ b/content/docs/rpc/deprecated/getConfirmedBlock.mdx @@ -0,0 +1,202 @@ +--- +title: getConfirmedBlock +hideTableOfContents: true +altRoutes: + - /docs/rpc/getConfirmedBlock + - /docs/rpc/http/getConfirmedBlock +h1: getConfirmedBlock RPC Method +--- + +Returns identity and transaction information about a confirmed block in the +ledger + + + This method is expected to be removed in `solana-core` v2.0. Please use + [getBlock](/docs/rpc/http/getblock) instead. + + + + + + +### Parameters + + + slot number, as u64 integer + + + + +Configuration object containing the following fields: + + + + + level of transaction detail to return, either "full", "signatures", or "none" + + + + whether to populate the `rewards` array. + + + + +Encoding format for Account data + + + +
+ +- `jsonParsed` encoding attempts to use program-specific instruction parsers to + return more human-readable and explicit data in the + `transaction.message.instructions` list. +- If `jsonParsed` is requested but a parser cannot be found, the instruction + falls back to regular JSON encoding (`accounts`, `data`, and `programIdIndex` + fields). + +
+ +
+ +
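If you are migrating away from this deprecated method, the call maps directly
onto [getBlock](/docs/rpc/http/getblock). A minimal, non-authoritative sketch
using the v1 `Connection` API from `@solana/web3.js` (the devnet endpoint and
slot value simply mirror the curl example below):

```typescript
import { Connection, clusterApiUrl } from "@solana/web3.js";

const connection = new Connection(clusterApiUrl("devnet"), "confirmed");

// Fetch the same block as the curl example below (slot 430)
const block = await connection.getBlock(430, {
  maxSupportedTransactionVersion: 0,
});
console.log(block?.blockhash, block?.parentSlot);
```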
+ +### Result + +The result field will be an object with the following fields: + +- `` - if specified block is not confirmed +- `` - if block is confirmed, an object with the following fields: + - `blockhash: ` - the blockhash of this block, as base-58 encoded + string + - `previousBlockhash: ` - the blockhash of this block's parent, as + base-58 encoded string; if the parent block is not available due to ledger + cleanup, this field will return "11111111111111111111111111111111" + - `parentSlot: ` - the slot index of this block's parent + - `transactions: ` - present if "full" transaction details are + requested; an array of JSON objects containing: + - `transaction: ` - + [Transaction](/docs/rpc/json-structures#transactions) object, either in + JSON format or encoded binary data, depending on encoding parameter + - `meta: ` - transaction status metadata object, containing `null` + or: + - `err: ` - Error if transaction failed, null if transaction + succeeded. + [TransactionError definitions](https://github.com/solana-labs/solana/blob/c0c60386544ec9a9ec7119229f37386d9f070523/sdk/src/transaction/error.rs#L13) + - `fee: ` - fee this transaction was charged, as u64 integer + - `preBalances: ` - array of u64 account balances from before the + transaction was processed + - `postBalances: ` - array of u64 account balances after the + transaction was processed + - `innerInstructions: ` - List of + [inner instructions](/docs/rpc/json-structures#inner-instructions) or + `null` if inner instruction recording was not enabled during this + transaction + - `preTokenBalances: ` - List of + [token balances](/docs/rpc/json-structures#token-balances) from before + the transaction was processed or omitted if token balance recording was + not yet enabled during this transaction + - `postTokenBalances: ` - List of + [token balances](/docs/rpc/json-structures#token-balances) from after + the transaction was processed or omitted if token balance recording was + not yet enabled during this transaction + - `logMessages: ` - array of string log messages or `null` if + log message recording was not enabled during this transaction + - DEPRECATED: `status: ` - Transaction status + - `"Ok": ` - Transaction was successful + - `"Err": ` - Transaction failed with TransactionError + - `signatures: ` - present if "signatures" are requested for + transaction details; an array of signatures strings, corresponding to the + transaction order in the block + - `rewards: ` - present if rewards are requested; an array of JSON + objects containing: + - `pubkey: ` - The public key, as base-58 encoded string, of the + account that received the reward + - `lamports: `- number of reward lamports credited or debited by the + account, as a i64 + - `postBalance: ` - account balance in lamports after the reward was + applied + - `rewardType: ` - type of reward: "fee", "rent", + "voting", "staking" + - `commission: ` - vote account commission when the reward was + credited, only present for voting and staking rewards + - `blockTime: ` - estimated production time, as Unix timestamp + (seconds since the Unix epoch). 
null if not available + +#### For more details on returned data: + +- [Transaction Structure](/docs/rpc/json-structures#transactions) +- [Inner Instructions Structure](/docs/rpc/json-structures#inner-instructions) +- [Token Balances Structure](/docs/rpc/json-structures#token-balances) + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc": "2.0", "id": 1, + "method": "getConfirmedBlock", + "params": [430, "base64"] + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { + "blockTime": null, + "blockhash": "3Eq21vXNB5s86c62bVuUfTeaMif1N2kUqRPBmGRJhyTA", + "parentSlot": 429, + "previousBlockhash": "mfcyqEXB3DnHXki6KjjmZck6YjmZLvpAByy2fj4nh6B", + "rewards": [], + "transactions": [ + { + "meta": { + "err": null, + "fee": 5000, + "innerInstructions": [], + "logMessages": [], + "postBalances": [499998932500, 26858640, 1, 1, 1], + "postTokenBalances": [], + "preBalances": [499998937500, 26858640, 1, 1, 1], + "preTokenBalances": [], + "status": { + "Ok": null + } + }, + "transaction": [ + "AVj7dxHlQ9IrvdYVIjuiRFs1jLaDMHixgrv+qtHBwz51L4/ImLZhszwiyEJDIp7xeBSpm/TX5B7mYzxa+fPOMw0BAAMFJMJVqLw+hJYheizSoYlLm53KzgT82cDVmazarqQKG2GQsLgiqktA+a+FDR4/7xnDX7rsusMwryYVUdixfz1B1Qan1RcZLwqvxvJl4/t3zHragsUp0L47E24tAFUgAAAABqfVFxjHdMkoVmOYaR1etoteuKObS21cc1VbIQAAAAAHYUgdNXR0u3xNdiTr072z2DVec9EQQ/wNo1OAAAAAAAtxOUhPBp2WSjUNJEgfvy70BbxI00fZyEPvFHNfxrtEAQQEAQIDADUCAAAAAQAAAAAAAACtAQAAAAAAAAdUE18R96XTJCe+YfRfUp6WP+YKCy/72ucOL8AoBFSpAA==", + "base64" + ] + } + ] + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/deprecated/getConfirmedBlocks.mdx b/content/docs/rpc/deprecated/getConfirmedBlocks.mdx new file mode 100644 index 000000000..6871d8eac --- /dev/null +++ b/content/docs/rpc/deprecated/getConfirmedBlocks.mdx @@ -0,0 +1,65 @@ +--- +title: getConfirmedBlocks +hideTableOfContents: true +altRoutes: + - /docs/rpc/getConfirmedBlocks + - /docs/rpc/http/getConfirmedBlocks +h1: getConfirmedBlocks RPC Method +--- + +Returns a list of confirmed blocks between two slots + + + This method is expected to be removed in `solana-core` v2.0. Please use + [getBlocks](/docs/rpc/http/getblocks) instead. + + + + + + +### Parameters + + + start_slot, as u64 integer + + + + +Configuration object containing the following fields: + + + + + +### Result + +The result field will be an array of u64 integers listing confirmed blocks +between `start_slot` and either `end_slot` - if provided, or latest confirmed +block, inclusive. Max range allowed is 500,000 slots. + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + {"jsonrpc": "2.0","id":1,"method":"getConfirmedBlocks","params":[5, 10]} +' +``` + +### Response + +```json +{ "jsonrpc": "2.0", "result": [5, 6, 7, 8, 9, 10], "id": 1 } +``` + + + diff --git a/content/docs/rpc/deprecated/getConfirmedBlocksWithLimit.mdx b/content/docs/rpc/deprecated/getConfirmedBlocksWithLimit.mdx new file mode 100644 index 000000000..00ac74755 --- /dev/null +++ b/content/docs/rpc/deprecated/getConfirmedBlocksWithLimit.mdx @@ -0,0 +1,72 @@ +--- +title: getConfirmedBlocksWithLimit +hideTableOfContents: true +altRoutes: + - /docs/rpc/getConfirmedBlocksWithLimit + - /docs/rpc/http/getConfirmedBlocksWithLimit +h1: getConfirmedBlocksWithLimit RPC Method +--- + +Returns a list of confirmed blocks starting at the given slot + + + This method is expected to be removed in `solana-core` v2.0. 
Please use + [getBlocksWithLimit](/docs/rpc/http/getblockswithlimit) instead. + + + + + + +### Parameters + + + start_slot, as u64 integer + + + + limit, as u64 integer + + + + +Configuration object containing the following fields: + + + + + +### Result + +The result field will be an array of u64 integers listing confirmed blocks +starting at `start_slot` for up to `limit` blocks, inclusive. + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc": "2.0", "id": 1, + "method": "getConfirmedBlocksWithLimit", + "params": [5, 3] + } +' +``` + +### Response + +```json +{ "jsonrpc": "2.0", "result": [5, 6, 7], "id": 1 } +``` + + + diff --git a/content/docs/rpc/deprecated/getConfirmedSignaturesForAddress2.mdx b/content/docs/rpc/deprecated/getConfirmedSignaturesForAddress2.mdx new file mode 100644 index 000000000..05e82d1e2 --- /dev/null +++ b/content/docs/rpc/deprecated/getConfirmedSignaturesForAddress2.mdx @@ -0,0 +1,120 @@ +--- +title: getConfirmedSignaturesForAddress2 +hideTableOfContents: true +altRoutes: + - /docs/rpc/getConfirmedSignaturesForAddress2 + - /docs/rpc/http/getConfirmedSignaturesForAddress2 + - /docs/rpc/getSignaturesForAddress2 + - /docs/rpc/http/getSignaturesForAddress2 + - /docs/rpc/deprecated/getSignaturesForAddress2 + - /docs/rpc/http/getconfirmedsignaturesforaddress + - /docs/rpc/deprecated/getconfirmedsignaturesforaddress + - /docs/rpc/getconfirmedsignaturesforaddress +h1: getConfirmedSignaturesForAddress2 RPC Method +--- + +Returns signatures for confirmed transactions that include the given address in +their `accountKeys` list. Returns signatures backwards in time from the provided +signature or most recent confirmed block + + + This method is expected to be removed in `solana-core` v2.0. Please use + [getSignaturesForAddress](/docs/rpc/http/getsignaturesforaddress) instead. + + + + + + +### Parameters + + + account address, as base-58 encoded string + + + +Configuration object containing the following fields: + + + + + maximum transaction signatures to return (between 1 and 1,000, default: + 1,000). + + + + start searching backwards from this transaction signature. (If not provided + the search starts from the top of the highest max confirmed block.) + + + + search until this transaction signature, if found before limit reached. + + + + +### Result + +The result field will be an array of ``, ordered from newest to oldest +transaction, containing transaction signature information with the following +fields: + +- `signature: ` - transaction signature as base-58 encoded string +- `slot: ` - The slot that contains the block with the transaction +- `err: ` - Error if transaction failed, null if transaction + succeeded. + [TransactionError definitions](https://github.com/solana-labs/solana/blob/c0c60386544ec9a9ec7119229f37386d9f070523/sdk/src/transaction/error.rs#L13) +- `memo: ` - Memo associated with the transaction, null if no memo + is present +- `blockTime: ` - estimated production time, as Unix timestamp + (seconds since the Unix epoch) of when transaction was processed. null if not + available. 
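Because this method is slated for removal, you will likely want to switch to
[getSignaturesForAddress](/docs/rpc/http/getsignaturesforaddress), which takes
the same address and pagination options (`limit`, `before`, `until`). A minimal
sketch, assuming the v1 `Connection` API from `@solana/web3.js` and the same
address as the curl example below:

```typescript
import { Connection, PublicKey, clusterApiUrl } from "@solana/web3.js";

const connection = new Connection(clusterApiUrl("devnet"), "confirmed");

// Same address as the curl example below; `limit` caps the number of results
const address = new PublicKey("Vote111111111111111111111111111111111111111");
const signatures = await connection.getSignaturesForAddress(address, {
  limit: 1,
});
// Each entry includes signature, slot, err, memo, and blockTime
console.log(signatures);
```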
+ + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc": "2.0", + "id": 1, + "method": "getConfirmedSignaturesForAddress2", + "params": [ + "Vote111111111111111111111111111111111111111", + { + "limit": 1 + } + ] + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": [ + { + "err": null, + "memo": null, + "signature": "5h6xBEauJ3PK6SWCZ1PGjBvj8vDdWG3KpwATGy1ARAXFSDwt8GFXM7W5Ncn16wmqokgpiKRLuS83KUxyZyv2sUYv", + "slot": 114, + "blockTime": null + } + ], + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/deprecated/getConfirmedTransaction.mdx b/content/docs/rpc/deprecated/getConfirmedTransaction.mdx new file mode 100644 index 000000000..e899bb639 --- /dev/null +++ b/content/docs/rpc/deprecated/getConfirmedTransaction.mdx @@ -0,0 +1,153 @@ +--- +title: getConfirmedTransaction +hideTableOfContents: true +altRoutes: + - /docs/rpc/getConfirmedTransaction + - /docs/rpc/http/getConfirmedTransaction +h1: getConfirmedTransaction RPC Method +--- + +Returns transaction details for a confirmed transaction + + + This method is expected to be removed in `solana-core` v2.0. Please use + [getTransaction](/docs/rpc/http/gettransaction) instead. + + + + + + +### Parameters + + + transaction signature, as base-58 encoded string + + + + +Configuration object containing the following fields: + + + + + +Encoding format for Account data + + + +
+ +- `base58` is slow and limited to less than 129 bytes of Account data. +- `jsonParsed` encoding attempts to use program-specific instruction parsers to + return more human-readable and explicit data in the + `transaction.message.instructions` list. +- If `jsonParsed` is requested but a parser cannot be found, the instruction + falls back to regular `json` encoding (`accounts`, `data`, and + `programIdIndex` fields). + +
+ +
+ +
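For new code, [getTransaction](/docs/rpc/http/gettransaction) accepts the same
transaction signature. A minimal sketch, assuming the v1 `Connection` API from
`@solana/web3.js` (the signature here is a placeholder; the curl request below
shows a concrete value):

```typescript
import { Connection, clusterApiUrl } from "@solana/web3.js";

const connection = new Connection(clusterApiUrl("devnet"), "confirmed");

// Placeholder: any confirmed transaction signature, as a base-58 string
const signature = "<base-58 transaction signature>";
const tx = await connection.getTransaction(signature, {
  maxSupportedTransactionVersion: 0,
});
console.log(tx?.slot, tx?.meta?.err);
```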
+ +### Result + +- `` - if transaction is not found or not confirmed +- `` - if transaction is confirmed, an object with the following fields: + - `slot: ` - the slot this transaction was processed in + - `transaction: ` - + [Transaction](/docs/rpc/json-structures#transactions) object, either in JSON + format or encoded binary data, depending on encoding parameter + - `blockTime: ` - estimated production time, as Unix timestamp + (seconds since the Unix epoch) of when the transaction was processed. null + if not available + - `meta: ` - transaction status metadata object: + - `err: ` - Error if transaction failed, null if transaction + succeeded. + [TransactionError definitions](https://docs.rs/solana-sdk/latest/solana_sdk/transaction/enum.TransactionError.html) + - `fee: ` - fee this transaction was charged, as u64 integer + - `preBalances: ` - array of u64 account balances from before the + transaction was processed + - `postBalances: ` - array of u64 account balances after the + transaction was processed + - `innerInstructions: ` - List of + [inner instructions](/docs/rpc/json-structures#inner-instructions) or + `null` if inner instruction recording was not enabled during this + transaction + - `preTokenBalances: ` - List of + [token balances](/docs/rpc/json-structures#token-balances) from before the + transaction was processed or omitted if token balance recording was not + yet enabled during this transaction + - `postTokenBalances: ` - List of + [token balances](/docs/rpc/json-structures#token-balances) from after the + transaction was processed or omitted if token balance recording was not + yet enabled during this transaction + - `logMessages: ` - array of string log messages or `null` if + log message recording was not enabled during this transaction + - DEPRECATED: `status: ` - Transaction status + - `"Ok": ` - Transaction was successful + - `"Err": ` - Transaction failed with TransactionError + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc": "2.0", + "id": 1, + "method": "getConfirmedTransaction", + "params": [ + "2nBhEBYYvfaAe16UMNqRHre4YNSskvuYgx3M6E4JP1oDYvZEJHvoPzyUidNgNX5r9sTyN1J9UxtbCXy2rqYcuyuv", + "base64" + ] + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { + "meta": { + "err": null, + "fee": 5000, + "innerInstructions": [], + "postBalances": [499998932500, 26858640, 1, 1, 1], + "postTokenBalances": [], + "preBalances": [499998937500, 26858640, 1, 1, 1], + "preTokenBalances": [], + "status": { + "Ok": null + } + }, + "slot": 430, + "transaction": [ + "AVj7dxHlQ9IrvdYVIjuiRFs1jLaDMHixgrv+qtHBwz51L4/ImLZhszwiyEJDIp7xeBSpm/TX5B7mYzxa+fPOMw0BAAMFJMJVqLw+hJYheizSoYlLm53KzgT82cDVmazarqQKG2GQsLgiqktA+a+FDR4/7xnDX7rsusMwryYVUdixfz1B1Qan1RcZLwqvxvJl4/t3zHragsUp0L47E24tAFUgAAAABqfVFxjHdMkoVmOYaR1etoteuKObS21cc1VbIQAAAAAHYUgdNXR0u3xNdiTr072z2DVec9EQQ/wNo1OAAAAAAAtxOUhPBp2WSjUNJEgfvy70BbxI00fZyEPvFHNfxrtEAQQEAQIDADUCAAAAAQAAAAAAAACtAQAAAAAAAAdUE18R96XTJCe+YfRfUp6WP+YKCy/72ucOL8AoBFSpAA==", + "base64" + ] + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/deprecated/getFeeCalculatorForBlockhash.mdx b/content/docs/rpc/deprecated/getFeeCalculatorForBlockhash.mdx new file mode 100644 index 000000000..084a612f3 --- /dev/null +++ b/content/docs/rpc/deprecated/getFeeCalculatorForBlockhash.mdx @@ -0,0 +1,94 @@ +--- +title: getFeeCalculatorForBlockhash +hideTableOfContents: true +altRoutes: + - /docs/rpc/getFeeCalculatorForBlockhash + - 
/docs/rpc/http/getFeeCalculatorForBlockhash +h1: getFeeCalculatorForBlockhash RPC Method +--- + +Returns the fee calculator associated with the query blockhash, or `null` if the +blockhash has expired + + + This method is expected to be removed in `solana-core` v2.0. Please use + [isBlockhashValid](/docs/rpc/http/isblockhashvalid) or + [getFeeForMessage](/docs/rpc/http/getfeeformessage) instead. + + + + + + +### Parameters + + + query blockhash, as a base-58 encoded string + + + + +Configuration object containing the following fields: + + + + + The minimum slot that the request can be evaluated at + + + + +### Result + +The result will be an RpcResponse JSON object with `value` equal to: + +- `` - if the query blockhash has expired; or +- `` - otherwise, a JSON object containing: + - `feeCalculator: ` - `FeeCalculator` object describing the cluster + fee rate at the queried blockhash + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc": "2.0", + "id": 1, + "method": "getFeeCalculatorForBlockhash", + "params": [ + "GJxqhuxcgfn5Tcj6y3f8X4FeCDd2RQ6SnEMo1AAxrPRZ" + ] + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { + "context": { + "slot": 221 + }, + "value": { + "feeCalculator": { + "lamportsPerSignature": 5000 + } + } + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/deprecated/getFeeRateGovernor.mdx b/content/docs/rpc/deprecated/getFeeRateGovernor.mdx new file mode 100644 index 000000000..85b1d6ab8 --- /dev/null +++ b/content/docs/rpc/deprecated/getFeeRateGovernor.mdx @@ -0,0 +1,74 @@ +--- +title: getFeeRateGovernor +hideTableOfContents: true +altRoutes: + - /docs/rpc/getFeeRateGovernor + - /docs/rpc/http/getFeeRateGovernor +h1: getFeeRateGovernor RPC Method +--- + +Returns the fee rate governor information from the root bank + + + This method is expected to be removed in `solana-core` v2.0. Please use + [getFeeForMessage](/docs/rpc/http/getfeeformessage) instead. 
+ + + + + + +### Parameters + +**None** + +### Result + +The result will be an RpcResponse JSON object with `value` equal to an `object` +with the following fields: + +- `burnPercent: ` - Percentage of fees collected to be destroyed +- `maxLamportsPerSignature: ` - Largest value `lamportsPerSignature` can + attain for the next slot +- `minLamportsPerSignature: ` - Smallest value `lamportsPerSignature` can + attain for the next slot +- `targetLamportsPerSignature: ` - Desired fee rate for the cluster +- `targetSignaturesPerSlot: ` - Desired signature rate for the cluster + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + {"jsonrpc":"2.0","id":1, "method":"getFeeRateGovernor"} +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { + "context": { + "slot": 54 + }, + "value": { + "feeRateGovernor": { + "burnPercent": 50, + "maxLamportsPerSignature": 100000, + "minLamportsPerSignature": 5000, + "targetLamportsPerSignature": 10000, + "targetSignaturesPerSlot": 20000 + } + } + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/deprecated/getFees.mdx b/content/docs/rpc/deprecated/getFees.mdx new file mode 100644 index 000000000..2920f2575 --- /dev/null +++ b/content/docs/rpc/deprecated/getFees.mdx @@ -0,0 +1,91 @@ +--- +title: getFees +hideTableOfContents: true +altRoutes: + - /docs/rpc/getFees + - /docs/rpc/http/getFees +h1: getFees RPC Method +--- + +Returns a recent block hash from the ledger, a fee schedule that can be used to +compute the cost of submitting a transaction using it, and the last slot in +which the blockhash will be valid. + + + This method is expected to be removed in `solana-core` v2.0. Please use + [getFeeForMessage](/docs/rpc/http/getfeeformessage) instead. + + + + + + +### Parameters + + + Pubkey of account to query, as base-58 encoded string + + + + +Configuration object containing the following fields: + + + + + +### Result + +The result will be an RpcResponse JSON object with `value` set to a JSON object +with the following fields: + +- `blockhash: ` - a Hash as base-58 encoded string +- `feeCalculator: ` - FeeCalculator object, the fee schedule for this + block hash +- `lastValidSlot: ` - DEPRECATED - this value is inaccurate and should not + be relied upon +- `lastValidBlockHeight: ` - last + [block height](/docs/terminology#block-height) at which the blockhash will + be valid + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { "jsonrpc":"2.0", "id": 1, "method":"getFees"} +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { + "context": { + "slot": 1 + }, + "value": { + "blockhash": "CSymwgTNX1j3E4qhKfJAUE41nBWEwXufoYryPbkde5RR", + "feeCalculator": { + "lamportsPerSignature": 5000 + }, + "lastValidSlot": 297, + "lastValidBlockHeight": 296 + } + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/deprecated/getRecentBlockhash.mdx b/content/docs/rpc/deprecated/getRecentBlockhash.mdx new file mode 100644 index 000000000..01ef9ecc7 --- /dev/null +++ b/content/docs/rpc/deprecated/getRecentBlockhash.mdx @@ -0,0 +1,85 @@ +--- +title: getRecentBlockhash +hideTableOfContents: true +altRoutes: + - /docs/rpc/http/getRecentBlockhash + - /docs/rpc/getRecentBlockhash +h1: getRecentBlockhash RPC Method +--- + +Returns a recent block hash from the ledger, and a fee schedule that can be used +to compute the cost of submitting a transaction using it. 
+ + + This method is expected to be removed in `solana-core` v2.0. Please use + [getLatestBlockhash](/docs/rpc/http/getlatestblockhash) instead. + + + + + + +### Parameters + + + Pubkey of account to query, as base-58 encoded string + + + + +Configuration object containing the following fields: + + + + + +### Result + +An RpcResponse containing a JSON object consisting of a string blockhash and +FeeCalculator JSON object. + +- `RpcResponse` - RpcResponse JSON object with `value` field set to a + JSON object including: +- `blockhash: ` - a Hash as base-58 encoded string +- `feeCalculator: ` - FeeCalculator object, the fee schedule for this + block hash + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + {"jsonrpc":"2.0","id":1, "method":"getRecentBlockhash"} +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { + "context": { + "slot": 1 + }, + "value": { + "blockhash": "CSymwgTNX1j3E4qhKfJAUE41nBWEwXufoYryPbkde5RR", + "feeCalculator": { + "lamportsPerSignature": 5000 + } + } + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/deprecated/getSignatureConfirmation.mdx b/content/docs/rpc/deprecated/getSignatureConfirmation.mdx new file mode 100644 index 000000000..de94d85f4 --- /dev/null +++ b/content/docs/rpc/deprecated/getSignatureConfirmation.mdx @@ -0,0 +1,13 @@ +--- +title: getSignatureConfirmation +hideTableOfContents: true +h1: getSignatureConfirmation RPC Method +--- + +Fetch the current status of a transaction signature (processed, confirmed, +finalized). + + + This method is expected to be removed in `solana-core` v2.0. Please use + [getSignatureStatuses](/docs/rpc/http/getsignaturestatuses) instead. + diff --git a/content/docs/rpc/deprecated/getSignatureStatus.mdx b/content/docs/rpc/deprecated/getSignatureStatus.mdx new file mode 100644 index 000000000..6598af609 --- /dev/null +++ b/content/docs/rpc/deprecated/getSignatureStatus.mdx @@ -0,0 +1,13 @@ +--- +title: getSignatureStatus +hideTableOfContents: true +h1: getSignatureStatus RPC Method +--- + +Fetch the current status of a transaction signature (processed, confirmed, +finalized). + + + This method is expected to be removed in `solana-core` v2.0. Please use + [getSignatureStatuses](/docs/rpc/http/getsignaturestatuses) instead. + diff --git a/content/docs/rpc/deprecated/getSnapshotSlot.mdx b/content/docs/rpc/deprecated/getSnapshotSlot.mdx new file mode 100644 index 000000000..5c00657e6 --- /dev/null +++ b/content/docs/rpc/deprecated/getSnapshotSlot.mdx @@ -0,0 +1,58 @@ +--- +title: getSnapshotSlot +hideTableOfContents: true +altRoutes: + - /docs/rpc/http/getSnapshotSlot + - /docs/rpc/getSnapshotSlot +h1: getSnapshotSlot RPC Method +--- + +Returns the highest slot that the node has a snapshot for + + + This method is expected to be removed in `solana-core` v2.0. Please use + [getHighestSnapshotSlot](/docs/rpc/http/gethighestsnapshotslot) instead. 
+ + + + + + +### Parameters + +**None** + +### Result + +`` - Snapshot slot + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + {"jsonrpc":"2.0","id":1, "method":"getSnapshotSlot"} +' +``` + +### Response + +```json +{ "jsonrpc": "2.0", "result": 100, "id": 1 } +``` + +Result when the node has no snapshot: + +```json +{ + "jsonrpc": "2.0", + "error": { "code": -32008, "message": "No snapshot" }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/deprecated/getStakeActivation.mdx b/content/docs/rpc/deprecated/getStakeActivation.mdx new file mode 100644 index 000000000..8618c90f1 --- /dev/null +++ b/content/docs/rpc/deprecated/getStakeActivation.mdx @@ -0,0 +1,97 @@ +--- +title: getStakeActivation +hideTableOfContents: true +altRoutes: + - /docs/rpc/http/getStakeActivation +h1: getStakeActivation RPC Method +--- + +Returns epoch activation information for a stake account + + + This method is expected to be removed in `solana-core` v2.0. Please use [this + alternative + approach](https://github.com/solana-developers/solana-rpc-get-stake-activation) + instead. + + + + + + +### Parameters + + + Pubkey of stake Account to query, as base-58 encoded string + + + + +Configuration object containing the following fields: + + + + + The minimum slot that the request can be evaluated at + + + + epoch for which to calculate activation details. If parameter not provided, + defaults to current epoch. **DEPRECATED**, inputs other than the current epoch + return an error. + + + + +### Result + +The result will be a JSON object with the following fields: + +- `state: ` - the stake account's activation state, either: `active`, + `inactive`, `activating`, or `deactivating` +- `active: ` - stake active during the epoch +- `inactive: ` - stake inactive during the epoch + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc": "2.0", + "id": 1, + "method": "getStakeActivation", + "params": [ + "CYRJWqiSjLitBAcRxPvWpgX3s5TvmN2SuRY3eEYypFvT", + { + "epoch": 4 + } + ] + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { + "active": 124429280, + "inactive": 73287840, + "state": "activating" + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/deprecated/meta.json b/content/docs/rpc/deprecated/meta.json new file mode 100644 index 000000000..1d50209ee --- /dev/null +++ b/content/docs/rpc/deprecated/meta.json @@ -0,0 +1,19 @@ +{ + "title": "Deprecated Methods", + "pages": [ + "confirmTransaction", + "getConfirmedBlock", + "getConfirmedBlocks", + "getConfirmedBlocksWithLimit", + "getConfirmedSignaturesForAddress2", + "getConfirmedTransaction", + "getFeeCalculatorForBlockhash", + "getFeeRateGovernor", + "getFees", + "getRecentBlockhash", + "getSignatureConfirmation", + "getSignatureStatus", + "getSnapshotSlot", + "getStakeActivation" + ] +} diff --git a/content/docs/rpc/http/getBalance.mdx b/content/docs/rpc/http/getBalance.mdx new file mode 100644 index 000000000..9ec32868c --- /dev/null +++ b/content/docs/rpc/http/getBalance.mdx @@ -0,0 +1,72 @@ +--- +title: getBalance +hideTableOfContents: true +altRoutes: + - /docs/rpc/getBalance +h1: getBalance RPC Method +--- + +Returns the lamport balance of the account of provided Pubkey + + + + + +### Parameters + + + Pubkey of account to query, as base-58 encoded string + + + + +Configuration object containing the following fields: + + + + + The minimum slot that the request can be 
evaluated at + + + + +### Result + +`RpcResponse` - RpcResponse JSON object with `value` field set to the +balance + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc": "2.0", "id": 1, + "method": "getBalance", + "params": [ + "83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri" + ] + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { "context": { "slot": 1 }, "value": 0 }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getBlock.mdx b/content/docs/rpc/http/getBlock.mdx new file mode 100644 index 000000000..eeac37287 --- /dev/null +++ b/content/docs/rpc/http/getBlock.mdx @@ -0,0 +1,287 @@ +--- +title: getBlock +hideTableOfContents: true +altRoutes: + - /docs/rpc/getBlock +h1: getBlock RPC Method +--- + +Returns identity and transaction information about a confirmed block in the +ledger + + + + + +### Parameters + + + slot number, as `u64` integer + + + + +Configuration object containing the following fields: + + + +- `processed` is not supported. + + + + + +encoding format for each returned Transaction + + + +
+ +- `jsonParsed` attempts to use program-specific instruction parsers to return + more human-readable and explicit data in the + `transaction.message.instructions` list. +- If `jsonParsed` is requested but a parser cannot be found, the instruction + falls back to regular JSON encoding (`accounts`, `data`, and `programIdIndex` + fields). + +
+ +
+ + + +level of transaction detail to return + + + +
+ +- If `accounts` are requested, transaction details only include signatures and + an annotated list of accounts in each transaction. +- Transaction metadata is limited to only: fee, err, pre_balances, + post_balances, pre_token_balances, and post_token_balances. + +
+ +
+ + + +the max transaction version to return in responses. + +
+ +- If the requested block contains a transaction with a higher version, an error + will be returned. +- If this parameter is omitted, only legacy transactions will be returned, and a + block containing any versioned transaction will prompt the error. + +
+ +
+ + + whether to populate the `rewards` array. If parameter not provided, the + default includes rewards. + + +
+ +### Result + +The result field will be an object with the following fields: + +- `` - if specified block is not confirmed +- `` - if block is confirmed, an object with the following fields: + - `blockhash: ` - the blockhash of this block, as base-58 encoded + string + - `previousBlockhash: ` - the blockhash of this block's parent, as + base-58 encoded string; if the parent block is not available due to ledger + cleanup, this field will return "11111111111111111111111111111111" + - `parentSlot: ` - the slot index of this block's parent + - `transactions: ` - present if "full" transaction details are + requested; an array of JSON objects containing: + - `transaction: ` - + [Transaction](/docs/rpc/json-structures#transactions) object, either in + JSON format or encoded binary data, depending on encoding parameter + - `meta: ` - transaction status metadata object, containing `null` + or: + - `err: ` - Error if transaction failed, null if transaction + succeeded. + [TransactionError definitions](https://github.com/solana-labs/solana/blob/c0c60386544ec9a9ec7119229f37386d9f070523/sdk/src/transaction/error.rs#L13) + - `fee: ` - fee this transaction was charged, as u64 integer + - `preBalances: ` - array of u64 account balances from before the + transaction was processed + - `postBalances: ` - array of u64 account balances after the + transaction was processed + - `innerInstructions: ` - List of + [inner instructions](/docs/rpc/json-structures#inner-instructions) or + `null` if inner instruction recording was not enabled during this + transaction + - `preTokenBalances: ` - List of + [token balances](/docs/rpc/json-structures#token-balances) from before + the transaction was processed or omitted if token balance recording was + not yet enabled during this transaction + - `postTokenBalances: ` - List of + [token balances](/docs/rpc/json-structures#token-balances) from after + the transaction was processed or omitted if token balance recording was + not yet enabled during this transaction + - `logMessages: ` - array of string log messages or `null` if + log message recording was not enabled during this transaction + - `rewards: ` - transaction-level rewards, populated if + rewards are requested; an array of JSON objects containing: + - `pubkey: ` - The public key, as base-58 encoded string, of the + account that received the reward + - `lamports: `- number of reward lamports credited or debited by + the account, as a i64 + - `postBalance: ` - account balance in lamports after the reward + was applied + - `rewardType: ` - type of reward: "fee", "rent", + "voting", "staking" + - `commission: ` - vote account commission when the reward + was credited, only present for voting and staking rewards + - DEPRECATED: `status: ` - Transaction status + - `"Ok": ` - Transaction was successful + - `"Err": ` - Transaction failed with TransactionError + - `loadedAddresses: ` - Transaction addresses loaded + from address lookup tables. Undefined if + `maxSupportedTransactionVersion` is not set in request params, or if + `jsonParsed` encoding is set in request params. 
+ - `writable: ` - Ordered list of base-58 encoded + addresses for writable loaded accounts + - `readonly: ` - Ordered list of base-58 encoded + addresses for readonly loaded accounts + - `returnData: ` - the most-recent return data generated + by an instruction in the transaction, with the following fields: + - `programId: ` - the program that generated the return data, as + base-58 encoded Pubkey + - `data: <[string, encoding]>` - the return data itself, as base-64 + encoded binary data + - `computeUnitsConsumed: ` - number of + [compute units](/docs/core/fees#compute-budget) consumed by the + transaction + - `version: <"legacy"|number|undefined>` - Transaction version. Undefined if + `maxSupportedTransactionVersion` is not set in request params. + - `signatures: ` - present if "signatures" are requested for + transaction details; an array of signatures strings, corresponding to the + transaction order in the block + - `rewards: ` - block-level rewards, present if rewards are + requested; an array of JSON objects containing: + - `pubkey: ` - The public key, as base-58 encoded string, of the + account that received the reward + - `lamports: `- number of reward lamports credited or debited by the + account, as a i64 + - `postBalance: ` - account balance in lamports after the reward was + applied + - `rewardType: ` - type of reward: "fee", "rent", + "voting", "staking" + - `commission: ` - vote account commission when the reward was + credited, only present for voting and staking rewards + - `blockTime: ` - estimated production time, as Unix timestamp + (seconds since the Unix epoch). null if not available + - `blockHeight: ` - the number of blocks beneath this block + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc": "2.0","id":1, + "method":"getBlock", + "params": [ + 430, + { + "encoding": "json", + "maxSupportedTransactionVersion":0, + "transactionDetails":"full", + "rewards":false + } + ] + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { + "blockHeight": 428, + "blockTime": null, + "blockhash": "3Eq21vXNB5s86c62bVuUfTeaMif1N2kUqRPBmGRJhyTA", + "parentSlot": 429, + "previousBlockhash": "mfcyqEXB3DnHXki6KjjmZck6YjmZLvpAByy2fj4nh6B", + "transactions": [ + { + "meta": { + "err": null, + "fee": 5000, + "innerInstructions": [], + "logMessages": [], + "postBalances": [499998932500, 26858640, 1, 1, 1], + "postTokenBalances": [], + "preBalances": [499998937500, 26858640, 1, 1, 1], + "preTokenBalances": [], + "rewards": null, + "status": { + "Ok": null + } + }, + "transaction": { + "message": { + "accountKeys": [ + "3UVYmECPPMZSCqWKfENfuoTv51fTDTWicX9xmBD2euKe", + "AjozzgE83A3x1sHNUR64hfH7zaEBWeMaFuAN9kQgujrc", + "SysvarS1otHashes111111111111111111111111111", + "SysvarC1ock11111111111111111111111111111111", + "Vote111111111111111111111111111111111111111" + ], + "header": { + "numReadonlySignedAccounts": 0, + "numReadonlyUnsignedAccounts": 3, + "numRequiredSignatures": 1 + }, + "instructions": [ + { + "accounts": [1, 2, 3, 0], + "data": "37u9WtQpcm6ULa3WRQHmj49EPs4if7o9f1jSRVZpm2dvihR9C8jY4NqEwXUbLwx15HBSNcP1", + "programIdIndex": 4 + } + ], + "recentBlockhash": "mfcyqEXB3DnHXki6KjjmZck6YjmZLvpAByy2fj4nh6B" + }, + "signatures": [ + "2nBhEBYYvfaAe16UMNqRHre4YNSskvuYgx3M6E4JP1oDYvZEJHvoPzyUidNgNX5r9sTyN1J9UxtbCXy2rqYcuyuv" + ] + } + } + ] + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getBlockCommitment.mdx b/content/docs/rpc/http/getBlockCommitment.mdx new file 
mode 100644 index 000000000..2ac02f61a --- /dev/null +++ b/content/docs/rpc/http/getBlockCommitment.mdx @@ -0,0 +1,65 @@ +--- +title: getBlockCommitment +hideTableOfContents: true +altRoutes: + - /docs/rpc/getBlockCommitment +h1: getBlockCommitment RPC Method +--- + +Returns commitment for particular block + + + + + +### Parameters + + + block number, identified by Slot + + +### Result + +The result field will be a JSON object containing: + +- `commitment` - commitment, comprising either: + - `` - Unknown block + - `` - commitment, array of u64 integers logging the amount of cluster + stake in lamports that has voted on the block at each depth from 0 to + `MAX_LOCKOUT_HISTORY` + 1 +- `totalStake` - total active stake, in lamports, of the current epoch + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc": "2.0", "id": 1, + "method": "getBlockCommitment", + "params":[5] + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { + "commitment": [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 10, 32 + ], + "totalStake": 42 + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getBlockHeight.mdx b/content/docs/rpc/http/getBlockHeight.mdx new file mode 100644 index 000000000..a3a2fd39e --- /dev/null +++ b/content/docs/rpc/http/getBlockHeight.mdx @@ -0,0 +1,66 @@ +--- +title: getBlockHeight +hideTableOfContents: true +altRoutes: + - /docs/rpc/getBlockHeight +h1: getBlockHeight RPC Method +--- + +Returns the current block height of the node + + + + + +### Parameters + + + +Configuration object containing the following fields: + + + + + The minimum slot that the request can be evaluated at + + + + +### Result + +- `` - Current block height + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc":"2.0","id":1, + "method":"getBlockHeight" + } +' +``` + +Result: + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": 1233, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getBlockProduction.mdx b/content/docs/rpc/http/getBlockProduction.mdx new file mode 100644 index 000000000..361231321 --- /dev/null +++ b/content/docs/rpc/http/getBlockProduction.mdx @@ -0,0 +1,94 @@ +--- +title: getBlockProduction +hideTableOfContents: true +altRoutes: + - /docs/rpc/getBlockProduction +h1: getBlockProduction RPC Method +--- + +Returns recent block production information from the current or previous epoch. + + + + + +### Parameters + + + +Configuration object containing the following fields: + + + + + Only return results for this validator identity (base-58 encoded) + + + +Slot range to return block production for. If parameter not provided, defaults to current epoch. + +- `firstSlot: ` - first slot to return block production information for + (inclusive) +- (optional) `lastSlot: ` - last slot to return block production + information for (inclusive). If parameter not provided, defaults to the + highest slot + + + + + +### Result + +The result will be an RpcResponse JSON object with `value` equal to: + +- `` + - `byIdentity: ` - a dictionary of validator identities, as base-58 + encoded strings. Value is a two element array containing the number of + leader slots and the number of blocks produced. 
+ - `range: ` - Block production slot range + - `firstSlot: ` - first slot of the block production information + (inclusive) + - `lastSlot: ` - last slot of block production information (inclusive) + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + {"jsonrpc":"2.0","id":1, "method":"getBlockProduction"} +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { + "context": { + "slot": 9887 + }, + "value": { + "byIdentity": { + "85iYT5RuzRTDgjyRa3cP8SYhM2j21fj7NhfJ3peu1DPr": [9888, 9886] + }, + "range": { + "firstSlot": 0, + "lastSlot": 9887 + } + } + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getBlockTime.mdx b/content/docs/rpc/http/getBlockTime.mdx new file mode 100644 index 000000000..f15ec2291 --- /dev/null +++ b/content/docs/rpc/http/getBlockTime.mdx @@ -0,0 +1,75 @@ +--- +title: getBlockTime +hideTableOfContents: true +altRoutes: + - /docs/rpc/getBlockTime +h1: getBlockTime RPC Method +--- + +Returns the estimated production time of a block. + + + Each validator reports their UTC time to the ledger on a regular interval by + intermittently adding a timestamp to a Vote for a particular block. A + requested block's time is calculated from the stake-weighted mean of the Vote + timestamps in a set of recent blocks recorded on the ledger. + + + + + + +### Parameters + + + block number, identified by Slot + + +### Result + +- `` - estimated production time, as Unix timestamp (seconds since the Unix + epoch) + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc":"2.0", "id":1, + "method": "getBlockTime", + "params":[5] + } +' +``` + +### Response + +When a block time is available: + +```json +{ + "jsonrpc": "2.0", + "result": 1574721591, + "id": 1 +} +``` + +When a block time is not available: + +```json +{ + "jsonrpc": "2.0", + "error": { + "code": -32004, + "message": "Block not available for slot 150" + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getBlocks.mdx b/content/docs/rpc/http/getBlocks.mdx new file mode 100644 index 000000000..ac867bf7d --- /dev/null +++ b/content/docs/rpc/http/getBlocks.mdx @@ -0,0 +1,79 @@ +--- +title: getBlocks +hideTableOfContents: true +altRoutes: + - /docs/rpc/getBlocks +h1: getBlocks RPC Method +--- + +Returns a list of confirmed blocks between two slots + + + + + +### Parameters + + + start_slot, as `u64` integer + + + + end_slot, as `u64` integer (must be no more than 500,000 blocks higher than + the `start_slot`) + + + + +Configuration object containing the following fields: + + + +- "processed" is not supported + + + + + +### Result + +The result field will be an array of u64 integers listing confirmed blocks +between `start_slot` and either `end_slot` - if provided, or latest confirmed +block, inclusive. Max range allowed is 500,000 slots. 
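+
+As a rough sketch, the same range can be requested through the `Connection`
+client from `@solana/web3.js` (assuming a devnet endpoint and the example
+slots used in the curl sample below):
+
+```typescript
+import { Connection, clusterApiUrl } from "@solana/web3.js";
+
+const connection = new Connection(clusterApiUrl("devnet"), "confirmed");
+
+// confirmed blocks between slots 5 and 10, inclusive
+const blocks = await connection.getBlocks(5, 10);
+console.log("confirmed blocks:", blocks);
+```
+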
+ + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc": "2.0", "id": 1, + "method": "getBlocks", + "params": [ + 5, 10 + ] + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": [5, 6, 7, 8, 9, 10], + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getBlocksWithLimit.mdx b/content/docs/rpc/http/getBlocksWithLimit.mdx new file mode 100644 index 000000000..2b5e6edc3 --- /dev/null +++ b/content/docs/rpc/http/getBlocksWithLimit.mdx @@ -0,0 +1,77 @@ +--- +title: getBlocksWithLimit +hideTableOfContents: true +altRoutes: + - /docs/rpc/getBlocksWithLimit +h1: getBlocksWithLimit RPC Method +--- + +Returns a list of confirmed blocks starting at the given slot + + + + + +### Parameters + + + start_slot, as `u64` integer + + + + limit, as `u64` integer (must be no more than 500,000 blocks higher than the + `start_slot`) + + + + +Configuration object containing the following field: + + + +- "processed" is not supported + + + + + +### Result + +The result field will be an array of u64 integers listing confirmed blocks +starting at `start_slot` for up to `limit` blocks, inclusive. + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc": "2.0", + "id":1, + "method":"getBlocksWithLimit", + "params":[5, 3] + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": [5, 6, 7], + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getClusterNodes.mdx b/content/docs/rpc/http/getClusterNodes.mdx new file mode 100644 index 000000000..c462414a7 --- /dev/null +++ b/content/docs/rpc/http/getClusterNodes.mdx @@ -0,0 +1,69 @@ +--- +title: getClusterNodes +hideTableOfContents: true +altRoutes: + - /docs/rpc/getClusterNodes +h1: getClusterNodes RPC Method +--- + +Returns information about all the nodes participating in the cluster + + + + + +### Parameters + +**None** + +### Result + +The result field will be an array of JSON objects, each with the following sub +fields: + +- `pubkey: ` - Node public key, as base-58 encoded string +- `gossip: ` - Gossip network address for the node +- `tpu: ` - TPU network address for the node +- `rpc: ` - JSON RPC network address for the node, or `null` if the + JSON RPC service is not enabled +- `version: ` - The software version of the node, or `null` if the + version information is not available +- `featureSet: ` - The unique identifier of the node's feature set +- `shredVersion: ` - The shred version the node has been configured to + use + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc": "2.0", "id": 1, + "method": "getClusterNodes" + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": [ + { + "gossip": "10.239.6.48:8001", + "pubkey": "9QzsJf7LPLj8GkXbYT3LFDKqsj2hHG7TA3xinJHu8epQ", + "rpc": "10.239.6.48:8899", + "tpu": "10.239.6.48:8856", + "version": "1.0.0 c375ce1f" + } + ], + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getEpochInfo.mdx b/content/docs/rpc/http/getEpochInfo.mdx new file mode 100644 index 000000000..728e96a11 --- /dev/null +++ b/content/docs/rpc/http/getEpochInfo.mdx @@ -0,0 +1,76 @@ +--- +title: getEpochInfo +hideTableOfContents: true +altRoutes: + - /docs/rpc/getEpochInfo +h1: getEpochInfo RPC Method +--- + +Returns information about the current epoch + + + + + +### Parameters + + + +Configuration object containing the 
following fields: + + + + + The minimum slot that the request can be evaluated at + + + + +### Result + +The result field will be an object with the following fields: + +- `absoluteSlot: ` - the current slot +- `blockHeight: ` - the current block height +- `epoch: ` - the current epoch +- `slotIndex: ` - the current slot relative to the start of the current + epoch +- `slotsInEpoch: ` - the number of slots in this epoch +- `transactionCount: ` - total number of transactions processed + without error since genesis + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + {"jsonrpc":"2.0","id":1, "method":"getEpochInfo"} +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { + "absoluteSlot": 166598, + "blockHeight": 166500, + "epoch": 27, + "slotIndex": 2790, + "slotsInEpoch": 8192, + "transactionCount": 22661093 + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getEpochSchedule.mdx b/content/docs/rpc/http/getEpochSchedule.mdx new file mode 100644 index 000000000..6683c7459 --- /dev/null +++ b/content/docs/rpc/http/getEpochSchedule.mdx @@ -0,0 +1,64 @@ +--- +title: getEpochSchedule +hideTableOfContents: true +altRoutes: + - /docs/rpc/getEpochSchedule +h1: getEpochSchedule RPC Method +--- + +Returns the epoch schedule information from this cluster's genesis config + + + + + +### Parameters + +**None** + +### Result + +The result field will be an object with the following fields: + +- `slotsPerEpoch: ` - the maximum number of slots in each epoch +- `leaderScheduleSlotOffset: ` - the number of slots before beginning of an + epoch to calculate a leader schedule for that epoch +- `warmup: ` - whether epochs start short and grow +- `firstNormalEpoch: ` - first normal-length epoch, log2(slotsPerEpoch) - + log2(MINIMUM_SLOTS_PER_EPOCH) +- `firstNormalSlot: ` - MINIMUM_SLOTS_PER_EPOCH \* + (2.pow(firstNormalEpoch) - 1) + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc":"2.0","id":1, + "method":"getEpochSchedule" + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { + "firstNormalEpoch": 8, + "firstNormalSlot": 8160, + "leaderScheduleSlotOffset": 8192, + "slotsPerEpoch": 8192, + "warmup": true + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getFeeForMessage.mdx b/content/docs/rpc/http/getFeeForMessage.mdx new file mode 100644 index 000000000..402ccc2b8 --- /dev/null +++ b/content/docs/rpc/http/getFeeForMessage.mdx @@ -0,0 +1,80 @@ +--- +title: getFeeForMessage +hideTableOfContents: true +altRoutes: + - /docs/rpc/getFeeForMessage +h1: getFeeForMessage RPC Method +--- + +Get the fee the network will charge for a particular Message + + + This method is only available in `solana-core` v1.9 or newer. Please use + [getFees](/docs/rpc/deprecated/getfees) for `solana-core` v1.8 and below. 
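+
+For a client-side illustration, the sketch below builds a throwaway versioned
+message and asks the cluster for its fee via the `Connection` client from
+`@solana/web3.js`; the generated keypair and self-transfer instruction are
+placeholders used only to produce a valid example message:
+
+```typescript
+import {
+  Connection,
+  Keypair,
+  SystemProgram,
+  TransactionMessage,
+  clusterApiUrl,
+} from "@solana/web3.js";
+
+const connection = new Connection(clusterApiUrl("devnet"), "confirmed");
+
+// placeholder signer, used only to assemble an example message
+const payer = Keypair.generate();
+const { blockhash } = await connection.getLatestBlockhash();
+
+const message = new TransactionMessage({
+  payerKey: payer.publicKey,
+  recentBlockhash: blockhash,
+  instructions: [
+    SystemProgram.transfer({
+      fromPubkey: payer.publicKey,
+      toPubkey: payer.publicKey,
+      lamports: 1_000,
+    }),
+  ],
+}).compileToV0Message();
+
+const fee = await connection.getFeeForMessage(message, "processed");
+console.log("Fee in lamports:", fee.value);
+```
+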
+ + + + + + +### Parameters + + + Base-64 encoded Message + + + + +Configuration object containing the following fields: + + + + + The minimum slot that the request can be evaluated at + + + + +### Result + +- `` - Fee corresponding to the message at the specified blockhash + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' +{ + "id":1, + "jsonrpc":"2.0", + "method":"getFeeForMessage", + "params":[ + "AQABAgIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEBAQAA", + { + "commitment":"processed" + } + ] +} +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { "context": { "slot": 5068 }, "value": 5000 }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getFirstAvailableBlock.mdx b/content/docs/rpc/http/getFirstAvailableBlock.mdx new file mode 100644 index 000000000..da9f2f058 --- /dev/null +++ b/content/docs/rpc/http/getFirstAvailableBlock.mdx @@ -0,0 +1,45 @@ +--- +title: getFirstAvailableBlock +hideTableOfContents: true +altRoutes: + - /docs/rpc/getFirstAvailableBlock +h1: getFirstAvailableBlock RPC Method +--- + +Returns the slot of the lowest confirmed block that has not been purged from the +ledger + + + + + +### Parameters + +**None** + +### Result + +- `` - Slot + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc":"2.0","id":1, + "method":"getFirstAvailableBlock" + } +' +``` + +### Response + +```json +{ "jsonrpc": "2.0", "result": 250000, "id": 1 } +``` + + + diff --git a/content/docs/rpc/http/getGenesisHash.mdx b/content/docs/rpc/http/getGenesisHash.mdx new file mode 100644 index 000000000..b7212fd88 --- /dev/null +++ b/content/docs/rpc/http/getGenesisHash.mdx @@ -0,0 +1,45 @@ +--- +title: getGenesisHash +hideTableOfContents: true +altRoutes: + - /docs/rpc/getGenesisHash +h1: getGenesisHash RPC Method +--- + +Returns the genesis hash + + + + + +### Parameters + +**None** + +### Result + +- `` - a Hash as base-58 encoded string + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + {"jsonrpc":"2.0","id":1, "method":"getGenesisHash"} +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": "GH7ome3EiwEr7tu9JuTh2dpYWBJK3z69Xm1ZE3MEE6JC", + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getHealth.mdx b/content/docs/rpc/http/getHealth.mdx new file mode 100644 index 000000000..2b2d52778 --- /dev/null +++ b/content/docs/rpc/http/getHealth.mdx @@ -0,0 +1,78 @@ +--- +title: getHealth +hideTableOfContents: true +altRoutes: + - /docs/rpc/getHealth +h1: getHealth RPC Method +--- + +Returns the current health of the node. A healthy node is one that is within +`HEALTH_CHECK_SLOT_DISTANCE` slots of the latest cluster confirmed slot. + + + + + +### Parameters + +**None** + +### Result + +If the node is healthy: "ok" + +If the node is unhealthy, a JSON RPC error response is returned. 
The specifics +of the error response are **UNSTABLE** and may change in the future + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + {"jsonrpc":"2.0","id":1, "method":"getHealth"} +' +``` + +### Response + +Healthy Result: + +```json +{ "jsonrpc": "2.0", "result": "ok", "id": 1 } +``` + +Unhealthy Result (generic): + +```json +{ + "jsonrpc": "2.0", + "error": { + "code": -32005, + "message": "Node is unhealthy", + "data": {} + }, + "id": 1 +} +``` + +Unhealthy Result (if additional information is available) + +```json +{ + "jsonrpc": "2.0", + "error": { + "code": -32005, + "message": "Node is behind by 42 slots", + "data": { + "numSlotsBehind": 42 + } + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getHighestSnapshotSlot.mdx b/content/docs/rpc/http/getHighestSnapshotSlot.mdx new file mode 100644 index 000000000..6bfd06d03 --- /dev/null +++ b/content/docs/rpc/http/getHighestSnapshotSlot.mdx @@ -0,0 +1,75 @@ +--- +title: getHighestSnapshotSlot +hideTableOfContents: true +altRoutes: + - /docs/rpc/getHighestSnapshotSlot +h1: getHighestSnapshotSlot RPC Method +--- + +Returns the highest slot information that the node has snapshots for. + +This will find the highest full snapshot slot, and the highest incremental +snapshot slot _based on_ the full snapshot slot, if there is one. + + + This method is only available in `solana-core` v1.9 or newer. Please use + [getSnapshotSlot](/docs/rpc/http/getsnapshotslot) for `solana-core` v1.8 and + below. + + + + + + +### Parameters + +**None** + +### Result + +When the node has a snapshot, this returns a JSON object with the following +fields: + +- `full: ` - Highest full snapshot slot +- `incremental: ` - Highest incremental snapshot slot _based on_ + `full` + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + {"jsonrpc":"2.0","id":1,"method":"getHighestSnapshotSlot"} +' +``` + +### Response + +Result when the node has a snapshot: + +```json +{ + "jsonrpc": "2.0", + "result": { + "full": 100, + "incremental": 110 + }, + "id": 1 +} +``` + +Result when the node has no snapshot: + +```json +{ + "jsonrpc": "2.0", + "error": { "code": -32008, "message": "No snapshot" }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getIdentity.mdx b/content/docs/rpc/http/getIdentity.mdx new file mode 100644 index 000000000..5b1bb87e2 --- /dev/null +++ b/content/docs/rpc/http/getIdentity.mdx @@ -0,0 +1,50 @@ +--- +title: getIdentity +hideTableOfContents: true +altRoutes: + - /docs/rpc/getIdentity +h1: getIdentity RPC Method +--- + +Returns the identity pubkey for the current node + + + + + +### Parameters + +**None** + +### Result + +The result field will be a JSON object with the following fields: + +- `identity` - the identity pubkey of the current node \(as a base-58 encoded + string\) + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + {"jsonrpc":"2.0","id":1, "method":"getIdentity"} +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { + "identity": "2r1F4iWqVcb8M1DbAjQuFpebkQHY9hcVU4WuW2DJBppN" + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getInflationGovernor.mdx b/content/docs/rpc/http/getInflationGovernor.mdx new file mode 100644 index 000000000..2c66d15ec --- /dev/null +++ b/content/docs/rpc/http/getInflationGovernor.mdx @@ -0,0 +1,71 @@ +--- +title: getInflationGovernor 
+hideTableOfContents: true +altRoutes: + - /docs/rpc/getInflationGovernor +h1: getInflationGovernor RPC Method +--- + +Returns the current inflation governor + + + + + +### Parameters + + + +Configuration object containing the following fields: + + + + + +### Result + +The result field will be a JSON object with the following fields: + +- `initial: ` - the initial inflation percentage from time 0 +- `terminal: ` - terminal inflation percentage +- `taper: ` - rate per year at which inflation is lowered. (Rate reduction + is derived using the target slot time in genesis config) +- `foundation: ` - percentage of total inflation allocated to the + foundation +- `foundationTerm: ` - duration of foundation pool inflation in years + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + {"jsonrpc":"2.0","id":1, "method":"getInflationGovernor"} +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { + "foundation": 0.05, + "foundationTerm": 7, + "initial": 0.15, + "taper": 0.15, + "terminal": 0.015 + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getInflationRate.mdx b/content/docs/rpc/http/getInflationRate.mdx new file mode 100644 index 000000000..74f2d0a57 --- /dev/null +++ b/content/docs/rpc/http/getInflationRate.mdx @@ -0,0 +1,56 @@ +--- +title: getInflationRate +hideTableOfContents: true +altRoutes: + - /docs/rpc/getInflationRate +h1: getInflationRate RPC Method +--- + +Returns the specific inflation values for the current epoch + + + + + +### Parameters + +**None** + +### Result + +The result field will be a JSON object with the following fields: + +- `total: ` - total inflation +- `validator: ` -inflation allocated to validators +- `foundation: ` - inflation allocated to the foundation +- `epoch: ` - epoch for which these values are valid + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + {"jsonrpc":"2.0","id":1, "method":"getInflationRate"} +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { + "epoch": 100, + "foundation": 0.001, + "total": 0.149, + "validator": 0.148 + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getInflationReward.mdx b/content/docs/rpc/http/getInflationReward.mdx new file mode 100644 index 000000000..37fa13680 --- /dev/null +++ b/content/docs/rpc/http/getInflationReward.mdx @@ -0,0 +1,96 @@ +--- +title: getInflationReward +hideTableOfContents: true +altRoutes: + - /docs/rpc/getInflationReward +h1: getInflationReward RPC Method +--- + +Returns the inflation / staking reward for a list of addresses for an epoch + + + + + +### Parameters + + + An array of addresses to query, as base-58 encoded strings + + + + +Configuration object containing the following fields: + + + + + An epoch for which the reward occurs. 
If omitted, the previous epoch will be + used + + + + The minimum slot that the request can be evaluated at + + + + +### Result + +The result field will be a JSON array with the following fields: + +- `epoch: ` - epoch for which reward occurred +- `effectiveSlot: ` - the slot in which the rewards are effective +- `amount: ` - reward amount in lamports +- `postBalance: ` - post balance of the account in lamports +- `commission: ` - vote account commission when the reward was + credited + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc": "2.0", + "id": 1, + "method": "getInflationReward", + "params": [ + [ + "6dmNQ5jwLeLk5REvio1JcMshcbvkYMwy26sJ8pbkvStu", + "BGsqMegLpV6n6Ve146sSX2dTjUMj3M92HnU8BbNRMhF2" + ], + {"epoch": 2} + ] + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": [ + { + "amount": 2500, + "effectiveSlot": 224, + "epoch": 2, + "postBalance": 499999442500 + }, + null + ], + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getLargestAccounts.mdx b/content/docs/rpc/http/getLargestAccounts.mdx new file mode 100644 index 000000000..dee212458 --- /dev/null +++ b/content/docs/rpc/http/getLargestAccounts.mdx @@ -0,0 +1,146 @@ +--- +title: getLargestAccounts +hideTableOfContents: true +altRoutes: + - /docs/rpc/getLargestAccounts +h1: getLargestAccounts RPC Method +--- + +Returns the 20 largest accounts, by lamport balance (results may be cached up to +two hours) + + + + + +### Parameters + + + +Configuration object containing the following fields: + + + + + filter results by account type + + + + + + +### Result + +The result will be an RpcResponse JSON object with `value` equal to an array of +`` containing: + +- `address: ` - base-58 encoded address of the account +- `lamports: ` - number of lamports in the account, as a u64 + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + {"jsonrpc":"2.0","id":1, "method":"getLargestAccounts"} +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { + "context": { + "slot": 54 + }, + "value": [ + { + "lamports": 999974, + "address": "99P8ZgtJYe1buSK8JXkvpLh8xPsCFuLYhz9hQFNw93WJ" + }, + { + "lamports": 42, + "address": "uPwWLo16MVehpyWqsLkK3Ka8nLowWvAHbBChqv2FZeL" + }, + { + "lamports": 42, + "address": "aYJCgU7REfu3XF8b3QhkqgqQvLizx8zxuLBHA25PzDS" + }, + { + "lamports": 42, + "address": "CTvHVtQ4gd4gUcw3bdVgZJJqApXE9nCbbbP4VTS5wE1D" + }, + { + "lamports": 20, + "address": "4fq3xJ6kfrh9RkJQsmVd5gNMvJbuSHfErywvEjNQDPxu" + }, + { + "lamports": 4, + "address": "AXJADheGVp9cruP8WYu46oNkRbeASngN5fPCMVGQqNHa" + }, + { + "lamports": 2, + "address": "8NT8yS6LiwNprgW4yM1jPPow7CwRUotddBVkrkWgYp24" + }, + { + "lamports": 1, + "address": "SysvarEpochSchedu1e111111111111111111111111" + }, + { + "lamports": 1, + "address": "11111111111111111111111111111111" + }, + { + "lamports": 1, + "address": "Stake11111111111111111111111111111111111111" + }, + { + "lamports": 1, + "address": "SysvarC1ock11111111111111111111111111111111" + }, + { + "lamports": 1, + "address": "StakeConfig11111111111111111111111111111111" + }, + { + "lamports": 1, + "address": "SysvarRent111111111111111111111111111111111" + }, + { + "lamports": 1, + "address": "Config1111111111111111111111111111111111111" + }, + { + "lamports": 1, + "address": "SysvarStakeHistory1111111111111111111111111" + }, + { + "lamports": 1, + "address": "SysvarRecentB1ockHashes11111111111111111111" + }, + { + 
"lamports": 1, + "address": "SysvarFees111111111111111111111111111111111" + }, + { + "lamports": 1, + "address": "Vote111111111111111111111111111111111111111" + } + ] + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getLatestBlockhash.mdx b/content/docs/rpc/http/getLatestBlockhash.mdx new file mode 100644 index 000000000..4de507cb5 --- /dev/null +++ b/content/docs/rpc/http/getLatestBlockhash.mdx @@ -0,0 +1,90 @@ +--- +title: getLatestBlockhash +hideTableOfContents: true +altRoutes: + - /docs/rpc/getLatestBlockhash +h1: getLatestBlockhash RPC Method +--- + +Returns the latest blockhash + + + This method is only available in `solana-core` v1.9 or newer. Please use + [getRecentBlockhash](/docs/rpc/http/getrecentblockhash) for `solana-core` v1.8 + and below. + + + + + + +### Parameters + + + +Configuration object containing the following fields: + + + + + The minimum slot that the request can be evaluated at + + + + +### Result + +`RpcResponse` - RpcResponse JSON object with `value` field set to a JSON +object including: + +- `blockhash: ` - a Hash as base-58 encoded string +- `lastValidBlockHeight: ` - last + [block height](/docs/terminology#block-height) at which the blockhash will + be valid + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "id":1, + "jsonrpc":"2.0", + "method":"getLatestBlockhash", + "params":[ + { + "commitment":"processed" + } + ] + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { + "context": { + "slot": 2792 + }, + "value": { + "blockhash": "EkSnNWid2cvwEVnVx9aBqawnmiCNiDgp3gUdkDPTKN1N", + "lastValidBlockHeight": 3090 + } + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getLeaderSchedule.mdx b/content/docs/rpc/http/getLeaderSchedule.mdx new file mode 100644 index 000000000..8bb920a6a --- /dev/null +++ b/content/docs/rpc/http/getLeaderSchedule.mdx @@ -0,0 +1,93 @@ +--- +title: getLeaderSchedule +hideTableOfContents: true +altRoutes: + - /docs/rpc/getLeaderSchedule +h1: getLeaderSchedule RPC Method +--- + +Returns the leader schedule for an epoch + + + + + +### Parameters + + + +Fetch the leader schedule for the epoch that corresponds to the provided slot. 
+ + + If unspecified, the leader schedule for the current epoch is fetched + + + + + + +Configuration object containing the following fields: + + + + + Only return results for this validator identity (base-58 encoded) + + + + +### Result + +Returns a result with one of the two following values: + +- `` - if requested epoch is not found, or +- `` - the result field will be a dictionary of validator identities, as + base-58 encoded strings, and their corresponding leader slot indices as values + (indices are relative to the first slot in the requested epoch) + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc": "2.0", + "id": 1, + "method": "getLeaderSchedule", + "params": [ + null, + { + "identity": "4Qkev8aNZcqFNSRhQzwyLMFSsi94jHqE8WNVTJzTP99F" + } + ] + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { + "4Qkev8aNZcqFNSRhQzwyLMFSsi94jHqE8WNVTJzTP99F": [ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, + 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, + 57, 58, 59, 60, 61, 62, 63 + ] + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getMaxRetransmitSlot.mdx b/content/docs/rpc/http/getMaxRetransmitSlot.mdx new file mode 100644 index 000000000..95022e5c7 --- /dev/null +++ b/content/docs/rpc/http/getMaxRetransmitSlot.mdx @@ -0,0 +1,42 @@ +--- +title: getMaxRetransmitSlot +hideTableOfContents: true +altRoutes: + - /docs/rpc/getMaxRetransmitSlot +h1: getMaxRetransmitSlot RPC Method +--- + +Get the max slot seen from retransmit stage. + + + + + +### Parameters + +**None** + +### Result + +`` - Slot number + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + {"jsonrpc":"2.0","id":1, "method":"getMaxRetransmitSlot"} +' +``` + +### Response + +```json +{ "jsonrpc": "2.0", "result": 1234, "id": 1 } +``` + + + diff --git a/content/docs/rpc/http/getMaxShredInsertSlot.mdx b/content/docs/rpc/http/getMaxShredInsertSlot.mdx new file mode 100644 index 000000000..c0cc321b7 --- /dev/null +++ b/content/docs/rpc/http/getMaxShredInsertSlot.mdx @@ -0,0 +1,42 @@ +--- +title: getMaxShredInsertSlot +hideTableOfContents: true +altRoutes: + - /docs/rpc/getMaxShredInsertSlot +h1: getMaxShredInsertSlot RPC Method +--- + +Get the max slot seen from after shred insert. + + + + + +### Parameters + +**None** + +### Result + +`` - Slot number + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + {"jsonrpc":"2.0","id":1, "method":"getMaxShredInsertSlot"} +' +``` + +### Response + +```json +{ "jsonrpc": "2.0", "result": 1234, "id": 1 } +``` + + + diff --git a/content/docs/rpc/http/getMinimumBalanceForRentExemption.mdx b/content/docs/rpc/http/getMinimumBalanceForRentExemption.mdx new file mode 100644 index 000000000..58244f2ba --- /dev/null +++ b/content/docs/rpc/http/getMinimumBalanceForRentExemption.mdx @@ -0,0 +1,61 @@ +--- +title: getMinimumBalanceForRentExemption +hideTableOfContents: true +altRoutes: + - /docs/rpc/getMinimumBalanceForRentExemption +h1: getMinimumBalanceForRentExemption RPC Method +--- + +Returns minimum balance required to make account rent exempt. 
+ + + + + +### Parameters + + + the Account's data length + + + + +Configuration object containing the following fields: + + + + + +### Result + +`` - minimum lamports required in the Account to remain rent free + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc": "2.0", "id": 1, + "method": "getMinimumBalanceForRentExemption", + "params": [50] + } +' +``` + +### Response + +```json +{ "jsonrpc": "2.0", "result": 500, "id": 1 } +``` + + + diff --git a/content/docs/rpc/http/getMultipleAccounts.mdx b/content/docs/rpc/http/getMultipleAccounts.mdx new file mode 100644 index 000000000..f66de839c --- /dev/null +++ b/content/docs/rpc/http/getMultipleAccounts.mdx @@ -0,0 +1,154 @@ +--- +title: getMultipleAccounts +hideTableOfContents: true +altRoutes: + - /docs/rpc/getMultipleAccounts +h1: getMultipleAccounts RPC Method +--- + +Returns the account information for a list of Pubkeys. + + + + + +### Parameters + + + An array of Pubkeys to query, as base-58 encoded strings (up to a maximum of + 100) + + + + +Configuration object containing the following fields: + + + + + The minimum slot that the request can be evaluated at + + + + Request a slice of the account's data. + +- `length: ` - number of bytes to return +- `offset: ` - byte offset from which to start reading + + + Data slicing is only available for `base58`, `base64`, or `base64+zstd` + encodings. + + + + + + +encoding format for the returned Account data + + + +
+ +- `base58` is slow and limited to less than 129 bytes of Account data. +- `base64` will return base64 encoded data for Account data of any size. +- `base64+zstd` compresses the Account data using + [Zstandard](https://facebook.github.io/zstd/) and base64-encodes the result. +- `jsonParsed` encoding attempts to use program-specific state parsers to return + more human-readable and explicit account state data. +- If `jsonParsed` is requested but a parser cannot be found, the field falls + back to `base64` encoding, detectable when the `data` field is type + ``. + +
+ +
+ +
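+
+For illustration, a minimal sketch using the `Connection` client from
+`@solana/web3.js` (where the wrapper is named `getMultipleAccountsInfo`),
+reusing the two example addresses from the code sample below:
+
+```typescript
+import { Connection, PublicKey, clusterApiUrl } from "@solana/web3.js";
+
+const connection = new Connection(clusterApiUrl("devnet"), "confirmed");
+
+const publicKeys = [
+  new PublicKey("vines1vzrYbzLMRdu58ou5XTby4qAqVRLmqo36NKPTg"),
+  new PublicKey("4fYNw3dojWmQ4dXtSGE9epjRGy9pFSx62YypT7avPYvA"),
+];
+
+const accounts = await connection.getMultipleAccountsInfo(publicKeys);
+accounts.forEach((account, i) => {
+  console.log(
+    publicKeys[i].toBase58(),
+    account === null ? "not found" : `${account.lamports} lamports`,
+  );
+});
+```
+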
+ +### Result + +The result will be a JSON object with `value` equal to an array of: + +- `` - if the account at that Pubkey doesn't exist, or +- `` - a JSON object containing: + - `lamports: ` - number of lamports assigned to this account, as a u64 + - `owner: ` - base-58 encoded Pubkey of the program this account has + been assigned to + - `data: <[string, encoding]|object>` - data associated with the account, + either as encoded binary data or JSON format `{: }` - + depending on encoding parameter + - `executable: ` - boolean indicating if the account contains a program + \(and is strictly read-only\) + - `rentEpoch: ` - the epoch at which this account will next owe rent, as + u64 + - `space: ` - the data size of the account + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc": "2.0", + "id": 1, + "method": "getMultipleAccounts", + "params": [ + [ + "vines1vzrYbzLMRdu58ou5XTby4qAqVRLmqo36NKPTg", + "4fYNw3dojWmQ4dXtSGE9epjRGy9pFSx62YypT7avPYvA" + ], + { + "encoding": "base58" + } + ] + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { + "context": { "apiVersion": "2.0.15", "slot": 341197247 }, + "value": [ + { + "data": ["", "base58"], + "executable": false, + "lamports": 88849814690250, + "owner": "11111111111111111111111111111111", + "rentEpoch": 18446744073709551615, + "space": 0 + }, + { + "data": ["", "base58"], + "executable": false, + "lamports": 998763433, + "owner": "2WRuhE4GJFoE23DYzp2ij6ZnuQ8p9mJeU6gDgfsjR4or", + "rentEpoch": 18446744073709551615, + "space": 0 + } + ] + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getProgramAccounts.mdx b/content/docs/rpc/http/getProgramAccounts.mdx new file mode 100644 index 000000000..6d4d16ad3 --- /dev/null +++ b/content/docs/rpc/http/getProgramAccounts.mdx @@ -0,0 +1,179 @@ +--- +title: getProgramAccounts +hideTableOfContents: true +altRoutes: + - /docs/rpc/getProgramAccounts +h1: getProgramAccounts RPC Method +--- + +Returns all accounts owned by the provided program Pubkey + + + + + +### Parameters + + + Pubkey of program, as base-58 encoded string + + + + +Configuration object containing the following fields: + + + + + The minimum slot that the request can be evaluated at + + + + wrap the result in an RpcResponse JSON object + + + + +encoding format for the returned Account data + + + +
+ +- `base58` is slow and limited to less than 129 bytes of Account data. +- `base64` will return base64 encoded data for Account data of any size. +- `base64+zstd` compresses the Account data using + [Zstandard](https://facebook.github.io/zstd/) and base64-encodes the result. +- `jsonParsed` encoding attempts to use program-specific state parsers to return + more human-readable and explicit account state data. +- If `jsonParsed` is requested but a parser cannot be found, the field falls + back to `base64` encoding, detectable when the `data` field is type + ``. + +
+ +
+ + + Request a slice of the account's data. + +- `length: ` - number of bytes to return +- `offset: ` - byte offset from which to start reading + + + Data slicing is only available for `base58`, `base64`, or `base64+zstd` + encodings. + + + + + + +filter results using up to 4 filter objects + + + The resultant account(s) must meet **ALL** filter criteria to be included in + the returned results + + + + +
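+
+As a client-side sketch, the same `dataSize` and `memcmp` filters from the
+curl sample below can be passed to the `Connection` client from
+`@solana/web3.js` (the program id is the example value, not a real
+deployment):
+
+```typescript
+import { Connection, PublicKey, clusterApiUrl } from "@solana/web3.js";
+
+const connection = new Connection(clusterApiUrl("devnet"), "confirmed");
+const programId = new PublicKey("4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T");
+
+// only return accounts that are exactly 17 bytes and match the memcmp filter
+const accounts = await connection.getProgramAccounts(programId, {
+  filters: [{ dataSize: 17 }, { memcmp: { offset: 4, bytes: "3Mc6vR" } }],
+});
+
+for (const { pubkey, account } of accounts) {
+  console.log(pubkey.toBase58(), account.lamports);
+}
+```
+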
+ +### Result + +By default, the result field will be an array of JSON objects. + + + If the `withContext` flag is set, the array will be wrapped in an + `RpcResponse` JSON object. + + +The resultant response array will contain: + +- `pubkey: ` - the account Pubkey as base-58 encoded string +- `account: ` - a JSON object, with the following sub fields: + - `lamports: ` - number of lamports assigned to this account, as a u64 + - `owner: ` - base-58 encoded Pubkey of the program this account has + been assigned to + - `data: <[string,encoding]|object>` - data associated with the account, + either as encoded binary data or JSON format `{: }` - + depending on encoding parameter + - `executable: ` - boolean indicating if the account contains a program + \(and is strictly read-only\) + - `rentEpoch: ` - the epoch at which this account will next owe rent, as + u64 + - `space: ` - the data size of the account + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc": "2.0", + "id": 1, + "method": "getProgramAccounts", + "params": [ + "4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T", + { + "filters": [ + { + "dataSize": 17 + }, + { + "memcmp": { + "offset": 4, + "bytes": "3Mc6vR" + } + } + ] + } + ] + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": [ + { + "account": { + "data": "2R9jLfiAQ9bgdcw6h8s44439", + "executable": false, + "lamports": 15298080, + "owner": "4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T", + "rentEpoch": 28, + "space": 42 + }, + "pubkey": "CxELquR1gPP8wHe33gZ4QxqGB3sZ9RSwsJ2KshVewkFY" + } + ], + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getRecentPerformanceSamples.mdx b/content/docs/rpc/http/getRecentPerformanceSamples.mdx new file mode 100644 index 000000000..8026498bb --- /dev/null +++ b/content/docs/rpc/http/getRecentPerformanceSamples.mdx @@ -0,0 +1,100 @@ +--- +title: getRecentPerformanceSamples +hideTableOfContents: true +altRoutes: + - /docs/rpc/getRecentPerformanceSamples +h1: getRecentPerformanceSamples RPC Method +--- + +Returns a list of recent performance samples, in reverse slot order. Performance +samples are taken every 60 seconds and include the number of transactions and +slots that occur in a given time window. + + + + + +### Parameters + + + +number of samples to return (maximum 720) + + + +### Result + +An array of `RpcPerfSample` with the following fields: + +- `slot: ` - Slot in which sample was taken at +- `numTransactions: ` - Number of transactions processed during the sample + period +- `numSlots: ` - Number of slots completed during the sample period +- `samplePeriodSecs: ` - Number of seconds in a sample window +- `numNonVoteTransactions: ` - Number of non-vote transactions processed + during the sample period. + + + `numNonVoteTransactions` is present starting with v1.15. To get a number of + voting transactions compute: +
+ `numTransactions - numNonVoteTransactions` +
+ + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc":"2.0", "id":1, + "method": "getRecentPerformanceSamples", + "params": [4] + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": [ + { + "numSlots": 126, + "numTransactions": 126, + "numNonVoteTransactions": 1, + "samplePeriodSecs": 60, + "slot": 348125 + }, + { + "numSlots": 126, + "numTransactions": 126, + "numNonVoteTransactions": 1, + "samplePeriodSecs": 60, + "slot": 347999 + }, + { + "numSlots": 125, + "numTransactions": 125, + "numNonVoteTransactions": 0, + "samplePeriodSecs": 60, + "slot": 347873 + }, + { + "numSlots": 125, + "numTransactions": 125, + "numNonVoteTransactions": 0, + "samplePeriodSecs": 60, + "slot": 347748 + } + ], + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getRecentPrioritizationFees.mdx b/content/docs/rpc/http/getRecentPrioritizationFees.mdx new file mode 100644 index 000000000..383e79f1d --- /dev/null +++ b/content/docs/rpc/http/getRecentPrioritizationFees.mdx @@ -0,0 +1,93 @@ +--- +title: getRecentPrioritizationFees +hideTableOfContents: true +altRoutes: + - /docs/rpc/getRecentPrioritizationFees +h1: getRecentPrioritizationFees RPC Method +--- + +Returns a list of prioritization fees from recent blocks. + + + Currently, a node's prioritization-fee cache stores data from up to 150 + blocks. + + + + + + +### Parameters + + + +An array of Account addresses (up to a maximum of 128 addresses), as base-58 +encoded strings + + + If this parameter is provided, the response will reflect a fee to land a + transaction locking all of the provided accounts as writable. + + + + +### Result + +An array of `RpcPrioritizationFee` with the following fields: + +- `slot: ` - slot in which the fee was observed +- `prioritizationFee: ` - the per-compute-unit fee paid by at least one + successfully landed transaction, specified in increments of micro-lamports + (0.000001 lamports) + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc":"2.0", "id":1, + "method": "getRecentPrioritizationFees", + "params": [ + ["CxELquR1gPP8wHe33gZ4QxqGB3sZ9RSwsJ2KshVewkFY"] + ] + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": [ + { + "slot": 348125, + "prioritizationFee": 0 + }, + { + "slot": 348126, + "prioritizationFee": 1000 + }, + { + "slot": 348127, + "prioritizationFee": 500 + }, + { + "slot": 348128, + "prioritizationFee": 0 + }, + { + "slot": 348129, + "prioritizationFee": 1234 + } + ], + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getSignatureStatuses.mdx b/content/docs/rpc/http/getSignatureStatuses.mdx new file mode 100644 index 000000000..330d81af5 --- /dev/null +++ b/content/docs/rpc/http/getSignatureStatuses.mdx @@ -0,0 +1,115 @@ +--- +title: getSignatureStatuses +hideTableOfContents: true +altRoutes: + - /docs/rpc/getSignatureStatuses +h1: getSignatureStatuses RPC Method +--- + +Returns the statuses of a list of signatures. Each signature must be a +[txid](/docs/terminology#transaction-id), the first signature of a +transaction. + + + Unless the `searchTransactionHistory` configuration parameter is included, + this method only searches the recent status cache of signatures, which retains + statuses for all active slots plus `MAX_RECENT_BLOCKHASHES` rooted slots. 
+ + + + + + +### Parameters + + + An array of transaction signatures to confirm, as base-58 encoded strings (up + to a maximum of 256) + + + + +Configuration object containing the following fields: + + + +if `true` - a Solana node will search its ledger cache for any signatures not +found in the recent status cache + + + + + +### Result + +An array of `RpcResponse` consisting of either: + +- `` - Unknown transaction, or +- `` + - `slot: ` - The slot the transaction was processed + - `confirmations: ` - Number of blocks since signature + confirmation, null if rooted, as well as finalized by a supermajority of the + cluster + - `err: ` - Error if transaction failed, null if transaction + succeeded. See + [TransactionError definitions](https://github.com/solana-labs/solana/blob/c0c60386544ec9a9ec7119229f37386d9f070523/sdk/src/transaction/error.rs#L13) + - `confirmationStatus: ` - The transaction's cluster confirmation + status; Either `processed`, `confirmed`, or `finalized`. See + [Commitment](/docs/rpc/#configuring-state-commitment) for more on + optimistic confirmation. + - DEPRECATED: `status: ` - Transaction status + - `"Ok": ` - Transaction was successful + - `"Err": ` - Transaction failed with TransactionError + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc": "2.0", + "id": 1, + "method": "getSignatureStatuses", + "params": [ + [ + "5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW" + ], + { + "searchTransactionHistory": true + } + ] + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { + "context": { + "slot": 82 + }, + "value": [ + { + "slot": 48, + "confirmations": null, + "err": null, + "status": { + "Ok": null + }, + "confirmationStatus": "finalized" + }, + null + ] + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getSignaturesForAddress.mdx b/content/docs/rpc/http/getSignaturesForAddress.mdx new file mode 100644 index 000000000..7c2c0bb59 --- /dev/null +++ b/content/docs/rpc/http/getSignaturesForAddress.mdx @@ -0,0 +1,115 @@ +--- +title: getSignaturesForAddress +hideTableOfContents: true +altRoutes: + - /docs/rpc/getSignaturesForAddress +h1: getSignaturesForAddress RPC Method +--- + +Returns signatures for confirmed transactions that include the given address in +their `accountKeys` list. Returns signatures backwards in time from the provided +signature or most recent confirmed block + + + + + +### Parameters + + + Account address as base-58 encoded string + + + + +Configuration object containing the following fields: + + + + + The minimum slot that the request can be evaluated at + + + + maximum transaction signatures to return (between 1 and 1,000). + + + + start searching backwards from this transaction signature. If not provided the + search starts from the top of the highest max confirmed block. + + + + search until this transaction signature, if found before limit reached + + + + +### Result + +An array of ``, ordered from **newest** to **oldest** transaction, +containing transaction signature information with the following fields: + +- `signature: ` - transaction signature as base-58 encoded string +- `slot: ` - The slot that contains the block with the transaction +- `err: ` - Error if transaction failed, null if transaction + succeeded. 
See + [TransactionError definitions](https://github.com/solana-labs/solana/blob/c0c60386544ec9a9ec7119229f37386d9f070523/sdk/src/transaction/error.rs#L13) + for more info. +- `memo: ` - Memo associated with the transaction, null if no memo + is present +- `blockTime: ` - estimated production time, as Unix timestamp + (seconds since the Unix epoch) of when transaction was processed. null if not + available. +- `confirmationStatus: ` - The transaction's cluster confirmation + status; Either `processed`, `confirmed`, or `finalized`. See + [Commitment](/docs/rpc/#configuring-state-commitment) for more on + optimistic confirmation. + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc": "2.0", + "id": 1, + "method": "getSignaturesForAddress", + "params": [ + "Vote111111111111111111111111111111111111111", + { + "limit": 1 + } + ] + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": [ + { + "err": null, + "memo": null, + "signature": "5h6xBEauJ3PK6SWCZ1PGjBvj8vDdWG3KpwATGy1ARAXFSDwt8GFXM7W5Ncn16wmqokgpiKRLuS83KUxyZyv2sUYv", + "slot": 114, + "blockTime": null + } + ], + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getSlot.mdx b/content/docs/rpc/http/getSlot.mdx new file mode 100644 index 000000000..3d916e732 --- /dev/null +++ b/content/docs/rpc/http/getSlot.mdx @@ -0,0 +1,58 @@ +--- +title: getSlot +hideTableOfContents: true +altRoutes: + - /docs/rpc/getSlot +h1: getSlot RPC Method +--- + +Returns the slot that has reached the +[given or default commitment level](/docs/rpc/#configuring-state-commitment) + + + + + +### Parameters + + + +Configuration object containing the following fields: + + + + + The minimum slot that the request can be evaluated at + + + + +### Result + +`` - Current slot + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + {"jsonrpc":"2.0","id":1, "method":"getSlot"} +' +``` + +### Response + +```json +{ "jsonrpc": "2.0", "result": 1234, "id": 1 } +``` + + + diff --git a/content/docs/rpc/http/getSlotLeader.mdx b/content/docs/rpc/http/getSlotLeader.mdx new file mode 100644 index 000000000..567b1bee2 --- /dev/null +++ b/content/docs/rpc/http/getSlotLeader.mdx @@ -0,0 +1,61 @@ +--- +title: getSlotLeader +hideTableOfContents: true +altRoutes: + - /docs/rpc/getSlotLeader +h1: getSlotLeader RPC Method +--- + +Returns the current slot leader + + + + + +### Parameters + + + +Configuration object containing the following fields: + + + + + The minimum slot that the request can be evaluated at + + + + +### Result + +`` - Node identity Pubkey as base-58 encoded string + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + {"jsonrpc":"2.0","id":1, "method":"getSlotLeader"} +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": "ENvAW7JScgYq6o4zKZwewtkzzJgDzuJAFxYasvmEQdpS", + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getSlotLeaders.mdx b/content/docs/rpc/http/getSlotLeaders.mdx new file mode 100644 index 000000000..e89708ad0 --- /dev/null +++ b/content/docs/rpc/http/getSlotLeaders.mdx @@ -0,0 +1,73 @@ +--- +title: getSlotLeaders +hideTableOfContents: true +altRoutes: + - /docs/rpc/getSlotLeaders +h1: getSlotLeaders RPC Method +--- + +Returns the slot leaders for a given slot range + + + + + +### Parameters + + + Start slot, as u64 integer + + + + Limit, as u64 integer (between 1 and 5,000) + + 
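+
+For illustration, a minimal sketch using the `Connection` client from
+`@solana/web3.js` (assuming a devnet endpoint) that queries the next 10
+leaders starting from the current slot:
+
+```typescript
+import { Connection, clusterApiUrl } from "@solana/web3.js";
+
+const connection = new Connection(clusterApiUrl("devnet"), "confirmed");
+
+// query the next 10 leaders starting at the current slot
+const startSlot = await connection.getSlot();
+const leaders = await connection.getSlotLeaders(startSlot, 10);
+leaders.forEach((leader, i) => {
+  console.log(`slot ${startSlot + i}: ${leader.toBase58()}`);
+});
+```
+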
+### Result + +`` - array of Node identity public keys as base-58 encoded +strings + + + + + +### Code sample + +If the current slot is `#99` - query the next `10` leaders with the following +request: + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc":"2.0", "id": 1, + "method": "getSlotLeaders", + "params": [100, 10] + } +' +``` + +### Response + +The first leader returned is the leader for slot `#100`: + +```json +{ + "jsonrpc": "2.0", + "result": [ + "ChorusmmK7i1AxXeiTtQgQZhQNiXYU84ULeaYF1EH15n", + "ChorusmmK7i1AxXeiTtQgQZhQNiXYU84ULeaYF1EH15n", + "ChorusmmK7i1AxXeiTtQgQZhQNiXYU84ULeaYF1EH15n", + "ChorusmmK7i1AxXeiTtQgQZhQNiXYU84ULeaYF1EH15n", + "Awes4Tr6TX8JDzEhCZY2QVNimT6iD1zWHzf1vNyGvpLM", + "Awes4Tr6TX8JDzEhCZY2QVNimT6iD1zWHzf1vNyGvpLM", + "Awes4Tr6TX8JDzEhCZY2QVNimT6iD1zWHzf1vNyGvpLM", + "Awes4Tr6TX8JDzEhCZY2QVNimT6iD1zWHzf1vNyGvpLM", + "DWvDTSh3qfn88UoQTEKRV2JnLt5jtJAVoiCo3ivtMwXP", + "DWvDTSh3qfn88UoQTEKRV2JnLt5jtJAVoiCo3ivtMwXP" + ], + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getStakeMinimumDelegation.mdx b/content/docs/rpc/http/getStakeMinimumDelegation.mdx new file mode 100644 index 000000000..107432c15 --- /dev/null +++ b/content/docs/rpc/http/getStakeMinimumDelegation.mdx @@ -0,0 +1,67 @@ +--- +title: getStakeMinimumDelegation +hideTableOfContents: true +altRoutes: + - /docs/rpc/getStakeMinimumDelegation +h1: getStakeMinimumDelegation RPC Method +--- + +Returns the stake minimum delegation, in lamports. + + + + + +### Parameters + + + +Configuration object containing the following fields: + + + + + +### Result + +The result will be an RpcResponse JSON object with `value` equal to: + +- `` - The stake minimum delegation, in lamports + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc":"2.0", "id":1, + "method": "getStakeMinimumDelegation" + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { + "context": { + "slot": 501 + }, + "value": 1000000000 + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getSupply.mdx b/content/docs/rpc/http/getSupply.mdx new file mode 100644 index 000000000..a879de051 --- /dev/null +++ b/content/docs/rpc/http/getSupply.mdx @@ -0,0 +1,84 @@ +--- +title: getSupply +hideTableOfContents: true +altRoutes: + - /docs/rpc/getSupply +h1: getSupply RPC Method +--- + +Returns information about the current supply. + + + + + +### Parameters + + + +Configuration object containing the following fields: + + + + + exclude non circulating accounts list from response + + + + +### Result + +The result will be an RpcResponse JSON object with `value` equal to a JSON +object containing: + +- `total: ` - Total supply in lamports +- `circulating: ` - Circulating supply in lamports +- `nonCirculating: ` - Non-circulating supply in lamports +- `nonCirculatingAccounts: ` - an array of account addresses of + non-circulating accounts, as strings. If `excludeNonCirculatingAccountsList` + is enabled, the returned array will be empty. 
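+
+A minimal client-side sketch using the `Connection` client from
+`@solana/web3.js` (assuming a devnet endpoint), excluding the non-circulating
+accounts list from the response:
+
+```typescript
+import { Connection, clusterApiUrl } from "@solana/web3.js";
+
+const connection = new Connection(clusterApiUrl("devnet"), "confirmed");
+
+const supply = await connection.getSupply({
+  excludeNonCirculatingAccountsList: true,
+});
+console.log("total (lamports):", supply.value.total);
+console.log("circulating (lamports):", supply.value.circulating);
+console.log("nonCirculating (lamports):", supply.value.nonCirculating);
+```
+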
+ + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + {"jsonrpc":"2.0", "id":1, "method":"getSupply"} +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { + "context": { + "slot": 1114 + }, + "value": { + "circulating": 16000, + "nonCirculating": 1000000, + "nonCirculatingAccounts": [ + "FEy8pTbP5fEoqMV1GdTz83byuA8EKByqYat1PKDgVAq5", + "9huDUZfxoJ7wGMTffUE7vh1xePqef7gyrLJu9NApncqA", + "3mi1GmwEE3zo2jmfDuzvjSX9ovRXsDUKHvsntpkhuLJ9", + "BYxEJTDerkaRWBem3XgnVcdhppktBXa2HbkHPKj2Ui4Z" + ], + "total": 1016000 + } + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getTokenAccountBalance.mdx b/content/docs/rpc/http/getTokenAccountBalance.mdx new file mode 100644 index 000000000..9bffd4c0a --- /dev/null +++ b/content/docs/rpc/http/getTokenAccountBalance.mdx @@ -0,0 +1,90 @@ +--- +title: getTokenAccountBalance +hideTableOfContents: true +altRoutes: + - /docs/rpc/getTokenAccountBalance +h1: getTokenAccountBalance RPC Method +--- + +Returns the token balance of an SPL Token account. + + + + + +### Parameters + + + Pubkey of Token account to query, as base-58 encoded string + + + + +Configuration object containing the following fields: + + + + + +### Result + +The result will be an RpcResponse JSON object with `value` equal to a JSON +object containing: + +- `amount: ` - the raw balance without decimals, a string representation + of u64 +- `decimals: ` - number of base 10 digits to the right of the decimal place +- `uiAmount: ` - the balance, using mint-prescribed decimals + **DEPRECATED** +- `uiAmountString: ` - the balance as a string, using mint-prescribed + decimals + +For more details on returned data, the +[Token Balances Structure](/docs/rpc/json-structures#token-balances) response +from [getBlock](/docs/rpc/http/getblock) follows a similar structure. + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc": "2.0", "id": 1, + "method": "getTokenAccountBalance", + "params": [ + "7fUAJdStEuGbc3sM84cKRL6yYaaSstyLSU4ve5oovLS7" + ] + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { + "context": { + "slot": 1114 + }, + "value": { + "amount": "9864", + "decimals": 2, + "uiAmount": 98.64, + "uiAmountString": "98.64" + } + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getTokenAccountsByDelegate.mdx b/content/docs/rpc/http/getTokenAccountsByDelegate.mdx new file mode 100644 index 000000000..c62d78457 --- /dev/null +++ b/content/docs/rpc/http/getTokenAccountsByDelegate.mdx @@ -0,0 +1,189 @@ +--- +title: getTokenAccountsByDelegate +hideTableOfContents: true +altRoutes: + - /docs/rpc/getTokenAccountsByDelegate +h1: getTokenAccountsByDelegate RPC Method +--- + +Returns all SPL Token accounts by approved Delegate. + + + + + +### Parameters + + + Pubkey of account delegate to query, as base-58 encoded string + + + + +A JSON object with one of the following fields: + +- `mint: ` - Pubkey of the specific token Mint to limit accounts to, as + base-58 encoded string; or +- `programId: ` - Pubkey of the Token program that owns the accounts, as + base-58 encoded string + + + + + +Configuration object containing the following fields: + + + + + The minimum slot that the request can be evaluated at + + + + Request a slice of the account's data. 
+ +- `length: ` - number of bytes to return +- `offset: ` - byte offset from which to start reading + + + Data slicing is only available for `base58`, `base64`, or `base64+zstd` + encodings. + + + + + + +Encoding format for Account data + + + +
+ +- `base58` is slow and limited to less than 129 bytes of Account data. +- `base64` will return base64 encoded data for Account data of any size. +- `base64+zstd` compresses the Account data using + [Zstandard](https://facebook.github.io/zstd/) and base64-encodes the result. +- `jsonParsed` encoding attempts to use program-specific state parsers to return + more human-readable and explicit account state data. +- If `jsonParsed` is requested but a parser cannot be found, the field falls + back to `base64` encoding, detectable when the `data` field is type `string`. + +
+ +
+ +
+ +### Result + +The result will be an RpcResponse JSON object with `value` equal to an array of +JSON objects, which will contain: + +- `pubkey: ` - the account Pubkey as base-58 encoded string +- `account: ` - a JSON object, with the following sub fields: + - `lamports: ` - number of lamports assigned to this account, as a u64 + - `owner: ` - base-58 encoded Pubkey of the program this account has + been assigned to + - `data: ` - Token state data associated with the account, either as + encoded binary data or in JSON format `{: }` + - `executable: ` - boolean indicating if the account contains a program + (and is strictly read-only\) + - `rentEpoch: ` - the epoch at which this account will next owe rent, as + u64 + - `space: ` - the data size of the account + +When the data is requested with the `jsonParsed` encoding a format similar to +that of the [Token Balances Structure](/docs/rpc/json-structures#token-balances) +can be expected inside the structure, both for the `tokenAmount` and the +`delegatedAmount` - with the latter being an optional object. + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc": "2.0", + "id": 1, + "method": "getTokenAccountsByDelegate", + "params": [ + "4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T", + { + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA" + }, + { + "encoding": "jsonParsed" + } + ] + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { + "context": { + "slot": 1114 + }, + "value": [ + { + "account": { + "data": { + "program": "spl-token", + "parsed": { + "info": { + "tokenAmount": { + "amount": "1", + "decimals": 1, + "uiAmount": 0.1, + "uiAmountString": "0.1" + }, + "delegate": "4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T", + "delegatedAmount": { + "amount": "1", + "decimals": 1, + "uiAmount": 0.1, + "uiAmountString": "0.1" + }, + "state": "initialized", + "isNative": false, + "mint": "3wyAj7Rt1TWVPZVteFJPLa26JmLvdb1CAKEFZm3NY75E", + "owner": "CnPoSPKXu7wJqxe59Fs72tkBeALovhsCxYeFwPCQH9TD" + }, + "type": "account" + }, + "space": 165 + }, + "executable": false, + "lamports": 1726080, + "owner": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "rentEpoch": 4, + "space": 165 + }, + "pubkey": "28YTZEwqtMHWrhWcvv34se7pjS7wctgqzCPB3gReCFKp" + } + ] + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getTokenAccountsByOwner.mdx b/content/docs/rpc/http/getTokenAccountsByOwner.mdx new file mode 100644 index 000000000..103f3c86c --- /dev/null +++ b/content/docs/rpc/http/getTokenAccountsByOwner.mdx @@ -0,0 +1,209 @@ +--- +title: getTokenAccountsByOwner +hideTableOfContents: true +altRoutes: + - /docs/rpc/getTokenAccountsByOwner +h1: getTokenAccountsByOwner RPC Method +--- + +Returns all SPL Token accounts by token owner. + + + + + +### Parameters + + + Pubkey of account delegate to query, as base-58 encoded string + + + + +A JSON object with either one of the following fields: + +- `mint: ` - Pubkey of the specific token Mint to limit accounts to, as + base-58 encoded string; or +- `programId: ` - Pubkey of the Token program that owns the accounts, as + base-58 encoded string + + + + + +Configuration object containing the following fields: + + + + + The minimum slot that the request can be evaluated at + + + + Request a slice of the account's data. 
+ +- `length: ` - number of bytes to return +- `offset: ` - byte offset from which to start reading + + + Data slicing is only available for `base58`, `base64`, or `base64+zstd` + encodings. + + + + + + +Encoding format for Account data + + + +
+ +- `base58` is slow and limited to less than 129 bytes of Account data. +- `base64` will return base64 encoded data for Account data of any size. +- `base64+zstd` compresses the Account data using + [Zstandard](https://facebook.github.io/zstd/) and base64-encodes the result. +- `jsonParsed` encoding attempts to use program-specific state parsers to return + more human-readable and explicit account state data. +- If `jsonParsed` is requested but a parser cannot be found, the field falls + back to `base64` encoding, detectable when the `data` field is type `string`. + +
+ +
+ +
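+As a point of reference before the result layout, here is a minimal TypeScript
+sketch of this query, assuming the legacy `@solana/web3.js` `Connection` client
+and reusing the owner and Token program addresses from the curl sample below:
+
+```typescript
+import { Connection, PublicKey, clusterApiUrl } from "@solana/web3.js";
+
+const connection = new Connection(clusterApiUrl("devnet"), "confirmed");
+
+// Owner and Token program taken from the curl sample below
+const owner = new PublicKey("A1TMhSGzQxMr1TboBKtgixKz1sS6REASMxPo1qsyTSJd");
+const tokenProgram = new PublicKey(
+  "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA",
+);
+
+// getParsedTokenAccountsByOwner requests jsonParsed account data
+const accounts = await connection.getParsedTokenAccountsByOwner(owner, {
+  programId: tokenProgram,
+});
+for (const { pubkey, account } of accounts.value) {
+  const info = account.data.parsed.info;
+  console.log(pubkey.toBase58(), info.tokenAmount.uiAmountString);
+}
+```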
+ +### Result + +The result will be an RpcResponse JSON object with `value` equal to an array of +JSON objects, which will contain: + +- `pubkey: ` - the account Pubkey as base-58 encoded string +- `account: ` - a JSON object, with the following sub fields: + - `lamports: ` - number of lamports assigned to this account, as a u64 + - `owner: ` - base-58 encoded Pubkey of the program this account has + been assigned to + - `data: ` - Token state data associated with the account, either as + encoded binary data or in JSON format `{: }` + - `executable: ` - boolean indicating if the account contains a program + \(and is strictly read-only\) + - `rentEpoch: ` - the epoch at which this account will next owe rent, as + u64 + - `space: ` - the data size of the account + +When the data is requested with the `jsonParsed` encoding a format similar to +that of the [Token Balances Structure](/docs/rpc/json-structures#token-balances) +can be expected inside the structure, both for the `tokenAmount` and the +`delegatedAmount` - with the latter being an optional object. + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc": "2.0", + "id": 1, + "method": "getTokenAccountsByOwner", + "params": [ + "A1TMhSGzQxMr1TboBKtgixKz1sS6REASMxPo1qsyTSJd", + { + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA" + }, + { + "encoding": "jsonParsed" + } + ] + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { + "context": { "apiVersion": "2.0.15", "slot": 341197933 }, + "value": [ + { + "account": { + "data": { + "parsed": { + "info": { + "isNative": false, + "mint": "2cHr7QS3xfuSV8wdxo3ztuF4xbiarF6Nrgx3qpx3HzXR", + "owner": "A1TMhSGzQxMr1TboBKtgixKz1sS6REASMxPo1qsyTSJd", + "state": "initialized", + "tokenAmount": { + "amount": "420000000000000", + "decimals": 6, + "uiAmount": 420000000.0, + "uiAmountString": "420000000" + } + }, + "type": "account" + }, + "program": "spl-token", + "space": 165 + }, + "executable": false, + "lamports": 2039280, + "owner": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "rentEpoch": 18446744073709551615, + "space": 165 + }, + "pubkey": "BGocb4GEpbTFm8UFV2VsDSaBXHELPfAXrvd4vtt8QWrA" + }, + { + "account": { + "data": { + "parsed": { + "info": { + "isNative": false, + "mint": "4KVSsAtsG8JByKfB2jYWgGwvVR9WcBSUfsqpTSL9c3Jr", + "owner": "A1TMhSGzQxMr1TboBKtgixKz1sS6REASMxPo1qsyTSJd", + "state": "initialized", + "tokenAmount": { + "amount": "10000000000000", + "decimals": 9, + "uiAmount": 10000.0, + "uiAmountString": "10000" + } + }, + "type": "account" + }, + "program": "spl-token", + "space": 165 + }, + "executable": false, + "lamports": 2039280, + "owner": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "rentEpoch": 18446744073709551615, + "space": 165 + }, + "pubkey": "9PwCPoWJ75LSgZeGMubXBdufYMVd66HrcF78QzW6ZHkV" + } + ] + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getTokenLargestAccounts.mdx b/content/docs/rpc/http/getTokenLargestAccounts.mdx new file mode 100644 index 000000000..0342b0bc0 --- /dev/null +++ b/content/docs/rpc/http/getTokenLargestAccounts.mdx @@ -0,0 +1,97 @@ +--- +title: getTokenLargestAccounts +hideTableOfContents: true +altRoutes: + - /docs/rpc/getTokenLargestAccounts +h1: getTokenLargestAccounts RPC Method +--- + +Returns the 20 largest accounts of a particular SPL Token type. 
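+As a quick illustration, a minimal TypeScript sketch of this call, assuming the
+legacy `@solana/web3.js` `Connection` client and reusing the mint address from
+the curl sample further down:
+
+```typescript
+import { Connection, PublicKey, clusterApiUrl } from "@solana/web3.js";
+
+const connection = new Connection(clusterApiUrl("devnet"), "confirmed");
+
+// Mint address taken from the curl sample below
+const mint = new PublicKey("3wyAj7Rt1TWVPZVteFJPLa26JmLvdb1CAKEFZm3NY75E");
+
+const largest = await connection.getTokenLargestAccounts(mint);
+for (const { address, uiAmountString } of largest.value) {
+  console.log(address.toBase58(), uiAmountString);
+}
+```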
+ + + + + +### Parameters + + + Pubkey of the token Mint to query, as base-58 encoded string + + + + +Configuration object containing the following fields: + + + + + +### Result + +The result will be an RpcResponse JSON object with `value` equal to an array of +JSON objects containing: + +- `address: ` - the address of the token account +- `amount: ` - the raw token account balance without decimals, a string + representation of u64 +- `decimals: ` - number of base 10 digits to the right of the decimal place +- `uiAmount: ` - the token account balance, using mint-prescribed + decimals **DEPRECATED** +- `uiAmountString: ` - the token account balance as a string, using + mint-prescribed decimals + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc": "2.0", "id": 1, + "method": "getTokenLargestAccounts", + "params": [ + "3wyAj7Rt1TWVPZVteFJPLa26JmLvdb1CAKEFZm3NY75E" + ] + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { + "context": { + "slot": 1114 + }, + "value": [ + { + "address": "FYjHNoFtSQ5uijKrZFyYAxvEr87hsKXkXcxkcmkBAf4r", + "amount": "771", + "decimals": 2, + "uiAmount": 7.71, + "uiAmountString": "7.71" + }, + { + "address": "BnsywxTcaYeNUtzrPxQUvzAWxfzZe3ZLUJ4wMMuLESnu", + "amount": "229", + "decimals": 2, + "uiAmount": 2.29, + "uiAmountString": "2.29" + } + ] + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getTokenSupply.mdx b/content/docs/rpc/http/getTokenSupply.mdx new file mode 100644 index 000000000..111611fcd --- /dev/null +++ b/content/docs/rpc/http/getTokenSupply.mdx @@ -0,0 +1,86 @@ +--- +title: getTokenSupply +hideTableOfContents: true +altRoutes: + - /docs/rpc/getTokenSupply +h1: getTokenSupply RPC Method +--- + +Returns the total supply of an SPL Token type. 
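+For orientation, a minimal TypeScript sketch of this call, assuming the legacy
+`@solana/web3.js` `Connection` client and reusing the mint address from the curl
+sample further down:
+
+```typescript
+import { Connection, PublicKey, clusterApiUrl } from "@solana/web3.js";
+
+const connection = new Connection(clusterApiUrl("devnet"), "confirmed");
+
+// Mint address taken from the curl sample below
+const mint = new PublicKey("3wyAj7Rt1TWVPZVteFJPLa26JmLvdb1CAKEFZm3NY75E");
+
+const supply = await connection.getTokenSupply(mint);
+console.log("Raw amount:", supply.value.amount);
+console.log("UI amount:", supply.value.uiAmountString);
+```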
+ + + + + +### Parameters + + + Pubkey of the token Mint to query, as base-58 encoded string + + + + +Configuration object containing the following fields: + + + + + +### Result + +The result will be an RpcResponse JSON object with `value` equal to a JSON +object containing: + +- `amount: ` - the raw total token supply without decimals, a string + representation of u64 +- `decimals: ` - number of base 10 digits to the right of the decimal place +- `uiAmount: ` - the total token supply, using mint-prescribed + decimals **DEPRECATED** +- `uiAmountString: ` - the total token supply as a string, using + mint-prescribed decimals + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc": "2.0", "id": 1, + "method": "getTokenSupply", + "params": [ + "3wyAj7Rt1TWVPZVteFJPLa26JmLvdb1CAKEFZm3NY75E" + ] + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { + "context": { + "slot": 1114 + }, + "value": { + "amount": "100000", + "decimals": 2, + "uiAmount": 1000, + "uiAmountString": "1000" + } + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getTransaction.mdx b/content/docs/rpc/http/getTransaction.mdx new file mode 100644 index 000000000..f6f447b25 --- /dev/null +++ b/content/docs/rpc/http/getTransaction.mdx @@ -0,0 +1,213 @@ +--- +title: getTransaction +hideTableOfContents: true +altRoutes: + - /docs/rpc/getTransaction +h1: getTransaction RPC Method +--- + +Returns transaction details for a confirmed transaction + + + + + +### Parameters + + + Transaction signature, as base-58 encoded string + + + + +Configuration object containing the following fields: + + + +- `processed` is not supported. + + + + + Set the max transaction version to return in responses. If the requested + transaction is a higher version, an error will be returned. If this parameter + is omitted, only legacy transactions will be returned, and any versioned + transaction will prompt the error. + + + + +Encoding for the returned Transaction + + + +
+ +- `jsonParsed` encoding attempts to use program-specific state parsers to return + more human-readable and explicit data in the + `transaction.message.instructions` list. +- If `jsonParsed` is requested but a parser cannot be found, the instruction + falls back to regular JSON encoding (`accounts`, `data`, and `programIdIndex` + fields). + +
+ +
+ +
+ +### Result + +- `` - if transaction is not found or not confirmed +- `` - if transaction is confirmed, an object with the following fields: + - `slot: ` - the slot this transaction was processed in + - `transaction: ` - + [Transaction](/docs/rpc/json-structures#transactions) object, either in JSON + format or encoded binary data, depending on encoding parameter + - `blockTime: ` - estimated production time, as Unix timestamp + (seconds since the Unix epoch) of when the transaction was processed. null + if not available + - `meta: ` - transaction status metadata object: + - `err: ` - Error if transaction failed, null if transaction + succeeded. + [TransactionError definitions](https://docs.rs/solana-sdk/latest/solana_sdk/transaction/enum.TransactionError.html) + - `fee: ` - fee this transaction was charged, as u64 integer + - `preBalances: ` - array of u64 account balances from before the + transaction was processed + - `postBalances: ` - array of u64 account balances after the + transaction was processed + - `innerInstructions: ` - List of + [inner instructions](/docs/rpc/json-structures#inner-instructions) or + `null` if inner instruction recording was not enabled during this + transaction + - `preTokenBalances: ` - List of + [token balances](/docs/rpc/json-structures#token-balances) from before the + transaction was processed or omitted if token balance recording was not + yet enabled during this transaction + - `postTokenBalances: ` - List of + [token balances](/docs/rpc/json-structures#token-balances) from after the + transaction was processed or omitted if token balance recording was not + yet enabled during this transaction + - `logMessages: ` - array of string log messages or `null` if + log message recording was not enabled during this transaction + - DEPRECATED: `status: ` - Transaction status + - `"Ok": ` - Transaction was successful + - `"Err": ` - Transaction failed with TransactionError + - `rewards: ` - transaction-level rewards, populated if rewards + are requested; an array of JSON objects containing: + - `pubkey: ` - The public key, as base-58 encoded string, of the + account that received the reward + - `lamports: `- number of reward lamports credited or debited by the + account, as a i64 + - `postBalance: ` - account balance in lamports after the reward was + applied + - `rewardType: ` - type of reward: currently only "rent", other + types may be added in the future + - `commission: ` - vote account commission when the reward + was credited, only present for voting and staking rewards + - `loadedAddresses: ` - Transaction addresses loaded from + address lookup tables. Undefined if `maxSupportedTransactionVersion` is + not set in request params, or if `jsonParsed` encoding is set in request + params. + - `writable: ` - Ordered list of base-58 encoded addresses + for writable loaded accounts + - `readonly: ` - Ordered list of base-58 encoded addresses + for readonly loaded accounts + - `returnData: ` - the most-recent return data generated + by an instruction in the transaction, with the following fields: + - `programId: ` - the program that generated the return data, as + base-58 encoded Pubkey + - `data: <[string, encoding]>` - the return data itself, as base-64 + encoded binary data + - `computeUnitsConsumed: ` - number of + [compute units](/docs/core/fees#compute-budget) consumed by the + transaction + - `version: <"legacy"|number|undefined>` - Transaction version. Undefined if + `maxSupportedTransactionVersion` is not set in request params. 
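+For reference, the equivalent lookup from TypeScript might look like the sketch
+below. It assumes the legacy `@solana/web3.js` `Connection` client and reuses
+the signature from the curl sample that follows:
+
+```typescript
+import { Connection, clusterApiUrl } from "@solana/web3.js";
+
+const connection = new Connection(clusterApiUrl("devnet"), "confirmed");
+
+// Signature taken from the curl sample below
+const signature =
+  "2nBhEBYYvfaAe16UMNqRHre4YNSskvuYgx3M6E4JP1oDYvZEJHvoPzyUidNgNX5r9sTyN1J9UxtbCXy2rqYcuyuv";
+
+// Set maxSupportedTransactionVersion so versioned transactions do not error
+const tx = await connection.getTransaction(signature, {
+  maxSupportedTransactionVersion: 0,
+});
+console.log("Slot:", tx?.slot);
+console.log("Fee (lamports):", tx?.meta?.fee);
+```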
+ + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc": "2.0", + "id": 1, + "method": "getTransaction", + "params": [ + "2nBhEBYYvfaAe16UMNqRHre4YNSskvuYgx3M6E4JP1oDYvZEJHvoPzyUidNgNX5r9sTyN1J9UxtbCXy2rqYcuyuv", + "json" + ] + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { + "meta": { + "err": null, + "fee": 5000, + "innerInstructions": [], + "postBalances": [499998932500, 26858640, 1, 1, 1], + "postTokenBalances": [], + "preBalances": [499998937500, 26858640, 1, 1, 1], + "preTokenBalances": [], + "rewards": [], + "status": { + "Ok": null + } + }, + "slot": 430, + "transaction": { + "message": { + "accountKeys": [ + "3UVYmECPPMZSCqWKfENfuoTv51fTDTWicX9xmBD2euKe", + "AjozzgE83A3x1sHNUR64hfH7zaEBWeMaFuAN9kQgujrc", + "SysvarS1otHashes111111111111111111111111111", + "SysvarC1ock11111111111111111111111111111111", + "Vote111111111111111111111111111111111111111" + ], + "header": { + "numReadonlySignedAccounts": 0, + "numReadonlyUnsignedAccounts": 3, + "numRequiredSignatures": 1 + }, + "instructions": [ + { + "accounts": [1, 2, 3, 0], + "data": "37u9WtQpcm6ULa3WRQHmj49EPs4if7o9f1jSRVZpm2dvihR9C8jY4NqEwXUbLwx15HBSNcP1", + "programIdIndex": 4 + } + ], + "recentBlockhash": "mfcyqEXB3DnHXki6KjjmZck6YjmZLvpAByy2fj4nh6B" + }, + "signatures": [ + "2nBhEBYYvfaAe16UMNqRHre4YNSskvuYgx3M6E4JP1oDYvZEJHvoPzyUidNgNX5r9sTyN1J9UxtbCXy2rqYcuyuv" + ] + } + }, + "blockTime": null, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getTransactionCount.mdx b/content/docs/rpc/http/getTransactionCount.mdx new file mode 100644 index 000000000..b2d84c885 --- /dev/null +++ b/content/docs/rpc/http/getTransactionCount.mdx @@ -0,0 +1,57 @@ +--- +title: getTransactionCount +hideTableOfContents: true +altRoutes: + - /docs/rpc/getTransactionCount +h1: getTransactionCount RPC Method +--- + +Returns the current Transaction count from the ledger + + + + + +### Parameters + + + +Configuration object containing the following fields: + + + + + The minimum slot that the request can be evaluated at + + + + +### Result + +`` - the current Transaction count from the ledger + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + {"jsonrpc":"2.0","id":1, "method":"getTransactionCount"} +' +``` + +### Response + +```json +{ "jsonrpc": "2.0", "result": 268, "id": 1 } +``` + + + diff --git a/content/docs/rpc/http/getVersion.mdx b/content/docs/rpc/http/getVersion.mdx new file mode 100644 index 000000000..172e651e3 --- /dev/null +++ b/content/docs/rpc/http/getVersion.mdx @@ -0,0 +1,50 @@ +--- +title: getVersion +hideTableOfContents: true +altRoutes: + - /docs/rpc/getVersion +h1: getVersion RPC Method +--- + +Returns the current Solana version running on the node + + + + + +### Parameters + +**None** + +### Result + +The result field will be a JSON object with the following fields: + +- `solana-core` - software version of solana-core as a `string` +- `feature-set` - unique identifier of the current software's feature set as a + `u32` + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + {"jsonrpc":"2.0","id":1, "method":"getVersion"} +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { "feature-set": 2891131721, "solana-core": "1.16.7" }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getVoteAccounts.mdx b/content/docs/rpc/http/getVoteAccounts.mdx 
new file mode 100644 index 000000000..ea0d130ab --- /dev/null +++ b/content/docs/rpc/http/getVoteAccounts.mdx @@ -0,0 +1,113 @@ +--- +title: getVoteAccounts +hideTableOfContents: true +altRoutes: + - /docs/rpc/getVoteAccounts +h1: getVoteAccounts RPC Method +--- + +Returns the account info and associated stake for all the voting accounts in the +current bank. + + + + + +### Parameters + + + +Configuration object containing the following fields: + + + + + Only return results for this validator vote address (base-58 encoded) + + + + Do not filter out delinquent validators with no stake + + + + Specify the number of slots behind the tip that a validator must fall to be + considered delinquent. **NOTE:** For the sake of consistency between ecosystem + products, _it is **not** recommended that this argument be specified._ + + + + +### Result + +The result field will be a JSON object of `current` and `delinquent` accounts, +each containing an array of JSON objects with the following sub fields: + +- `votePubkey: ` - Vote account address, as base-58 encoded string +- `nodePubkey: ` - Validator identity, as base-58 encoded string +- `activatedStake: ` - the stake, in lamports, delegated to this vote + account and active in this epoch +- `epochVoteAccount: ` - bool, whether the vote account is staked for this + epoch +- `commission: ` - percentage (0-100) of rewards payout owed to the vote + account +- `lastVote: ` - Most recent slot voted on by this vote account +- `epochCredits: ` - Latest history of earned credits for up to five + epochs, as an array of arrays containing: `[epoch, credits, previousCredits]`. +- `rootSlot: ` - Current root slot for this vote account + + + + + +### Code sample + +Restrict results to a single validator vote account: + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc": "2.0", + "id": 1, + "method": "getVoteAccounts", + "params": [ + { + "votePubkey": "3ZT31jkAGhUaw8jsy4bTknwBMP8i4Eueh52By4zXcsVw" + } + ] + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { + "current": [ + { + "commission": 0, + "epochVoteAccount": true, + "epochCredits": [ + [1, 64, 0], + [2, 192, 64] + ], + "nodePubkey": "B97CCUW3AEZFGy6uUg6zUdnNYvnVq5VG8PUtb2HayTDD", + "lastVote": 147, + "activatedStake": 42, + "votePubkey": "3ZT31jkAGhUaw8jsy4bTknwBMP8i4Eueh52By4zXcsVw" + } + ], + "delinquent": [] + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/getaccountinfo.mdx b/content/docs/rpc/http/getaccountinfo.mdx new file mode 100644 index 000000000..0fb6ce6d3 --- /dev/null +++ b/content/docs/rpc/http/getaccountinfo.mdx @@ -0,0 +1,138 @@ +--- +title: getAccountInfo +hideTableOfContents: true +altRoutes: + - /docs/rpc/getAccountInfo +h1: getAccountInfo RPC Method +--- + +Returns all information associated with the account of provided Pubkey + + + + + +### Parameters + + + Pubkey of account to query, as base-58 encoded string + + + + +Configuration object containing the following fields: + + + + + +Encoding format for Account data + + + +
+ +- `base58` is slow and limited to less than 129 bytes of Account data. +- `base64` will return base64 encoded data for Account data of any size. +- `base64+zstd` compresses the Account data using + [Zstandard](https://facebook.github.io/zstd/) and base64-encodes the result. +- `jsonParsed` encoding attempts to use program-specific state parsers to return + more human-readable and explicit account state data. +- If `jsonParsed` is requested but a parser cannot be found, the field falls + back to `base64` encoding, detectable when the `data` field is type `string`. + +
+ +
+ + + Request a slice of the account's data. + +- `length: ` - number of bytes to return +- `offset: ` - byte offset from which to start reading + + + Data slicing is only available for `base58`, `base64`, or `base64+zstd` + encodings. + + + + + + The minimum slot that the request can be evaluated at + + +
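+Before the result reference, a minimal TypeScript sketch of the same call,
+assuming the legacy `@solana/web3.js` `Connection` client and reusing the
+address from the curl sample below:
+
+```typescript
+import { Connection, PublicKey, clusterApiUrl } from "@solana/web3.js";
+
+const connection = new Connection(clusterApiUrl("devnet"), "confirmed");
+
+// Address taken from the curl sample below
+const address = new PublicKey("vines1vzrYbzLMRdu58ou5XTby4qAqVRLmqo36NKPTg");
+
+// Returns null if the account does not exist
+const accountInfo = await connection.getAccountInfo(address);
+console.log("Lamports:", accountInfo?.lamports);
+console.log("Owner:", accountInfo?.owner.toBase58());
+```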
+ +### Result + +The result will be an RpcResponse JSON object with `value` equal to: + +- `` - if the requested account doesn't exist +- `` - otherwise, a JSON object containing: + - `lamports: ` - number of lamports assigned to this account, as a u64 + - `owner: ` - base-58 encoded Pubkey of the program this account has + been assigned to + - `data: <[string, encoding]|object>` - data associated with the account, + either as encoded binary data or JSON format `{: }` - + depending on encoding parameter + - `executable: ` - boolean indicating if the account contains a program + \(and is strictly read-only\) + - `rentEpoch: ` - the epoch at which this account will next owe rent, as + u64 + - `space: ` - the data size of the account + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc": "2.0", + "id": 1, + "method": "getAccountInfo", + "params": [ + "vines1vzrYbzLMRdu58ou5XTby4qAqVRLmqo36NKPTg", + { + "encoding": "base58" + } + ] + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { + "context": { "apiVersion": "2.0.15", "slot": 341197053 }, + "value": { + "data": ["", "base58"], + "executable": false, + "lamports": 88849814690250, + "owner": "11111111111111111111111111111111", + "rentEpoch": 18446744073709551615, + "space": 0 + } + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/index.mdx b/content/docs/rpc/http/index.mdx new file mode 100644 index 000000000..b67726514 --- /dev/null +++ b/content/docs/rpc/http/index.mdx @@ -0,0 +1,104 @@ +--- +title: HTTP Methods +seoTitle: Solana RPC HTTP Methods +hideTableOfContents: false +h1: Solana RPC HTTP Methods +--- + +Solana nodes accept HTTP requests using the +[JSON-RPC 2.0](https://www.jsonrpc.org/specification) specification. + +> For JavaScript applications, use the +> [@solana/web3.js](https://github.com/solana-labs/solana-web3.js) library as a +> convenient interface for the RPC methods to interact with a Solana node. For +> an PubSub connection to a Solana node, use the +> [Websocket API](/docs/rpc/websocket/). + +## RPC HTTP Endpoint + +Default port: `8899` + +- http://localhost:8899 +- http://192.168.1.88:8899 + +## Request Formatting + +To make a JSON-RPC request, send an HTTP POST request with a +`Content-Type: application/json` header. The JSON request data should contain 4 +fields: + +- `jsonrpc: ` - set to `"2.0"` +- `id: ` - a unique identifier for the request, + generated by the client. Typically a string or number, though null is + technically allowed but not advised +- `method: ` - a string containing the method to be invoked +- `params: ` - a JSON array of ordered parameter values + +Example using curl: + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc": "2.0", + "id": 1, + "method": "getBalance", + "params": [ + "83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri" + ] + } +' +``` + +The response output will be a JSON object with the following fields: + +- `jsonrpc: ` - matching the request specification +- `id: ` - matching the request identifier +- `result: ` - requested data or success + confirmation + +Requests can be sent in batches by sending an array of JSON-RPC request objects +as the data for a single POST. 
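+For example, a small TypeScript sketch that posts two requests in a single batch
+with `fetch` (the endpoint and request ids here are arbitrary):
+
+```typescript
+// A JSON array in the POST body batches multiple JSON-RPC requests
+const response = await fetch("https://api.devnet.solana.com", {
+  method: "POST",
+  headers: { "Content-Type": "application/json" },
+  body: JSON.stringify([
+    { jsonrpc: "2.0", id: 1, method: "getSlot" },
+    { jsonrpc: "2.0", id: 2, method: "getTransactionCount" },
+  ]),
+});
+
+// The response body is an array with one result object per request
+const results = await response.json();
+console.log(results);
+```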
+ +### Example Request + +The commitment parameter should be included as the last element in the `params` +array: + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc": "2.0", + "id": 1, + "method": "getBalance", + "params": [ + "83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri", + { + "commitment": "finalized" + } + ] + } +' +``` + +## Definitions + +- Hash: A SHA-256 hash of a chunk of data. +- Pubkey: The public key of a Ed25519 key-pair. +- Transaction: A list of Solana instructions signed by a client keypair to + authorize those actions. +- Signature: An Ed25519 signature of transaction's payload data including + instructions. This can be used to identify transactions. + +## Health Check + +Although not a JSON RPC API, a `GET /health` at the RPC HTTP Endpoint provides a +health-check mechanism for use by load balancers or other network +infrastructure. This request will always return a HTTP 200 OK response with a +body of "ok", "behind" or "unknown": + +- `ok`: The node is within `HEALTH_CHECK_SLOT_DISTANCE` slots from the latest + cluster confirmed slot +- `behind { distance }`: The node is behind `distance` slots from the latest + cluster confirmed slot where `distance > HEALTH_CHECK_SLOT_DISTANCE` +- `unknown`: The node is unable to determine where it stands in relation to the + cluster diff --git a/content/docs/rpc/http/isBlockhashValid.mdx b/content/docs/rpc/http/isBlockhashValid.mdx new file mode 100644 index 000000000..0843e5878 --- /dev/null +++ b/content/docs/rpc/http/isBlockhashValid.mdx @@ -0,0 +1,84 @@ +--- +title: isBlockhashValid +hideTableOfContents: true +altRoutes: + - /docs/rpc/isBlockhashValid +h1: isBlockhashValid RPC Method +--- + +Returns whether a blockhash is still valid or not + + + This method is only available in `solana-core` v1.9 or newer. Please use + [getFeeCalculatorForBlockhash](/docs/rpc/http/getfeecalculatorforblockhash) + for `solana-core` v1.8 and below. 
+ + + + + + +### Parameters + + + the blockhash of the block to evaluate, as base-58 encoded string + + + + +Configuration object containing the following fields: + + + + + The minimum slot that the request can be evaluated at + + + + +### Result + +`` - `true` if the blockhash is still valid + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "id":45, + "jsonrpc":"2.0", + "method":"isBlockhashValid", + "params":[ + "J7rBdM6AecPDEZp8aPq5iPSNKVkU5Q76F3oAV4eW5wsW", + {"commitment":"processed"} + ] + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { + "context": { + "slot": 2483 + }, + "value": false + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/meta.json b/content/docs/rpc/http/meta.json new file mode 100644 index 000000000..6f1ae6291 --- /dev/null +++ b/content/docs/rpc/http/meta.json @@ -0,0 +1,57 @@ +{ + "title": "HTTP Methods", + "pages": [ + "getAccountInfo", + "getBalance", + "getBlock", + "getBlockCommitment", + "getBlockHeight", + "getBlockProduction", + "getBlocks", + "getBlocksWithLimit", + "getBlockTime", + "getClusterNodes", + "getEpochInfo", + "getEpochSchedule", + "getFeeForMessage", + "getFirstAvailableBlock", + "getGenesisHash", + "getHealth", + "getHighestSnapshotSlot", + "getIdentity", + "getInflationGovernor", + "getInflationRate", + "getInflationReward", + "getLargestAccounts", + "getLatestBlockhash", + "getLeaderSchedule", + "getMaxRetransmitSlot", + "getMaxShredInsertSlot", + "getMinimumBalanceForRentExemption", + "getMultipleAccounts", + "getProgramAccounts", + "getRecentPerformanceSamples", + "getRecentPrioritizationFees", + "getSignaturesForAddress", + "getSignatureStatuses", + "getSlot", + "getSlotLeader", + "getSlotLeaders", + "getStakeMinimumDelegation", + "getSupply", + "getTokenAccountBalance", + "getTokenAccountsByDelegate", + "getTokenAccountsByOwner", + "getTokenLargestAccounts", + "getTokenSupply", + "getTransaction", + "getTransactionCount", + "getVersion", + "getVoteAccounts", + "isBlockhashValid", + "minimumLedgerSlot", + "requestAirdrop", + "sendTransaction", + "simulateTransaction" + ] +} diff --git a/content/docs/rpc/http/minimumLedgerSlot.mdx b/content/docs/rpc/http/minimumLedgerSlot.mdx new file mode 100644 index 000000000..19436dbc4 --- /dev/null +++ b/content/docs/rpc/http/minimumLedgerSlot.mdx @@ -0,0 +1,47 @@ +--- +title: minimumLedgerSlot +hideTableOfContents: true +altRoutes: + - /docs/rpc/minimumLedgerSlot +h1: minimumLedgerSlot RPC Method +--- + +Returns the lowest slot that the node has information about in its ledger. 
+ + + This value may increase over time if the node is configured to purge older + ledger data + + + + + + +### Parameters + +**None** + +### Result + +`u64` - Minimum ledger slot number + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + {"jsonrpc":"2.0","id":1, "method":"minimumLedgerSlot"} +' +``` + +### Response + +```json +{ "jsonrpc": "2.0", "result": 1234, "id": 1 } +``` + + + diff --git a/content/docs/rpc/http/requestAirdrop.mdx b/content/docs/rpc/http/requestAirdrop.mdx new file mode 100644 index 000000000..b195d8189 --- /dev/null +++ b/content/docs/rpc/http/requestAirdrop.mdx @@ -0,0 +1,72 @@ +--- +title: requestAirdrop +hideTableOfContents: true +altRoutes: + - /docs/rpc/requestAirdrop +h1: requestAirdrop RPC Method +--- + +Requests an airdrop of lamports to a Pubkey + + + + + +### Parameters + + + Pubkey of account to receive lamports, as a base-58 encoded string + + + + lamports to airdrop, as a "u64" + + + + +Configuration object containing the following fields: + + + + + +### Result + +`` - Transaction Signature of the airdrop, as a base-58 encoded string + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc": "2.0", "id": 1, + "method": "requestAirdrop", + "params": [ + "83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri", + 1000000000 + ] + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": "5VERv8NMvzbJMEkV8xnrLkEaWRtSz9CosKDYjCJjBRnbJLgp8uirBgmQpjKhoR4tjF3ZpRzrFmBV6UjKdiSZkQUW", + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/sendTransaction.mdx b/content/docs/rpc/http/sendTransaction.mdx new file mode 100644 index 000000000..fe83701a7 --- /dev/null +++ b/content/docs/rpc/http/sendTransaction.mdx @@ -0,0 +1,125 @@ +--- +title: sendTransaction +hideTableOfContents: true +altRoutes: + - /docs/rpc/sendTransaction +h1: sendTransaction RPC Method +--- + +Submits a signed transaction to the cluster for processing. + +This method does not alter the transaction in any way; it relays the transaction +created by clients to the node as-is. + +If the node's rpc service receives the transaction, this method immediately +succeeds, without waiting for any confirmations. A successful response from this +method does not guarantee the transaction is processed or confirmed by the +cluster. + +While the rpc service will reasonably retry to submit it, the transaction could +be rejected if transaction's `recent_blockhash` expires before it lands. + +Use [`getSignatureStatuses`](/docs/rpc/http/getsignaturestatuses) to ensure a +transaction is processed and confirmed. + +Before submitting, the following preflight checks are performed: + +1. The transaction signatures are verified +2. The transaction is simulated against the bank slot specified by the preflight + commitment. On failure an error will be returned. Preflight checks may be + disabled if desired. It is recommended to specify the same commitment and + preflight commitment to avoid confusing behavior. + +The returned signature is the first signature in the transaction, which is used +to identify the transaction +([transaction id](/docs/terminology#transaction-id)). This identifier can be +easily extracted from the transaction data before submission. + + + + + +### Parameters + + + Fully-signed Transaction, as encoded string. + + + + +Configuration object containing the following optional fields: + + + +Encoding used for the transaction data. 
+ +Values: `base58` (_slow_, **DEPRECATED**), or `base64`. + + + + + when `true`, skip the preflight transaction checks + + + + Commitment level to use for preflight. + + + + Maximum number of times for the RPC node to retry sending the transaction to + the leader. If this parameter not provided, the RPC node will retry the + transaction until it is finalized or until the blockhash expires. + + + + set the minimum slot at which to perform preflight transaction checks + + + + +### Result + +`` - First Transaction Signature embedded in the transaction, as base-58 +encoded string ([transaction id](/docs/terminology#transaction-id)) + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc": "2.0", + "id": 1, + "method": "sendTransaction", + "params": [ + "4hXTCkRzt9WyecNzV1XPgCDfGAZzQKNxLXgynz5QDuWWPSAZBZSHptvWRL3BjCvzUXRdKvHL2b7yGrRQcWyaqsaBCncVG7BFggS8w9snUts67BSh3EqKpXLUm5UMHfD7ZBe9GhARjbNQMLJ1QD3Spr6oMTBU6EhdB4RD8CP2xUxr2u3d6fos36PD98XS6oX8TQjLpsMwncs5DAMiD4nNnR8NBfyghGCWvCVifVwvA8B8TJxE1aiyiv2L429BCWfyzAme5sZW8rDb14NeCQHhZbtNqfXhcp2tAnaAT" + ] + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": "2id3YC2jK9G5Wo2phDx4gJVAew8DcY5NAojnVuao8rkxwPYPe8cSwE5GzhEgJA2y8fVjDEo6iR6ykBvDxrTQrtpb", + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/http/simulateTransaction.mdx b/content/docs/rpc/http/simulateTransaction.mdx new file mode 100644 index 000000000..1b8520a15 --- /dev/null +++ b/content/docs/rpc/http/simulateTransaction.mdx @@ -0,0 +1,197 @@ +--- +title: simulateTransaction +hideTableOfContents: true +altRoutes: + - /docs/rpc/simulateTransaction +h1: simulateTransaction RPC Method +--- + +Simulate sending a transaction + + + + + +### Parameters + + + +Transaction, as an encoded string. + + + The transaction must have a valid blockhash, but is not required to be signed. + + + + + + +Configuration object containing the following fields: + + + Commitment level to simulate the transaction at + + + + if `true` the transaction signatures will be verified (conflicts with + `replaceRecentBlockhash`) + + + + if `true` the transaction recent blockhash will be replaced with the most + recent blockhash. (conflicts with `sigVerify`) + + + + the minimum slot that the request can be evaluated at + + + + +Encoding used for the transaction data. + +Values: `base58` (_slow_, **DEPRECATED**), or `base64`. + + + + + +If `true` the response will include +[inner instructions](/docs/rpc/json-structures#inner-instructions). These inner +instructions will be `jsonParsed` where possible, otherwise `json`. + + + + + +Accounts configuration object containing the following fields: + + + An `array` of accounts to return, as base-58 encoded strings + + + + +encoding for returned Account data + + + +
+ +- `jsonParsed` encoding attempts to use program-specific state parsers to return + more human-readable and explicit account state data. +- If `jsonParsed` is requested but a + [parser cannot be found](https://github.com/solana-labs/solana/blob/cfd0a00ae2ba85a6d76757df8b4fa38ed242d185/account-decoder/src/parse_account_data.rs#L98-L100), + the field falls back to `base64` encoding, detectable when the returned + `accounts.data` field is type `string`. + +
+ +
+ +
+ +
+ +### Result + +The result will be an RpcResponse JSON object with `value` set to a JSON object +with the following fields: + +- `err: ` - Error if transaction failed, null if transaction + succeeded. + [TransactionError definitions](https://github.com/solana-labs/solana/blob/c0c60386544ec9a9ec7119229f37386d9f070523/sdk/src/transaction/error.rs#L13) +- `logs: ` - Array of log messages the transaction instructions + output during execution, null if simulation failed before the transaction was + able to execute (for example due to an invalid blockhash or signature + verification failure) +- `accounts: ` - array of accounts with the same length as the + `accounts.addresses` array in the request + - `` - if the account doesn't exist or if `err` is not null + - `` - otherwise, a JSON object containing: + - `lamports: ` - number of lamports assigned to this account, as a u64 + - `owner: ` - base-58 encoded Pubkey of the program this account has + been assigned to + - `data: <[string, encoding]|object>` - data associated with the account, + either as encoded binary data or JSON format `{: }` - + depending on encoding parameter + - `executable: ` - boolean indicating if the account contains a + program \(and is strictly read-only\) + - `rentEpoch: ` - the epoch at which this account will next owe rent, + as u64 +- `unitsConsumed: ` - The number of compute budget units consumed + during the processing of this transaction +- `returnData: ` - the most-recent return data generated by an + instruction in the transaction, with the following fields: + - `programId: ` - the program that generated the return data, as + base-58 encoded Pubkey + - `data: <[string, encoding]>` - the return data itself, as base-64 encoded + binary data +- `innerInstructions: ` - Defined only if + `innerInstructions` was set to `true`. The value is a list of + [inner instructions](/docs/rpc/json-structures#inner-instructions). + + + + + +### Code sample + +```shell +curl https://api.devnet.solana.com -s -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc": "2.0", + "id": 1, + "method": "simulateTransaction", + "params": [ + "AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAEDArczbMia1tLmq7zz4DinMNN0pJ1JtLdqIJPUw3YrGCzYAMHBsgN27lcgB6H2WQvFgyZuJYHa46puOQo9yQ8CVQbd9uHXZaGT2cvhRs7reawctIXtX1s3kTqM9YV+/wCp20C7Wj2aiuk5TReAXo+VTVg8QTHjs0UjNMMKCvpzZ+ABAgEBARU=", + { + "encoding":"base64" + } + ] + } +' +``` + +### Response + +```json +{ + "jsonrpc": "2.0", + "result": { + "context": { + "slot": 218 + }, + "value": { + "err": null, + "accounts": null, + "logs": [ + "Program 83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri invoke [1]", + "Program 83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri consumed 2366 of 1400000 compute units", + "Program return: 83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri KgAAAAAAAAA=", + "Program 83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri success" + ], + "returnData": { + "data": ["Kg==", "base64"], + "programId": "83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri" + }, + "unitsConsumed": 2366 + } + }, + "id": 1 +} +``` + + + diff --git a/content/docs/rpc/index.mdx b/content/docs/rpc/index.mdx new file mode 100644 index 000000000..8041916bb --- /dev/null +++ b/content/docs/rpc/index.mdx @@ -0,0 +1,98 @@ +--- +title: Solana RPC Methods +seoTitle: "Solana RPC Methods: HTTP & Websockets" +hideTableOfContents: false +h1: Solana RPC Methods & Documentation +--- + +Interact with Solana nodes directly with the JSON RPC API via the HTTP and +Websocket methods. 
+ +## Configuring State Commitment + +For preflight checks and transaction processing, Solana nodes choose which bank +state to query based on a commitment requirement set by the client. The +commitment describes how finalized a block is at that point in time. When +querying the ledger state, it's recommended to use lower levels of commitment to +report progress and higher levels to ensure the state will not be rolled back. + +In descending order of commitment (most finalized to least finalized), clients +may specify: + +- `finalized` - the node will query the most recent block confirmed by + supermajority of the cluster as having reached maximum lockout, meaning the + cluster has recognized this block as finalized +- `confirmed` - the node will query the most recent block that has been voted on + by supermajority of the cluster. + - It incorporates votes from gossip and replay. + - It does not count votes on descendants of a block, only direct votes on that + block. + - This confirmation level also upholds "optimistic confirmation" guarantees in + release 1.3 and onwards. +- `processed` - the node will query its most recent block. Note that the block + may still be skipped by the cluster. + +For processing many dependent transactions in series, it's recommended to use +`confirmed` commitment, which balances speed with rollback safety. For total +safety, it's recommended to use `finalized` commitment. + +### Default Commitment + +If commitment configuration is not provided, the node will +[default to `finalized` commitment](https://github.com/anza-xyz/agave/blob/aa0922d6845e119ba466f88497e8209d1c82febc/sdk/src/commitment_config.rs#L199-L203) + +Only methods that query bank state accept the commitment parameter. They are +indicated in the API Reference below. + +## RpcResponse Structure + +Many methods that take a commitment parameter return an RpcResponse JSON object +comprised of two parts: + +- `context` : An RpcResponseContext JSON structure including a `slot` field at + which the operation was evaluated. +- `value` : The value returned by the operation itself. + +## Parsed Responses + +Some methods support an `encoding` parameter, and can return account or +instruction data in parsed JSON format if `"encoding":"jsonParsed"` is requested +and the node has a parser for the owning program. Solana nodes currently support +JSON parsing for the following native and SPL programs: + +| Program | Account State | Instructions | +| ---------------------------- | ------------- | ------------ | +| Address Lookup | v1.15.0 | v1.15.0 | +| BPF Loader | n/a | stable | +| BPF Upgradeable Loader | stable | stable | +| Config | stable | | +| SPL Associated Token Account | n/a | stable | +| SPL Memo | n/a | stable | +| SPL Token | stable | stable | +| SPL Token 2022 | stable | stable | +| Stake | stable | stable | +| Vote | stable | stable | + +The list of account parsers can be found +[here](https://github.com/solana-labs/solana/blob/master/account-decoder/src/parse_account_data.rs), +and instruction parsers +[here](https://github.com/solana-labs/solana/blob/master/transaction-status/src/parse_instruction.rs). + +## Filter criteria + +Some methods support providing a `filters` object to enable pre-filtering the +data returned within the RpcResponse JSON object. The following filters exist: + +- `memcmp: object` - compares a provided series of bytes with program account + data at a particular offset. 
Fields: + + - `offset: usize` - offset into program account data to start comparison + - `bytes: string` - data to match, as encoded string + - `encoding: string` - encoding for filter `bytes` data, either "base58" or + "base64". Data is limited in size to 128 or fewer decoded bytes.
+ **NEW: This field, and base64 support generally, is only available in + solana-core v1.14.0 or newer. Please omit when querying nodes on earlier + versions** + +- `dataSize: u64` - compares the program account data length with the provided + data size diff --git a/content/docs/rpc/json-structures.mdx b/content/docs/rpc/json-structures.mdx new file mode 100644 index 000000000..0a4972bd9 --- /dev/null +++ b/content/docs/rpc/json-structures.mdx @@ -0,0 +1,109 @@ +--- +title: Data Structures as JSON +hideTableOfContents: false +h1: Common JSON Data Structures for Solana RPC Methods +--- + +Various Solana RPC methods will return more complex responses as structured JSON +objects, filled with specific keyed values. + +The most common of these JSON data structures include: + +- [transactions](#transactions) +- [inner instructions](#inner-instructions) +- [token balances](#token-balances) + +## Transactions + +Transactions are quite different from those on other blockchains. Be sure to +review [Anatomy of a Transaction](/docs/core/transactions) to learn about +transactions on Solana. + +The JSON structure of a transaction is defined as follows: + +- `signatures: ` - A list of base-58 encoded signatures applied + to the transaction. The list is always of length + `message.header.numRequiredSignatures` and not empty. The signature at index + `i` corresponds to the public key at index `i` in `message.accountKeys`. The + first one is used as the + [transaction id](/docs/terminology#transaction-id). +- `message: ` - Defines the content of the transaction. + - `accountKeys: ` - List of base-58 encoded public keys used by + the transaction, including by the instructions and for signatures. The first + `message.header.numRequiredSignatures` public keys must sign the + transaction. + - `header: ` - Details the account types and signatures required by + the transaction. + - `numRequiredSignatures: ` - The total number of signatures + required to make the transaction valid. The signatures must match the + first `numRequiredSignatures` of `message.accountKeys`. + - `numReadonlySignedAccounts: ` - The last + `numReadonlySignedAccounts` of the signed keys are read-only accounts. + Programs may process multiple transactions that load read-only accounts + within a single PoH entry, but are not permitted to credit or debit + lamports or modify account data. Transactions targeting the same + read-write account are evaluated sequentially. + - `numReadonlyUnsignedAccounts: ` - The last + `numReadonlyUnsignedAccounts` of the unsigned keys are read-only accounts. + - `recentBlockhash: ` - A base-58 encoded hash of a recent block in + the ledger used to prevent transaction duplication and to give transactions + lifetimes. + - `instructions: ` - List of program instructions that will be + executed in sequence and committed in one atomic transaction if all succeed. + - `programIdIndex: ` - Index into the `message.accountKeys` array + indicating the program account that executes this instruction. + - `accounts: ` - List of ordered indices into the + `message.accountKeys` array indicating which accounts to pass to the + program. + - `data: ` - The program input data encoded in a base-58 string. + - `addressTableLookups: ` - List of address table + lookups used by a transaction to dynamically load addresses from on-chain + address lookup tables. Undefined if `maxSupportedTransactionVersion` is not + set. + - `accountKey: ` - base-58 encoded public key for an address lookup + table account. 
+ - `writableIndexes: ` - List of indices used to load + addresses of writable accounts from a lookup table. + - `readonlyIndexes: ` - List of indices used to load + addresses of readonly accounts from a lookup table. + +## Inner Instructions + +The Solana runtime records the cross-program instructions that are invoked +during transaction processing and makes these available for greater transparency +of what was executed on-chain per transaction instruction. Invoked instructions +are grouped by the originating transaction instruction and are listed in order +of processing. + +The JSON structure of inner instructions is defined as a list of objects in the +following structure: + +- `index: number` - Index of the transaction instruction from which the inner + instruction(s) originated +- `instructions: ` - Ordered list of inner program instructions + that were invoked during a single transaction instruction. + - `programIdIndex: ` - Index into the `message.accountKeys` array + indicating the program account that executes this instruction. + - `accounts: ` - List of ordered indices into the + `message.accountKeys` array indicating which accounts to pass to the + program. + - `data: ` - The program input data encoded in a base-58 string. + +## Token Balances + +The JSON structure of token balances is defined as a list of objects in the +following structure: + +- `accountIndex: ` - Index of the account in which the token balance is + provided for. +- `mint: ` - Pubkey of the token's mint. +- `owner: ` - Pubkey of token balance's owner. +- `programId: ` - Pubkey of the Token program that owns the + account. +- `uiTokenAmount: ` - + - `amount: ` - Raw amount of tokens as a string, ignoring decimals. + - `decimals: ` - Number of decimals configured for token's mint. + - `uiAmount: ` - Token amount as a float, accounting for + decimals. **DEPRECATED** + - `uiAmountString: ` - Token amount as a string, accounting for + decimals. diff --git a/content/docs/rpc/meta.json b/content/docs/rpc/meta.json new file mode 100644 index 000000000..5ccc9f120 --- /dev/null +++ b/content/docs/rpc/meta.json @@ -0,0 +1,5 @@ +{ + "title": "Solana RPC Methods", + "pages": ["json-structures", "http", "websocket", "deprecated"], + "defaultOpen": true +} diff --git a/content/docs/rpc/websocket/accountSubscribe.mdx b/content/docs/rpc/websocket/accountSubscribe.mdx new file mode 100644 index 000000000..5417adc53 --- /dev/null +++ b/content/docs/rpc/websocket/accountSubscribe.mdx @@ -0,0 +1,160 @@ +--- +title: accountSubscribe +hideTableOfContents: true +altRoutes: + - /docs/rpc/accountSubscribe +h1: accountSubscribe RPC Method +--- + +Subscribe to an account to receive notifications when the lamports or data for a +given account public key changes + + + + + +### Parameters + + + Account Pubkey, as base-58 encoded string + + + + +Configuration object containing the following fields: + + + + + +Encoding format for Account data + + + +
+ +- `base58` is slow. +- `jsonParsed` encoding attempts to use program-specific state parsers to return + more human-readable and explicit account state data. +- If `jsonParsed` is requested but a parser cannot be found, the field falls + back to binary encoding, detectable when the `data` field is type `string`. + +
+ +
+ +
+ +### Result + +`` - Subscription id \(needed to unsubscribe\) + +
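+In TypeScript, a minimal sketch of this subscription, assuming the legacy
+`@solana/web3.js` `Connection` client and its `onAccountChange` helper, and
+reusing the account address from the JSON sample below:
+
+```typescript
+import { Connection, PublicKey, clusterApiUrl } from "@solana/web3.js";
+
+const connection = new Connection(clusterApiUrl("devnet"), "confirmed");
+
+// Account to watch, taken from the JSON sample below
+const account = new PublicKey("CM78CPUeXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNH12");
+
+// onAccountChange issues accountSubscribe over the websocket endpoint
+const subscriptionId = connection.onAccountChange(
+  account,
+  (accountInfo, context) => {
+    console.log("Slot:", context.slot, "Lamports:", accountInfo.lamports);
+  },
+  "finalized",
+);
+
+// Later, unsubscribe with:
+// await connection.removeAccountChangeListener(subscriptionId);
+```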
+ + + +### Code sample + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "method": "accountSubscribe", + "params": [ + "CM78CPUeXjn8o3yroDHxUtKsZZgoy4GPkPPXfouKNH12", + { + "encoding": "jsonParsed", + "commitment": "finalized" + } + ] +} +``` + +### Response + +```json +{ "jsonrpc": "2.0", "result": 23784, "id": 1 } +``` + + +
+ +#### Notification Format: + +The notification format is the same as seen in the +[getAccountInfo](/docs/rpc/http/getaccountinfo) RPC HTTP method. + +Base58 encoding: + +```json +{ + "jsonrpc": "2.0", + "method": "accountNotification", + "params": { + "result": { + "context": { + "slot": 5199307 + }, + "value": { + "data": [ + "11116bv5nS2h3y12kD1yUKeMZvGcKLSjQgX6BeV7u1FrjeJcKfsHPXHRDEHrBesJhZyqnnq9qJeUuF7WHxiuLuL5twc38w2TXNLxnDbjmuR", + "base58" + ], + "executable": false, + "lamports": 33594, + "owner": "11111111111111111111111111111111", + "rentEpoch": 635, + "space": 80 + } + }, + "subscription": 23784 + } +} +``` + +Parsed-JSON encoding: + +```json +{ + "jsonrpc": "2.0", + "method": "accountNotification", + "params": { + "result": { + "context": { + "slot": 5199307 + }, + "value": { + "data": { + "program": "nonce", + "parsed": { + "type": "initialized", + "info": { + "authority": "Bbqg1M4YVVfbhEzwA9SpC9FhsaG83YMTYoR4a8oTDLX", + "blockhash": "LUaQTmM7WbMRiATdMMHaRGakPtCkc2GHtH57STKXs6k", + "feeCalculator": { + "lamportsPerSignature": 5000 + } + } + } + }, + "executable": false, + "lamports": 33594, + "owner": "11111111111111111111111111111111", + "rentEpoch": 635, + "space": 80 + } + }, + "subscription": 23784 + } +} +``` diff --git a/content/docs/rpc/websocket/accountUnsubscribe.mdx b/content/docs/rpc/websocket/accountUnsubscribe.mdx new file mode 100644 index 000000000..39e6613f4 --- /dev/null +++ b/content/docs/rpc/websocket/accountUnsubscribe.mdx @@ -0,0 +1,47 @@ +--- +title: accountUnsubscribe +hideTableOfContents: true +altRoutes: + - /docs/rpc/accountUnsubscribe +h1: accountUnsubscribe RPC Method +--- + +Unsubscribe from account change notifications + + + + + +### Parameters + + + id of the account Subscription to cancel + + +### Result + +`` - unsubscribe success message + + + + + +### Code sample + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "method": "accountUnsubscribe", + "params": [0] +} +``` + +### Response + +```json +{ "jsonrpc": "2.0", "result": true, "id": 1 } +``` + + + diff --git a/content/docs/rpc/websocket/blockSubscribe.mdx b/content/docs/rpc/websocket/blockSubscribe.mdx new file mode 100644 index 000000000..ba25b1313 --- /dev/null +++ b/content/docs/rpc/websocket/blockSubscribe.mdx @@ -0,0 +1,386 @@ +--- +title: blockSubscribe +hideTableOfContents: true +altRoutes: + - /docs/rpc/blockSubscribe +h1: blockSubscribe RPC Method +--- + +Subscribe to receive notification anytime a new block is `confirmed` or +`finalized`. + + + This subscription is considered **unstable** and is only available if the + validator was started with the `--rpc-pubsub-enable-block-subscription` flag. + The format of this subscription may change in the future. + + + + + + +### Parameters + + + +filter criteria for the logs to receive results by account type; currently +supported: + + + `all` - include all transactions in block + + + + +A JSON object with the following field: + +- `mentionsAccountOrProgram: ` - return only transactions that mention + the provided public key (as base-58 encoded string). If no mentions in a given + block, then no notification will be sent. + + + + + + + +Configuration object containing the following fields: + + + +- `processed` is not supported. + + + + + +encoding format for each returned Transaction + + + +
+ +- `jsonParsed` attempts to use program-specific instruction parsers to return + more human-readable and explicit data in the + `transaction.message.instructions` list. +- If `jsonParsed` is requested but a parser cannot be found, the instruction + falls back to regular JSON encoding (`accounts`, `data`, and `programIdIndex` + fields). + +
+ +
+ + + +level of transaction detail to return + + + +
+ +- If `accounts` are requested, transaction details only include signatures and + an annotated list of accounts in each transaction. +- Transaction metadata is limited to only: fee, err, pre_balances, + post_balances, pre_token_balances, and post_token_balances. + +
+ +
+ + + +the max transaction version to return in responses. + +
+ +- If the requested block contains a transaction with a higher version, an error + will be returned. +- If this parameter is omitted, only legacy transactions will be returned, and a + block containing any versioned transaction will prompt the error. + +
+ +
+ + + whether to populate the `rewards` array. If the parameter is not provided, the + default includes rewards. + +
+ +### Result + +`integer` - subscription id \(needed to unsubscribe\) + +
+ + + +### Code sample + +```json +{ + "jsonrpc": "2.0", + "id": "1", + "method": "blockSubscribe", + "params": ["all"] +} +``` + +```json +{ + "jsonrpc": "2.0", + "id": "1", + "method": "blockSubscribe", + "params": [ + { + "mentionsAccountOrProgram": "LieKvPRE8XeX3Y2xVNHjKlpAScD12lYySBVQ4HqoJ5op" + }, + { + "commitment": "confirmed", + "encoding": "base64", + "showRewards": true, + "transactionDetails": "full" + } + ] +} +``` + +### Response + +```json +{ "jsonrpc": "2.0", "result": 0, "id": 1 } +``` + + +
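Because this subscription is unstable, client libraries may not expose a dedicated helper for it. The sketch below drives it over a raw websocket in TypeScript using the `ws` package (an assumption; any websocket client works), reusing the second request from the code sample above against a validator started with the required flag:

```typescript
import WebSocket from "ws";

// Assumes a validator started with --rpc-pubsub-enable-block-subscription,
// exposing the pubsub endpoint on the default port 8900.
const ws = new WebSocket("ws://localhost:8900");

ws.on("open", () => {
  ws.send(
    JSON.stringify({
      jsonrpc: "2.0",
      id: 1,
      method: "blockSubscribe",
      params: [
        {
          mentionsAccountOrProgram:
            "LieKvPRE8XeX3Y2xVNHjKlpAScD12lYySBVQ4HqoJ5op",
        },
        {
          commitment: "confirmed",
          encoding: "base64",
          showRewards: true,
          transactionDetails: "full",
        },
      ],
    }),
  );
});

ws.on("message", data => {
  const message = JSON.parse(data.toString());
  if (message.method === "blockNotification") {
    // Each notification carries the block at the given slot.
    console.log("Block notification for slot:", message.params.result.value.slot);
  } else if (message.result !== undefined) {
    // First reply is the subscription id.
    console.log("Subscription id:", message.result);
  }
});
```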
+ +#### Notification Format: + +The notification will be an object with the following fields: + +- `slot: ` - The corresponding slot. +- `err: ` - Error if something went wrong publishing the + notification otherwise null. +- `block: ` - A block object as seen in the + [getBlock](/docs/rpc/http/getblock) RPC HTTP method. + +```json +{ + "jsonrpc": "2.0", + "method": "blockNotification", + "params": { + "result": { + "context": { + "slot": 112301554 + }, + "value": { + "slot": 112301554, + "block": { + "previousBlockhash": "GJp125YAN4ufCSUvZJVdCyWQJ7RPWMmwxoyUQySydZA", + "blockhash": "6ojMHjctdqfB55JDpEpqfHnP96fiaHEcvzEQ2NNcxzHP", + "parentSlot": 112301553, + "transactions": [ + { + "transaction": [ + "OpltwoUvWxYi1P2U8vbIdE/aPntjYo5Aa0VQ2JJyeJE2g9Vvxk8dDGgFMruYfDu8/IfUWb0REppTe7IpAuuLRgIBAAkWnj4KHRpEWWW7gvO1c0BHy06wZi2g7/DLqpEtkRsThAXIdBbhXCLvltw50ZnjDx2hzw74NVn49kmpYj2VZHQJoeJoYJqaKcvuxCi/2i4yywedcVNDWkM84Iuw+cEn9/ROCrXY4qBFI9dveEERQ1c4kdU46xjxj9Vi+QXkb2Kx45QFVkG4Y7HHsoS6WNUiw2m4ffnMNnOVdF9tJht7oeuEfDMuUEaO7l9JeUxppCvrGk3CP45saO51gkwVYEgKzhpKjCx3rgsYxNR81fY4hnUQXSbbc2Y55FkwgRBpVvQK7/+clR4Gjhd3L4y+OtPl7QF93Akg1LaU9wRMs5nvfDFlggqI9PqJl+IvVWrNRdBbPS8LIIhcwbRTkSbqlJQWxYg3Bo2CTVbw7rt1ZubuHWWp0mD/UJpLXGm2JprWTePNULzHu67sfqaWF99LwmwjTyYEkqkRt1T0Je5VzHgJs0N5jY4iIU9K3lMqvrKOIn/2zEMZ+ol2gdgjshx+sphIyhw65F3J/Dbzk04LLkK+CULmN571Y+hFlXF2ke0BIuUG6AUF+4214Cu7FXnqo3rkxEHDZAk0lRrAJ8X/Z+iwuwI5cgbd9uHXZaGT2cvhRs7reawctIXtX1s3kTqM9YV+/wCpDLAp8axcEkaQkLDKRoWxqp8XLNZSKial7Rk+ELAVVKWoWLRXRZ+OIggu0OzMExvVLE5VHqy71FNHq4gGitkiKYNFWSLIE4qGfdFLZXy/6hwS+wq9ewjikCpd//C9BcCL7Wl0iQdUslxNVCBZHnCoPYih9JXvGefOb9WWnjGy14sG9j70+RSVx6BlkFELWwFvIlWR/tHn3EhHAuL0inS2pwX7ZQTAU6gDVaoqbR2EiJ47cKoPycBNvHLoKxoY9AZaBjPl6q8SKQJSFyFd9n44opAgI6zMTjYF/8Ok4VpXEESp3QaoUyTI9sOJ6oFP6f4dwnvQelgXS+AEfAsHsKXxGAIUDQENAgMEBQAGBwgIDg8IBJCER3QXl1AVDBADCQoOAAQLERITDAjb7ugh3gOuTy==", + "base64" + ], + "meta": { + "err": null, + "status": { + "Ok": null + }, + "fee": 5000, + "preBalances": [ + 1758510880, 2067120, 1566000, 1461600, 2039280, 2039280, + 1900080, 1865280, 0, 3680844220, 2039280 + ], + "postBalances": [ + 1758505880, 2067120, 1566000, 1461600, 2039280, 2039280, + 1900080, 1865280, 0, 3680844220, 2039280 + ], + "innerInstructions": [ + { + "index": 0, + "instructions": [ + { + "programIdIndex": 13, + "accounts": [1, 15, 3, 4, 2, 14], + "data": "21TeLgZXNbtHXVBzCaiRmH" + }, + { + "programIdIndex": 14, + "accounts": [3, 4, 1], + "data": "6qfC8ic7Aq99" + }, + { + "programIdIndex": 13, + "accounts": [1, 15, 3, 5, 2, 14], + "data": "21TeLgZXNbsn4QEpaSEr3q" + }, + { + "programIdIndex": 14, + "accounts": [3, 5, 1], + "data": "6LC7BYyxhFRh" + } + ] + }, + { + "index": 1, + "instructions": [ + { + "programIdIndex": 14, + "accounts": [4, 3, 0], + "data": "7aUiLHFjSVdZ" + }, + { + "programIdIndex": 19, + "accounts": [17, 18, 16, 9, 11, 12, 14], + "data": "8kvZyjATKQWYxaKR1qD53V" + }, + { + "programIdIndex": 14, + "accounts": [9, 11, 18], + "data": "6qfC8ic7Aq99" + } + ] + } + ], + "logMessages": [ + "Program QMNeHCGYnLVDn1icRAfQZpjPLBNkfGbSKRB83G5d8KB invoke [1]", + "Program QMWoBmAyJLAsA1Lh9ugMTw2gciTihncciphzdNzdZYV invoke [2]" + ], + "preTokenBalances": [ + { + "accountIndex": 4, + "mint": "iouQcQBAiEXe6cKLS85zmZxUqaCqBdeHFpqKoSz615u", + "uiTokenAmount": { + "uiAmount": null, + "decimals": 6, + "amount": "0", + "uiAmountString": "0" + }, + "owner": "LieKvPRE8XeX3Y2xVNHjKlpAScD12lYySBVQ4HqoJ5op", + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA" + }, + { + "accountIndex": 5, + "mint": "iouQcQBAiEXe6cKLS85zmZxUqaCqBdeHFpqKoSz615u", + 
"uiTokenAmount": { + "uiAmount": 11513.0679, + "decimals": 6, + "amount": "11513067900", + "uiAmountString": "11513.0679" + }, + "owner": "rXhAofQCT7NN9TUqigyEAUzV1uLL4boeD8CRkNBSkYk", + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA" + }, + { + "accountIndex": 10, + "mint": "Saber2gLauYim4Mvftnrasomsv6NvAuncvMEZwcLpD1", + "uiTokenAmount": { + "uiAmount": null, + "decimals": 6, + "amount": "0", + "uiAmountString": "0" + }, + "owner": "CL9wkGFT3SZRRNa9dgaovuRV7jrVVigBUZ6DjcgySsCU", + "programId": "TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb" + }, + { + "accountIndex": 11, + "mint": "Saber2gLauYim4Mvftnrasomsv6NvAuncvMEZwcLpD1", + "uiTokenAmount": { + "uiAmount": 15138.514093, + "decimals": 6, + "amount": "15138514093", + "uiAmountString": "15138.514093" + }, + "owner": "LieKvPRE8XeX3Y2xVNHjKlpAScD12lYySBVQ4HqoJ5op", + "programId": "TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb" + } + ], + "postTokenBalances": [ + { + "accountIndex": 4, + "mint": "iouQcQBAiEXe6cKLS85zmZxUqaCqBdeHFpqKoSz615u", + "uiTokenAmount": { + "uiAmount": null, + "decimals": 6, + "amount": "0", + "uiAmountString": "0" + }, + "owner": "LieKvPRE8XeX3Y2xVNHjKlpAScD12lYySBVQ4HqoJ5op", + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA" + }, + { + "accountIndex": 5, + "mint": "iouQcQBAiEXe6cKLS85zmZxUqaCqBdeHFpqKoSz615u", + "uiTokenAmount": { + "uiAmount": 11513.103028, + "decimals": 6, + "amount": "11513103028", + "uiAmountString": "11513.103028" + }, + "owner": "rXhAofQCT7NN9TUqigyEAUzV1uLL4boeD8CRkNBSkYk", + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA" + }, + { + "accountIndex": 10, + "mint": "Saber2gLauYim4Mvftnrasomsv6NvAuncvMEZwcLpD1", + "uiTokenAmount": { + "uiAmount": null, + "decimals": 6, + "amount": "0", + "uiAmountString": "0" + }, + "owner": "CL9wkGFT3SZRRNa9dgaovuRV7jrVVigBUZ6DjcgySsCU", + "programId": "TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb" + }, + { + "accountIndex": 11, + "mint": "Saber2gLauYim4Mvftnrasomsv6NvAuncvMEZwcLpD1", + "uiTokenAmount": { + "uiAmount": 15489.767829, + "decimals": 6, + "amount": "15489767829", + "uiAmountString": "15489.767829" + }, + "owner": "BeiHVPRE8XeX3Y2xVNrSsTpAScH94nYySBVQ4HqgN9at", + "programId": "TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb" + } + ], + "rewards": [] + } + } + ], + "blockTime": 1639926816, + "blockHeight": 101210751 + }, + "err": null + } + }, + "subscription": 14 + } +} +``` diff --git a/content/docs/rpc/websocket/blockUnsubscribe.mdx b/content/docs/rpc/websocket/blockUnsubscribe.mdx new file mode 100644 index 000000000..0d9a1dafe --- /dev/null +++ b/content/docs/rpc/websocket/blockUnsubscribe.mdx @@ -0,0 +1,47 @@ +--- +title: blockUnsubscribe +hideTableOfContents: true +altRoutes: + - /docs/rpc/blockUnsubscribe +h1: blockUnsubscribe RPC Method +--- + +Unsubscribe from block notifications + + + + + +### Parameters + + + subscription id to cancel + + +### Result + +`` - unsubscribe success message + + + + + +### Code sample + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "method": "blockUnsubscribe", + "params": [0] +} +``` + +### Response + +```json +{ "jsonrpc": "2.0", "result": true, "id": 1 } +``` + + + diff --git a/content/docs/rpc/websocket/index.mdx b/content/docs/rpc/websocket/index.mdx new file mode 100644 index 000000000..e654a5363 --- /dev/null +++ b/content/docs/rpc/websocket/index.mdx @@ -0,0 +1,22 @@ +--- +title: Websocket Methods +seoTitle: Solana RPC Websocket Methods +hideTableOfContents: false +h1: Solana RPC Websocket Methods +--- + +After connecting to the RPC PubSub websocket at `ws://
/`: + +- Submit subscription requests to the websocket using the methods below +- Multiple subscriptions may be active at once +- Many subscriptions take the optional + [`commitment` parameter](/docs/rpc/#configuring-state-commitment), + defining how finalized a change should be to trigger a notification. For + subscriptions, if commitment is unspecified, the default value is `finalized`. + +## RPC PubSub WebSocket Endpoint + +Default port: `8900` + +- ws://localhost:8900 +- http://192.168.1.88:8900 diff --git a/content/docs/rpc/websocket/logsSubscribe.mdx b/content/docs/rpc/websocket/logsSubscribe.mdx new file mode 100644 index 000000000..2b5c4e8ab --- /dev/null +++ b/content/docs/rpc/websocket/logsSubscribe.mdx @@ -0,0 +1,138 @@ +--- +title: logsSubscribe +hideTableOfContents: true +altRoutes: + - /docs/rpc/logsSubscribe +h1: logsSubscribe RPC Method +--- + +Subscribe to transaction logging + + + + + +### Parameters + + + filter criteria for the logs to receive results by account type. The following filters types are currently supported: + + + +A string with one of the following values: + +- `all` - subscribe to all transactions except for simple vote transactions +- `allWithVotes` - subscribe to all transactions, including simple vote + transactions + + + + + +An object with the following field: + +- `mentions: [ ]` - array containing a single Pubkey (as base-58 + encoded string); if present, subscribe to only transactions mentioning this + address + + + The `mentions` field currently [only supports + one](https://github.com/solana-labs/solana/blob/master/rpc/src/rpc_pubsub.rs#L481) + Pubkey string per method call. Listing additional addresses will result in an + error. + + + + + + + + +Configuration object containing the following fields: + + + + + +### Result + +`` - Subscription id \(needed to unsubscribe\) + + + + + +### Code sample + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "method": "logsSubscribe", + "params": [ + { + "mentions": [ "11111111111111111111111111111111" ] + }, + { + "commitment": "finalized" + } + ] +} +{ + "jsonrpc": "2.0", + "id": 1, + "method": "logsSubscribe", + "params": [ "all" ] +} +``` + +### Response + +```json +{ "jsonrpc": "2.0", "result": 24040, "id": 1 } +``` + + + + +#### Notification Format: + +The notification will be an RpcResponse JSON object with value equal to: + +- `signature: ` - The transaction signature base58 encoded. +- `err: ` - Error if transaction failed, null if transaction + succeeded. 
+ [TransactionError definitions](https://github.com/solana-labs/solana/blob/c0c60386544ec9a9ec7119229f37386d9f070523/sdk/src/transaction/error.rs#L13) +- `logs: ` - Array of log messages the transaction instructions + output during execution, null if simulation failed before the transaction was + able to execute (for example due to an invalid blockhash or signature + verification failure) + +Example: + +```json +{ + "jsonrpc": "2.0", + "method": "logsNotification", + "params": { + "result": { + "context": { + "slot": 5208469 + }, + "value": { + "signature": "5h6xBEauJ3PK6SWCZ1PGjBvj8vDdWG3KpwATGy1ARAXFSDwt8GFXM7W5Ncn16wmqokgpiKRLuS83KUxyZyv2sUYv", + "err": null, + "logs": [ + "SBF program 83astBRguLMdt2h5U1Tpdq5tjFoJ6noeGwaY3mDLVcri success" + ] + } + }, + "subscription": 24040 + } +} +``` diff --git a/content/docs/rpc/websocket/logsUnsubscribe.mdx b/content/docs/rpc/websocket/logsUnsubscribe.mdx new file mode 100644 index 000000000..09c556d56 --- /dev/null +++ b/content/docs/rpc/websocket/logsUnsubscribe.mdx @@ -0,0 +1,47 @@ +--- +title: logsUnsubscribe +hideTableOfContents: true +altRoutes: + - /docs/rpc/logsUnsubscribe +h1: logsUnsubscribe RPC Method +--- + +Unsubscribe from transaction logging + + + + + +### Parameters + + + subscription id to cancel + + +### Result + +`` - unsubscribe success message + + + + + +### Code sample + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "method": "logsUnsubscribe", + "params": [0] +} +``` + +### Response + +```json +{ "jsonrpc": "2.0", "result": true, "id": 1 } +``` + + + diff --git a/content/docs/rpc/websocket/meta.json b/content/docs/rpc/websocket/meta.json new file mode 100644 index 000000000..ab3487a3a --- /dev/null +++ b/content/docs/rpc/websocket/meta.json @@ -0,0 +1,23 @@ +{ + "title": "Websocket Methods", + "pages": [ + "accountSubscribe", + "accountUnsubscribe", + "blockSubscribe", + "blockUnsubscribe", + "logsSubscribe", + "logsUnsubscribe", + "programSubscribe", + "programUnsubscribe", + "rootSubscribe", + "rootUnsubscribe", + "signatureSubscribe", + "signatureUnsubscribe", + "slotSubscribe", + "slotsUpdatesSubscribe", + "slotsUpdatesUnsubscribe", + "slotUnsubscribe", + "voteSubscribe", + "voteUnsubscribe" + ] +} diff --git a/content/docs/rpc/websocket/programSubscribe.mdx b/content/docs/rpc/websocket/programSubscribe.mdx new file mode 100644 index 000000000..c0668e209 --- /dev/null +++ b/content/docs/rpc/websocket/programSubscribe.mdx @@ -0,0 +1,211 @@ +--- +title: programSubscribe +hideTableOfContents: true +altRoutes: + - /docs/rpc/programSubscribe +h1: programSubscribe RPC Method +--- + +Subscribe to a program to receive notifications when the lamports or data for an +account owned by the given program changes + + + + + +### Parameters + + + +Pubkey of the `program_id`, as base-58 encoded string + + + + + +Configuration object containing the following fields: + + + + + +filter results using various filter objects + + + The resultant account must meet **ALL** filter criteria to be included in the + returned results + + + + + + +Encoding format for Account data + + + +
+ +- `base58` is slow. +- `jsonParsed` encoding attempts to use program-specific state parsers to return + more human-readable and explicit account state data. +- If `jsonParsed` is requested but a parser cannot be found, the field falls + back to `base64` encoding, detectable when the `data` field is type `string`. + +
+ +
+ +
+ +### Result + +`` - Subscription id \(needed to unsubscribe\) + +
+ + + +### Code sample + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "method": "programSubscribe", + "params": [ + "11111111111111111111111111111111", + { + "encoding": "base64", + "commitment": "finalized" + } + ] +} +{ + "jsonrpc": "2.0", + "id": 1, + "method": "programSubscribe", + "params": [ + "11111111111111111111111111111111", + { + "encoding": "jsonParsed" + } + ] +} +{ + "jsonrpc": "2.0", + "id": 1, + "method": "programSubscribe", + "params": [ + "11111111111111111111111111111111", + { + "encoding": "base64", + "filters": [ + { + "dataSize": 80 + } + ] + } + ] +} +``` + +### Response + +```json +{ "jsonrpc": "2.0", "result": 24040, "id": 1 } +``` + + +
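With the legacy `@solana/web3.js` `Connection` class, the `onProgramAccountChange` helper issues this subscription for you. A minimal sketch, assuming a devnet endpoint and subscribing to System Program accounts with the same `dataSize` filter as the third request above:

```typescript
import { Connection, PublicKey, clusterApiUrl } from "@solana/web3.js";

const connection = new Connection(clusterApiUrl("devnet"), "finalized");
const systemProgram = new PublicKey("11111111111111111111111111111111");

// Sends a `programSubscribe` request under the hood; the `dataSize` filter
// mirrors the third request in the code sample above.
const subscriptionId = connection.onProgramAccountChange(
  systemProgram,
  (keyedAccountInfo, context) => {
    console.log("Slot:", context.slot);
    console.log("Account:", keyedAccountInfo.accountId.toBase58());
    console.log("Lamports:", keyedAccountInfo.accountInfo.lamports);
  },
  "finalized",
  [{ dataSize: 80 }],
);

// Cancel the subscription later:
// await connection.removeProgramAccountChangeListener(subscriptionId);
```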
+ +#### Notification format + +The notification format is a single program account object as seen in the +[getProgramAccounts](/docs/rpc/http/getprogramaccounts) RPC HTTP method. + +Base58 encoding: + +```json +{ + "jsonrpc": "2.0", + "method": "programNotification", + "params": { + "result": { + "context": { + "slot": 5208469 + }, + "value": { + "pubkey": "H4vnBqifaSACnKa7acsxstsY1iV1bvJNxsCY7enrd1hq", + "account": { + "data": [ + "11116bv5nS2h3y12kD1yUKeMZvGcKLSjQgX6BeV7u1FrjeJcKfsHPXHRDEHrBesJhZyqnnq9qJeUuF7WHxiuLuL5twc38w2TXNLxnDbjmuR", + "base58" + ], + "executable": false, + "lamports": 33594, + "owner": "11111111111111111111111111111111", + "rentEpoch": 636, + "space": 80 + } + } + }, + "subscription": 24040 + } +} +``` + +Parsed-JSON encoding: + +```json +{ + "jsonrpc": "2.0", + "method": "programNotification", + "params": { + "result": { + "context": { + "slot": 5208469 + }, + "value": { + "pubkey": "H4vnBqifaSACnKa7acsxstsY1iV1bvJNxsCY7enrd1hq", + "account": { + "data": { + "program": "nonce", + "parsed": { + "type": "initialized", + "info": { + "authority": "Bbqg1M4YVVfbhEzwA9SpC9FhsaG83YMTYoR4a8oTDLX", + "blockhash": "LUaQTmM7WbMRiATdMMHaRGakPtCkc2GHtH57STKXs6k", + "feeCalculator": { + "lamportsPerSignature": 5000 + } + } + } + }, + "executable": false, + "lamports": 33594, + "owner": "11111111111111111111111111111111", + "rentEpoch": 636, + "space": 80 + } + } + }, + "subscription": 24040 + } +} +``` diff --git a/content/docs/rpc/websocket/programUnsubscribe.mdx b/content/docs/rpc/websocket/programUnsubscribe.mdx new file mode 100644 index 000000000..1fe460cf2 --- /dev/null +++ b/content/docs/rpc/websocket/programUnsubscribe.mdx @@ -0,0 +1,47 @@ +--- +title: programUnsubscribe +hideTableOfContents: true +altRoutes: + - /docs/rpc/programUnsubscribe +h1: programUnsubscribe RPC Method +--- + +Unsubscribe from program-owned account change notifications + + + + + +### Parameters + + + id of account Subscription to cancel + + +### Result + +`` - unsubscribe success message + + + + + +### Code sample + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "method": "programUnsubscribe", + "params": [0] +} +``` + +### Response + +```json +{ "jsonrpc": "2.0", "result": true, "id": 1 } +``` + + + diff --git a/content/docs/rpc/websocket/rootSubscribe.mdx b/content/docs/rpc/websocket/rootSubscribe.mdx new file mode 100644 index 000000000..f4a510796 --- /dev/null +++ b/content/docs/rpc/websocket/rootSubscribe.mdx @@ -0,0 +1,55 @@ +--- +title: rootSubscribe +hideTableOfContents: true +altRoutes: + - /docs/rpc/rootSubscribe +h1: rootSubscribe RPC Method +--- + +Subscribe to receive notification anytime a new root is set by the validator. + + + + + +### Parameters + +**None** + +### Result + +`integer` - subscription id \(needed to unsubscribe\) + + + + + +### Code sample + +```json +{ "jsonrpc": "2.0", "id": 1, "method": "rootSubscribe" } +``` + +### Response + +```json +{ "jsonrpc": "2.0", "result": 0, "id": 1 } +``` + + + + +#### Notification Format: + +The result is the latest root slot number. 
+ +```json +{ + "jsonrpc": "2.0", + "method": "rootNotification", + "params": { + "result": 42, + "subscription": 0 + } +} +``` diff --git a/content/docs/rpc/websocket/rootUnsubscribe.mdx b/content/docs/rpc/websocket/rootUnsubscribe.mdx new file mode 100644 index 000000000..d93f629a6 --- /dev/null +++ b/content/docs/rpc/websocket/rootUnsubscribe.mdx @@ -0,0 +1,47 @@ +--- +title: rootUnsubscribe +hideTableOfContents: true +altRoutes: + - /docs/rpc/rootUnsubscribe +h1: rootUnsubscribe RPC Method +--- + +Unsubscribe from root notifications + + + + + +### Parameters + + + subscription id to cancel + + +### Result + +`` - unsubscribe success message + + + + + +### Code sample + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "method": "rootUnsubscribe", + "params": [0] +} +``` + +### Response + +```json +{ "jsonrpc": "2.0", "result": true, "id": 1 } +``` + + + diff --git a/content/docs/rpc/websocket/signatureSubscribe.mdx b/content/docs/rpc/websocket/signatureSubscribe.mdx new file mode 100644 index 000000000..012df835c --- /dev/null +++ b/content/docs/rpc/websocket/signatureSubscribe.mdx @@ -0,0 +1,149 @@ +--- +title: signatureSubscribe +hideTableOfContents: true +altRoutes: + - /docs/rpc/signatureSubscribe +h1: signatureSubscribe RPC Method +--- + +Subscribe to receive a notification when the transaction with the given +signature reaches the specified commitment level. + + + This is a subscription to a single notification. It is automatically cancelled + by the server once the notification, `signatureNotification`, is sent by the + RPC. + + + + + + +### Parameters + + + +transaction signature, as base-58 encoded string + + + The transaction signature must be the first signature from the transaction + (see [transaction id](/docs/terminology#transaction-id) for more details). + + + + + + +Configuration object containing the following fields: + + + + + +Whether or not to subscribe for notifications when signatures are received by +the RPC, in addition to when they are processed. + + + + + +### Result + +`` - subscription id (needed to unsubscribe) + + + + + +### Code sample + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "method": "signatureSubscribe", + "params": [ + "2EBVM6cB8vAAD93Ktr6Vd8p67XPbQzCJX47MpReuiCXJAtcjaxpvWpcg9Ege1Nr5Tk3a2GFrByT7WPBjdsTycY9b", + { + "commitment": "finalized", + "enableReceivedNotification": false + } + ] +} +``` + +### Response + +```json +{ "jsonrpc": "2.0", "result": 0, "id": 1 } +``` + + + + +#### Notification Format: + +The notification will be an RpcResponse JSON object with value containing an +object with: + +- `slot: ` - The corresponding slot. 
+- `value: ` - a notification value of + [`RpcSignatureResult`](https://github.com/solana-labs/solana/blob/6d28fd455b07e3557fc6c0c3ddf3ba03e3fe8482/rpc-client-api/src/response.rs#L265-L268), + resulting in either: + - when `enableReceivedNotification` is `true` and the signature is received: + the literal string + [`"receivedSignature"`](https://github.com/solana-labs/solana/blob/6d28fd455b07e3557fc6c0c3ddf3ba03e3fe8482/rpc-client-api/src/response.rs#L286-L288), + or + - when the signature is processed: `err: `: + - `null` if the transaction succeeded in being processed at the specified + commitment level, or + - a + [`TransactionError`](https://github.com/solana-labs/solana/blob/6d28fd455b07e3557fc6c0c3ddf3ba03e3fe8482/sdk/src/transaction/error.rs#L15-L164), + if the transaction failed + +#### Example responses: + +The following is an example response of a notification from a successfully +**processed** transactions: + +```json +{ + "jsonrpc": "2.0", + "method": "signatureNotification", + "params": { + "result": { + "context": { + "slot": 5207624 + }, + "value": { + "err": null + } + }, + "subscription": 24006 + } +} +``` + +The following is an example response of a notification from a successfully +**received** transaction signature: + +```json +{ + "jsonrpc": "2.0", + "method": "signatureNotification", + "params": { + "result": { + "context": { + "slot": 5207624 + }, + "value": "receivedSignature" + }, + "subscription": 24006 + } +} +``` diff --git a/content/docs/rpc/websocket/signatureUnsubscribe.mdx b/content/docs/rpc/websocket/signatureUnsubscribe.mdx new file mode 100644 index 000000000..de504bdb1 --- /dev/null +++ b/content/docs/rpc/websocket/signatureUnsubscribe.mdx @@ -0,0 +1,47 @@ +--- +title: signatureUnsubscribe +hideTableOfContents: true +altRoutes: + - /docs/rpc/signatureUnsubscribe +h1: signatureUnsubscribe RPC Method +--- + +Unsubscribe from signature confirmation notification + + + + + +### Parameters + + + subscription id to cancel + + +### Result + +`` - unsubscribe success message + + + + + +### Code sample + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "method": "signatureUnsubscribe", + "params": [0] +} +``` + +### Response + +```json +{ "jsonrpc": "2.0", "result": true, "id": 1 } +``` + + + diff --git a/content/docs/rpc/websocket/slotSubscribe.mdx b/content/docs/rpc/websocket/slotSubscribe.mdx new file mode 100644 index 000000000..8e123d564 --- /dev/null +++ b/content/docs/rpc/websocket/slotSubscribe.mdx @@ -0,0 +1,65 @@ +--- +title: slotSubscribe +hideTableOfContents: true +altRoutes: + - /docs/rpc/slotSubscribe +h1: slotSubscribe RPC Method +--- + +Subscribe to receive notification anytime a slot is processed by the validator + + + + + +### Parameters + +**None** + +### Result + +`` - Subscription id \(needed to unsubscribe\) + + + + + +### Code sample + +```json +{ "jsonrpc": "2.0", "id": 1, "method": "slotSubscribe" } +``` + +### Response + +```json +{ "jsonrpc": "2.0", "result": 0, "id": 1 } +``` + + + + +#### Notification Format: + +The notification will be an object with the following fields: + +- `parent: ` - The parent slot +- `root: ` - The current root slot +- `slot: ` - The newly set slot value + +Example: + +```json +{ + "jsonrpc": "2.0", + "method": "slotNotification", + "params": { + "result": { + "parent": 75, + "root": 44, + "slot": 76 + }, + "subscription": 0 + } +} +``` diff --git a/content/docs/rpc/websocket/slotUnsubscribe.mdx b/content/docs/rpc/websocket/slotUnsubscribe.mdx new file mode 100644 index 000000000..1e18836a1 --- /dev/null +++ 
b/content/docs/rpc/websocket/slotUnsubscribe.mdx @@ -0,0 +1,47 @@ +--- +title: slotUnsubscribe +hideTableOfContents: true +altRoutes: + - /docs/rpc/slotUnsubscribe +h1: slotUnsubscribe RPC Method +--- + +Unsubscribe from slot notifications + + + + + +### Parameters + + + subscription id to cancel + + +### Result + +`` - unsubscribe success message + + + + + +### Code sample + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "method": "slotUnsubscribe", + "params": [0] +} +``` + +### Response + +```json +{ "jsonrpc": "2.0", "result": true, "id": 1 } +``` + + + diff --git a/content/docs/rpc/websocket/slotsUpdatesSubscribe.mdx b/content/docs/rpc/websocket/slotsUpdatesSubscribe.mdx new file mode 100644 index 000000000..aa3353bef --- /dev/null +++ b/content/docs/rpc/websocket/slotsUpdatesSubscribe.mdx @@ -0,0 +1,87 @@ +--- +title: slotsUpdatesSubscribe +hideTableOfContents: true +altRoutes: + - /docs/rpc/slotsUpdatesSubscribe +h1: slotsUpdatesSubscribe RPC Method +--- + +Subscribe to receive a notification from the validator on a variety of updates +on every slot + + + This subscription is unstable. The format of this subscription may change in + the future, and may not always be supported. + + + + + + +### Parameters + +**None** + +### Result + +`` - Subscription id (needed to unsubscribe) + + + + + +### Code sample + +```json +{ "jsonrpc": "2.0", "id": 1, "method": "slotsUpdatesSubscribe" } +``` + +### Response + +```json +{ "jsonrpc": "2.0", "result": 0, "id": 1 } +``` + + + + +### Notification Format + +The notification will be an object with the following fields: + +- `err: ` - The error message. Only present if the update is + of type "dead". +- `parent: ` - The parent slot. Only present if the update is of + type "createdBank". +- `slot: ` - The newly updated slot +- `stats: ` - The error message. Only present if the update is + of type "frozen". 
An object with the following fields: + - `maxTransactionsPerEntry: `, + - `numFailedTransactions: `, + - `numSuccessfulTransactions: `, + - `numTransactionEntries: `, +- `timestamp: ` - The Unix timestamp of the update in milliseconds +- `type: ` - The update type, one of: + - "firstShredReceived" + - "completed" + - "createdBank" + - "frozen" + - "dead" + - "optimisticConfirmation" + - "root" + +```shell +{ + "jsonrpc": "2.0", + "method": "slotsUpdatesNotification", + "params": { + "result": { + "parent": 75, + "slot": 76, + "timestamp": 1625081266243, + "type": "optimisticConfirmation" + }, + "subscription": 0 + } +} +``` diff --git a/content/docs/rpc/websocket/slotsUpdatesUnsubscribe.mdx b/content/docs/rpc/websocket/slotsUpdatesUnsubscribe.mdx new file mode 100644 index 000000000..437a7d91e --- /dev/null +++ b/content/docs/rpc/websocket/slotsUpdatesUnsubscribe.mdx @@ -0,0 +1,47 @@ +--- +title: slotsUpdatesUnsubscribe +hideTableOfContents: true +altRoutes: + - /docs/rpc/slotsUpdatesUnsubscribe +h1: slotsUpdatesUnsubscribe RPC Method +--- + +Unsubscribe from slot-update notifications + + + + + +### Parameters + + + subscription id to cancel + + +### Result + +`` - unsubscribe success message + + + + + +### Code sample + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "method": "slotsUpdatesUnsubscribe", + "params": [0] +} +``` + +### Response + +```json +{ "jsonrpc": "2.0", "result": true, "id": 1 } +``` + + + diff --git a/content/docs/rpc/websocket/voteSubscribe.mdx b/content/docs/rpc/websocket/voteSubscribe.mdx new file mode 100644 index 000000000..c57ea6f6b --- /dev/null +++ b/content/docs/rpc/websocket/voteSubscribe.mdx @@ -0,0 +1,75 @@ +--- +title: voteSubscribe +hideTableOfContents: true +altRoutes: + - /docs/rpc/voteSubscribe +h1: voteSubscribe RPC Method +--- + +Subscribe to receive notification anytime a new vote is observed in gossip. +These votes are pre-consensus therefore there is no guarantee these votes will +enter the ledger. + + + This subscription is unstable and only available if the validator was started + with the `--rpc-pubsub-enable-vote-subscription` flag. The format of this + subscription may change in the future. 
+ + + + + + +### Parameters + +**None** + +### Result + +`` - subscription id (needed to unsubscribe) + + + + + +### Code sample + +```json +{ "jsonrpc": "2.0", "id": 1, "method": "voteSubscribe" } +``` + +### Response + +```json +{ "jsonrpc": "2.0", "result": 0, "id": 1 } +``` + + + + +#### Notification Format: + +The notification will be an object with the following fields: + +- `hash: ` - The vote hash +- `slots: ` - The slots covered by the vote, as an array of u64 integers +- `timestamp: ` - The timestamp of the vote +- `signature: ` - The signature of the transaction that contained this + vote +- `votePubkey: ` - The public key of the vote account, as base-58 + encoded string + +```json +{ + "jsonrpc": "2.0", + "method": "voteNotification", + "params": { + "result": { + "hash": "8Rshv2oMkPu5E4opXTRyuyBeZBqQ4S477VG26wUTFxUM", + "slots": [1, 2], + "timestamp": null + }, + "subscription": 0 + } +} +``` diff --git a/content/docs/rpc/websocket/voteUnsubscribe.mdx b/content/docs/rpc/websocket/voteUnsubscribe.mdx new file mode 100644 index 000000000..082f984ea --- /dev/null +++ b/content/docs/rpc/websocket/voteUnsubscribe.mdx @@ -0,0 +1,47 @@ +--- +title: voteUnsubscribe +hideTableOfContents: true +altRoutes: + - /docs/rpc/voteUnsubscribe +h1: voteUnsubscribe RPC Method +--- + +Unsubscribe from vote notifications + + + + + +### Parameters + + + subscription id to cancel + + +### Result + +`` - unsubscribe success message + + + + + +### Code sample + +```json +{ + "jsonrpc": "2.0", + "id": 1, + "method": "voteUnsubscribe", + "params": [0] +} +``` + +### Response + +```json +{ "jsonrpc": "2.0", "result": true, "id": 1 } +``` + + + diff --git a/content/docs/terminology.mdx b/content/docs/terminology.mdx new file mode 100644 index 000000000..398298de9 --- /dev/null +++ b/content/docs/terminology.mdx @@ -0,0 +1,581 @@ +--- +title: Terminology +description: + "Learn the essential terminology used throughout the Solana blockchain and + development models." +keywords: + - terms + - dictionary + - definitions + - define + - programming models +--- + +The following terms are used throughout the Solana documentation and development +ecosystem. + +## account + +A record in the Solana ledger that either holds data or is an executable +program. + +Like an account at a traditional bank, a Solana account may hold funds called +[lamports](#lamport). Like a file in Linux, it is addressable by a key, often +referred to as a [public key](#public-key-pubkey) or pubkey. + +The key may be one of: + +- an ed25519 public key +- a program-derived account address (32byte value forced off the ed25519 curve) +- a hash of an ed25519 public key with a 32 character string + +## account owner + +The address of the program that owns the account. Only the owning program is +capable of modifying the account. + +See also [authority](#authority). + +## app + +A front-end application that interacts with a Solana cluster. + +## authority + +The address of a user that has some kind of permission over an account. + +For example: + +- The ability to mint new tokens is given to the account that is the 'mint + authority' for the token mint. +- The ability to upgrade a program is given to the account that is the 'upgrade + authority' of a program. + +## bank state + +The result of interpreting all programs on the ledger at a given +[tick height](#tick-height). It includes at least the set of all +[accounts](#account) holding nonzero [native tokens](#native-token). 
+ +## block + +A contiguous set of [entries](#entry) on the ledger covered by a +[vote](#ledger-vote). A [leader](#leader) produces at most one block per +[slot](#slot). + +## blockhash + +A unique value ([hash](#hash)) that identifies a record (block). Solana computes +a blockhash from the last [entry id](#entry-id) of the block. + +## block height + +The number of [blocks](#block) beneath the current block. The first block after +the [genesis block](#genesis-block) has height one. + +## bootstrap validator + +The [validator](#validator) that produces the genesis (first) [block](#block) of +a block chain. + +## BPF loader + +The Solana program that owns and loads +[BPF](/docs/core/programs#berkeley-packet-filter-bpf) +[onchain programs](#onchain-program), allowing the program to interface with the +runtime. + +## client + +A computer program that accesses the Solana server network [cluster](#cluster). + +## commitment + +A measure of the network confirmation for the [block](#block). + +## cluster + +A set of [validators](#validator) maintaining a single [ledger](#ledger). + +## compute budget + +The maximum number of [compute units](#compute-units) consumed per transaction. + +## compute units + +The smallest unit of measure for consumption of computational resources of the +blockchain. + +## confirmation time + +The wallclock duration between a [leader](#leader) creating a +[tick entry](#tick) and creating a [confirmed block](#confirmed-block). + +## confirmed block + +A [block](#block) that has received a [super majority](#supermajority) of +[ledger votes](#ledger-vote). + +## control plane + +A gossip network connecting all [nodes](#node) of a [cluster](#cluster). + +## cooldown period + +Some number of [epochs](#epoch) after [stake](#stake) has been deactivated while +it progressively becomes available for withdrawal. During this period, the stake +is considered to be "deactivating". More info about: +[warmup and cooldown](https://docs.anza.xyz/implemented-proposals/staking-rewards#stake-warmup-cooldown-withdrawal) + +## credit + +See [vote credit](#vote-credit). + +## cross-program invocation (CPI) + +A call from one [onchain program](#onchain-program) to another. For more +information, see [calling between programs](/docs/core/cpi). + +## data plane + +A multicast network used to efficiently validate [entries](#entry) and gain +consensus. + +## drone + +An offchain service that acts as a custodian for a user's private key. It +typically serves to validate and sign transactions. + +## entry + +An entry on the [ledger](#ledger) either a [tick](#tick) or a +[transaction's entry](#transactions-entry). + +## entry id + +A preimage resistant [hash](#hash) over the final contents of an entry, which +acts as the [entry's](#entry) globally unique identifier. The hash serves as +evidence of: + +- The entry being generated after a duration of time +- The specified [transactions](#transaction) are those included in the entry +- The entry's position with respect to other entries in [ledger](#ledger) + +See [proof of history](#proof-of-history-poh). + +## epoch + +The time, i.e. number of [slots](#slot), for which a +[leader schedule](#leader-schedule) is valid. + +## fee account + +The fee account in the transaction is the account that pays for the cost of +including the transaction in the ledger. This is the first account in the +transaction. This account must be declared as Read-Write (writable) in the +transaction since paying for the transaction reduces the account balance. 
+ +## finality + +When nodes representing 2/3rd of the [stake](#stake) have a common +[root](#root). + +## fork + +A [ledger](#ledger) derived from common entries but then diverged. + +## genesis block + +The first [block](#block) in the chain. + +## genesis config + +The configuration file that prepares the [ledger](#ledger) for the +[genesis block](#genesis-block). + +## hash + +A digital fingerprint of a sequence of bytes. + +## inflation + +An increase in token supply over time used to fund rewards for validation and to +fund continued development of Solana. + +## inner instruction + +See [cross-program invocation](#cross-program-invocation-cpi). + +## instruction + +A call to invoke a specific [instruction handler](#instruction-handler) in a +[program](#program). An instruction also specifies which accounts it wants to +read or modify, and additional data that serves as auxiliary input to the +[instruction handler](#instruction-handler). A [client](#client) must include at +least one instruction in a [transaction](#transaction), and all instructions +must complete for the transaction to be considered successful. + +## instruction handler + +Instruction handlers are [program](#program) functions that process +[instructions](#instruction) from [transactions](#transaction). An instruction +handler may contain one or more +[cross-program invocations](#cross-program-invocation-cpi). + +## keypair + +A [public key](#public-key-pubkey) and corresponding [private key](#private-key) +for accessing an account. + +## lamport + +A fractional [native token](#native-token) with the value of 0.000000001 +[sol](#sol). + +> Within the compute budget, a quantity of +> _[micro-lamports](https://github.com/solana-labs/solana/blob/ced8f6a512c61e0dd5308095ae8457add4a39e94/program-runtime/src/prioritization_fee.rs#L1-L2)_ +> is used in the calculation of [prioritization fees](#prioritization-fee). + +## leader + +The role of a [validator](#validator) when it is appending [entries](#entry) to +the [ledger](#ledger). + +## leader schedule + +A sequence of [validator](#validator) [public keys](#public-key-pubkey) mapped +to [slots](#slot). The cluster uses the leader schedule to determine which +validator is the [leader](#leader) at any moment in time. + +## ledger + +A list of [entries](#entry) containing [transactions](#transaction) signed by +[clients](#client). Conceptually, this can be traced back to the +[genesis block](#genesis-block), but an actual [validator](#validator)'s ledger +may have only newer [blocks](#block) to reduce storage, as older ones are not +needed for validation of future blocks by design. + +## ledger vote + +A [hash](#hash) of the [validator's state](#bank-state) at a given +[tick height](#tick-height). It comprises a [validator's](#validator) +affirmation that a [block](#block) it has received has been verified, as well as +a promise not to vote for a conflicting [block](#block) \(i.e. [fork](#fork)\) +for a specific amount of time, the [lockout](#lockout) period. + +## light client + +A type of [client](#client) that can verify it's pointing to a valid +[cluster](#cluster). It performs more ledger verification than a +[thin client](#thin-client) and less than a [validator](#validator). + +## loader + +A [program](#program) with the ability to interpret the binary encoding of other +onchain programs. + +## lockout + +The duration of time for which a [validator](#validator) is unable to +[vote](#ledger-vote) on another [fork](#fork). 
+ +## message + +The structured contents of a [transaction](#transaction). Generally containing a +header, array of account addresses, recent [blockhash](#blockhash), and an array +of [instructions](#instruction). + +Learn more about the +[message formatting inside of transactions](/docs/core/transactions#message-header) +here. + +## Nakamoto coefficient + +A measure of decentralization, the Nakamoto Coefficient is the smallest number +of independent entities that can act collectively to shut down a blockchain. The +term was coined by Balaji S. Srinivasan and Leland Lee in +[Quantifying Decentralization](https://news.earn.com/quantifying-decentralization-e39db233c28e). + +## native token + +The [token](#token) used to track work done by [nodes](#node) in a cluster. + +## node + +A computer participating in a [cluster](#cluster). + +## node count + +The number of [validators](#validator) participating in a [cluster](#cluster). + +## onchain program + +The executable code on Solana blockchain that interprets the +[instructions](#instruction) sent inside of each [transaction](#transaction) to +read and modify accounts over which it has control. These programs are often +referred to as "[_smart contracts_](/docs/core/programs)" on other +blockchains. + +## PoH + +See [Proof of History](#proof-of-history-poh). + +## point + +A weighted [credit](#credit) in a rewards regime. In the [validator](#validator) +[rewards regime](https://docs.anza.xyz/consensus/stake-delegation-and-rewards), +the number of points owed to a [stake](#stake) during redemption is the product +of the [vote credits](#vote-credit) earned and the number of lamports staked. + +## private key + +The private key of a [keypair](#keypair). + +## program + +See [onchain program](#onchain-program). + +## program derived account (PDA) + +An account whose signing authority is a program and thus is not controlled by a +private key like other accounts. + +## program id + +The public key of the [account](#account) containing a [program](#program). + +## proof of history (PoH) + +A stack of proofs, each of which proves that some data existed before the proof +was created and that a precise duration of time passed before the previous +proof. Like a [VDF](#verifiable-delay-function-vdf), a Proof of History can be +verified in less time than it took to produce. + +## prioritization fee + +An additional fee user can specify in the compute budget +[instruction](#instruction) to prioritize their [transactions](#transaction). + +The prioritization fee is calculated by multiplying the requested maximum +compute units by the compute-unit price (specified in increments of 0.000001 +lamports per compute unit) rounded up to the nearest lamport. + +Transactions should request the minimum amount of compute units required for +execution to minimize fees. + +## public key (pubkey) + +The public key of a [keypair](#keypair). + +## rent + +Fee paid by [Accounts](#account) and [Programs](#program) to store data on the +blockchain. When accounts do not have enough balance to pay rent, they may be +Garbage Collected. + +See also [rent exempt](#rent-exempt) below. Learn more about rent here: +[What is rent?](/docs/intro/rent). + +## rent exempt + +Accounts that maintain a minimum lamport balance that is proportional to the +amount of data stored on the account. All newly created accounts are stored +on-chain permanently until the account is closed. It is not possible to create +an account that falls below the rent exemption threshold. 
+ +## root + +A [block](#block) or [slot](#slot) that has reached maximum [lockout](#lockout) +on a [validator](#validator). The root is the highest block that is an ancestor +of all active forks on a validator. All ancestor blocks of a root are also +transitively a root. Blocks that are not an ancestor and not a descendant of the +root are excluded from consideration for consensus and can be discarded. + +## runtime + +The component of a [validator](#validator) responsible for [program](#program) +execution. + +## Sealevel + +Solana's parallel run-time for [onchain programs](#onchain-program). + +## shred + +A fraction of a [block](#block); the smallest unit sent between +[validators](#validator). + +## signature + +A 64-byte ed25519 signature of R (32-bytes) and S (32-bytes). With the +requirement that R is a packed Edwards point not of small order and S is a +scalar in the range of `0 <= S < L`. This requirement ensures no signature +malleability. Each transaction must have at least one signature for +[fee account](#fee-account). Thus, the first signature in transaction can be +treated as [transaction id](#transaction-id) + +## skip rate + +The percentage of [skipped slots](#skipped-slot) out of the total leader slots +in the current epoch. This metric can be misleading as it has high variance +after the epoch boundary when the sample size is small, as well as for +validators with a low number of leader slots, however can also be useful in +identifying node misconfigurations at times. + +## skipped slot + +A past [slot](#slot) that did not produce a [block](#block), because the leader +was offline or the [fork](#fork) containing the slot was abandoned for a better +alternative by cluster consensus. A skipped slot will not appear as an ancestor +for blocks at subsequent slots, nor increment the [block height](#block-height), +nor expire the oldest `recent_blockhash`. + +Whether a slot has been skipped can only be determined when it becomes older +than the latest [rooted](#root) (thus not-skipped) slot. + +## slot + +The period of time for which each [leader](#leader) ingests transactions and +produces a [block](#block). + +Collectively, slots create a logical clock. Slots are ordered sequentially and +non-overlapping, comprising roughly equal real-world time as per +[PoH](#proof-of-history-poh). + +## smart contract + +See [onchain program](#onchain-program). + +## SOL + +The [native token](#native-token) of a Solana [cluster](#cluster). + +## Solana Program Library (SPL) + +A [library of programs](https://spl.solana.com/) on Solana such as spl-token +that facilitates tasks such as creating and using tokens. + +## stake + +Tokens forfeit to the [cluster](#cluster) if malicious [validator](#validator) +behavior can be proven. + +## stake-weighted quality of service (SWQoS) + +SWQoS allows +[preferential treatment for transactions that come from staked validators](/developers/guides/advanced/stake-weighted-qos). + +## supermajority + +2/3 of a [cluster](#cluster). + +## sysvar + +A system [account](#account). [Sysvars](https://docs.anza.xyz/runtime/sysvars) +provide cluster state information such as current tick height, rewards +[points](#point) values, etc. Programs can access Sysvars via a Sysvar account +(pubkey) or by querying via a syscall. + +## thin client + +A type of [client](#client) that trusts it is communicating with a valid +[cluster](#cluster). + +## tick + +A ledger [entry](#entry) that estimates wallclock duration. 
+ +## tick height + +The Nth [tick](#tick) in the [ledger](#ledger). + +## token + +A digitally transferable asset. + +## Token Extensions Program + +The [Token Extensions Program](https://spl.solana.com/token-2022) has the +program ID `TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb` and includes all the +same features as the [Token Program](#token-program), but comes with extensions +such as confidential transfers, custom transfer logic, extended metadata, and +much more. + +## token mint + +An account that can produce (or 'mint') tokens. Different tokens are +distinguished by their unique token mint addresses. + +## Token Program + +The [Token Program](https://spl.solana.com/token) has the program ID +`TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA`, and provides the basic +capabilities of transferring, freezing, and minting tokens. + +## tps + +[Transactions](#transaction) per second. + +## tpu + +[Transaction processing unit](https://docs.anza.xyz/validator/tpu). + +## transaction + +One or more [instructions](#instruction) signed by a [client](#client) using one +or more [keypairs](#keypair) and executed atomically with only two possible +outcomes: success or failure. + +## transaction id + +The first [signature](#signature) in a [transaction](#transaction), which can be +used to uniquely identify the transaction across the complete [ledger](#ledger). + +## transaction confirmations + +The number of [confirmed blocks](#confirmed-block) since the transaction was +accepted onto the [ledger](#ledger). A transaction is finalized when its block +becomes a [root](#root). + +## transactions entry + +A set of [transactions](#transaction) that may be executed in parallel. + +## tvu + +[Transaction validation unit](https://docs.anza.xyz/validator/tvu). + +## validator + +A full participant in a Solana network [cluster](#cluster) that produces new +[blocks](#block). A validator validates the transactions added to the +[ledger](#ledger) + +## VDF + +See [verifiable delay function](#verifiable-delay-function-vdf). + +## verifiable delay function (VDF) + +A function that takes a fixed amount of time to execute that produces a proof +that it ran, which can then be verified in less time than it took to produce. + +## vote + +See [ledger vote](#ledger-vote). + +## vote credit + +A reward tally for [validators](#validator). A vote credit is awarded to a +validator in its vote account when the validator reaches a [root](#root). + +## wallet + +A collection of [keypairs](#keypair) that allows users to manage their funds. + +## warmup period + +Some number of [epochs](#epoch) after [stake](#stake) has been delegated while +it progressively becomes effective. During this period, the stake is considered +to be "activating". More info about: +[warmup and cooldown](https://docs.anza.xyz/consensus/stake-delegation-and-rewards#stake-warmup-cooldown-withdrawal) diff --git a/content/docs/toolkit/best-practices.mdx b/content/docs/toolkit/best-practices.mdx new file mode 100644 index 000000000..598fb2bf8 --- /dev/null +++ b/content/docs/toolkit/best-practices.mdx @@ -0,0 +1,118 @@ +--- +title: Best Practices +h1: Solana Smart Contract Best Practices +--- + +> This is a beta version of the [Solana Toolkit](/docs/toolkit/), and is +> still a WIP. Please post all feedback as a GitHub issue +> [here](https://github.com/solana-foundation/developer-content/issues/new?title=%5Btoolkit%5D%20). + +## Optimize Compute Usage + +To prevent abuse of computational resources, each transaction is allocated a +"compute budget". 
This budget specifies details about
[compute units](/docs/core/fees#compute-units) and includes:

- the compute costs associated with different types of operations the
  transaction may perform (compute units consumed per operation),
- the maximum number of compute units that a transaction can consume (compute
  unit limit),
- and the operational bounds the transaction must adhere to (like account data
  size limits)

When the transaction consumes its entire compute budget (compute budget
exhaustion), or exceeds a bound such as attempting to exceed the
[max call stack depth](https://github.com/anza-xyz/agave/blob/b7bbe36918f23d98e2e73502e3c4cba78d395ba9/program-runtime/src/compute_budget.rs#L138)
or the [max loaded account](/docs/core/fees#accounts-data-size-limit) data size
limit, the runtime halts transaction processing and returns an error, resulting
in a failed transaction and no state changes (aside from the transaction fee
being [collected](/docs/core/fees#fee-collection)).

### Additional References

- [How to Optimize Compute](/developers/guides/advanced/how-to-optimize-compute)
- [How to Request Optimal Compute](/developers/guides/advanced/how-to-request-optimal-compute)

## Saving Bumps

> Program Derived Addresses (PDAs) are addresses that are deterministically
> derived and look like standard public keys, but have no associated private
> keys. These PDAs are derived using a numerical value, called a "bump", to
> guarantee that the PDA is off-curve and cannot have an associated private
> key. It "bumps" the address off the cryptographic curve.

Saving the bump to your Solana smart contract account state ensures
deterministic address generation, efficiency in address reconstruction, reduced
transaction failure, and consistency across invocations.

### Additional References

- [How to derive a PDA](/docs/core/pda#how-to-derive-a-pda)
- [PDA Bumps Core Concepts](/docs/core/pda#canonical-bump)
- [Bump Seed Canonicalization Lesson](/developers/courses/program-security/bump-seed-canonicalization)

## Payer-Authority Pattern

The Payer-Authority pattern is an elegant way to handle situations where the
account’s funder (payer) differs from the account’s owner or manager
(authority). By requiring separate signers and validating them in your onchain
logic, you can maintain clear, robust, and flexible ownership semantics in your
program.

### Shank Example

```rust
// Create a new account.
+#[account(0, writable, signer, name="account", desc = "The address of the new account")] +#[account(1, writable, signer, name="payer", desc = "The account paying for the storage fees")] +#[account(2, optional, signer, name="authority", desc = "The authority signing for the account creation")] +#[account(3, name="system_program", desc = "The system program")] +CreateAccountV1(CreateAccountV1Args), +``` + +### Anchor Example + +```rust +#[derive(Accounts)] +pub struct CreateAccount<'info> { + /// The address of the new account + #[account(init, payer = payer_one, space = 8 + NewAccount::MAXIMUM_SIZE)] + pub account: Account<'info, NewAccount>, + + /// The account paying for the storage fees + #[account(mut)] + pub payer: Signer<'info>, + + /// The authority signing for the account creation + pub authority: Option>, + + // The system program + pub system_program: Program<'info, System> +} +``` + +### Additional References + +- [Metaplex Guide on Payer-Authority Pattern](https://developers.metaplex.com/guides/general/payer-authority-pattern) +- [Helium Program Library using the Payer-Authority Pattern](https://github.com/helium/helium-program-library/blob/master/programs/data-credits/src/instructions/change_delegated_sub_dao_v0.rs#L18) + +## Invariants + +Implement invariants, which are functions that you can call at the end of your +instruction to assert specific properties because they help ensure the +correctness and reliability of programs. + +```rust +require!(amount > 0, ErrorCode::InvalidAmount); +``` + +### Additional References + +- [Complete Project Code Example](https://github.com/solana-developers/developer-bootcamp-2024/blob/main/project-9-token-lottery/programs/token-lottery/src/lib.rs#L291) + +## Optimize Indexing + +You can make indexing easier by placing static size fields at the beginning and +variable size fields at the end of your onchain structures. diff --git a/content/docs/toolkit/getting-started.mdx b/content/docs/toolkit/getting-started.mdx new file mode 100644 index 000000000..0bfdb07e0 --- /dev/null +++ b/content/docs/toolkit/getting-started.mdx @@ -0,0 +1,90 @@ +--- +title: Getting Started +h1: Getting Started with the Solana Toolkit +--- + +> This is a beta version of the [Solana Toolkit](/docs/toolkit/), and is +> still a WIP. Please post all feedback as a GitHub issue +> [here](https://github.com/solana-foundation/developer-content/issues/new?title=%5Btoolkit%5D%20). + +The Solana Program development toolkit is publish as the +[`mucho` npm package](https://www.npmjs.com/package/mucho). The `mucho` command +will be used to run most of the Solana program development tools within the +toolkit - _mucho tools, one cli_. + +## Installation + +To get started, install The Solana Toolkit: + +```shell +npx -y mucho@latest install +``` + +This will install the latest versions of the following: + +- [Solana CLI / Agave Tool Suite](https://docs.anza.xyz/cli/): A command line + tool that allows developers to interact with the Solana blockchain, manage + accounts, send transactions, and deploy programs. +- [Mucho CLI](https://github.com/solana-developers/mucho) - a superset of + popular developer tools within the Solana ecosystem used to simplify the + development and testing of Solana blockchain programs. +- [Rust](https://doc.rust-lang.org/book/): The programming language that Solana + Smart Contracts are written in. +- [Anchor](https://www.anchor-lang.com/): A framework for writing Solana + programs that abstracts many complexities to speed up smart contract + development. 
+- [Fuzz Tester](https://ackee.xyz/trident/docs/latest/): Rust-based fuzzing + framework for Solana programs to help you ship secure code. +- [Code Coverage Tool](https://github.com/LimeChain/zest?tab=readme-ov-file): A + code coverage CLI tool for Solana programs. + +## Generate a keypair + +For a fresh installation of the [Solana CLI](https://docs.anza.xyz/cli/), you're +required to generate a new keypair. + +```shell +solana-keygen new +``` + +_This will store the your new keypair at the Solana CLI's default path +(`~/.config/solana/id.json`) and print the pubkey that was generated._ + +## Get your keypair's public key + +```shell +solana address +``` + +## Fund your keypair + +```shell +solana airdrop 10 --url localhost +``` + +## Set your network configuration + +Check your current config: + +```shell +solana config get +``` + +To use this toolkit, update your config to connect to localhost: + +```shell +solana config set --url localhost +``` + +To test locally, you must also spin up a local node to run on your localhost: + +```shell +mucho validator +``` + +For a more information, read the +[Local Testing Guide](/docs/toolkit/local-validator). + +## Next Steps + +Now let's [Create a Solana Project](/docs/toolkit/projects/overview)! diff --git a/content/docs/toolkit/index.mdx b/content/docs/toolkit/index.mdx new file mode 100644 index 000000000..5f0469ca4 --- /dev/null +++ b/content/docs/toolkit/index.mdx @@ -0,0 +1,17 @@ +--- +title: The Solana Toolkit +--- + +> This is a beta version of the [Solana Toolkit](/docs/toolkit/), and is +> still a WIP. Please post all feedback as a GitHub issue +> [here](https://github.com/solana-foundation/developer-content/issues/new?title=%5Btoolkit%5D%20). + +The Solana Program development toolkit is published as the +[`mucho` npm package](https://www.npmjs.com/package/mucho). The `mucho` command +will be used to run most of the Solana program development tools - _mucho tools, +one cli_. + +You can contribute to this +[Toolkit book on GitHub](https://github.com/solana-foundation/developer-content/tree/main/docs/toolkit). + +Now let's [Get Started](/docs/toolkit/getting-started)! diff --git a/content/docs/toolkit/local-validator.mdx b/content/docs/toolkit/local-validator.mdx new file mode 100644 index 000000000..0475297e6 --- /dev/null +++ b/content/docs/toolkit/local-validator.mdx @@ -0,0 +1,293 @@ +--- +title: Local Validator +h1: Running a Local Solana Validator Network +--- + +> This is a beta version of the [Solana Toolkit](/docs/toolkit/), and is +> still a WIP. Please post all feedback as a GitHub issue +> [here](https://github.com/solana-foundation/developer-content/issues/new?title=%5Btoolkit%5D%20). + +The Solana test validator is a local emulator for the Solana blockchain, +designed to provide developers with a private and controlled environment for +building and testing Solana programs without the need to connect to a public +testnet or mainnet. It also includes full support of the standard +[Solana RPC standard](/docs/rpc/http/). 
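Because the test validator speaks the standard RPC and websocket protocols, existing client code works against it unchanged. As a minimal sketch (assuming the legacy `@solana/web3.js` API and the validator's default RPC port `8899`, with the validator started as shown below), a script can fund a throwaway keypair with a local airdrop:

```typescript
import { Connection, Keypair, LAMPORTS_PER_SOL } from "@solana/web3.js";

// The local test validator serves JSON RPC on port 8899 by default.
const connection = new Connection("http://localhost:8899", "confirmed");

// A throwaway keypair for local testing only.
const payer = Keypair.generate();

// Airdrops are not rate-limited on the local validator.
const signature = await connection.requestAirdrop(
  payer.publicKey,
  2 * LAMPORTS_PER_SOL,
);
await connection.confirmTransaction(signature, "confirmed");

console.log("Local balance:", await connection.getBalance(payer.publicKey));
```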
+ +If you have the Solana CLI tool suite already installed, you can run the test +validator with the following command: + +```shell +mucho validator --help +``` + +> Install the [Solana Toolkit](/docs/toolkit/getting-started#installation) by +> running the following command: +> +> ```shell +> npx -y mucho@latest install +> ``` + +## Advantages + +- Ability to reset the blockchain state at any moment +- Ability to simulate different network conditions +- No RPC rate-limits +- No airdrop limits +- Direct onchain program deployment +- Ability to clone accounts and programs from a public cluster (i.e. devnet, + mainnet, etc) +- Ability to load accounts and programs from files +- Configurable transaction history retention +- Configurable epoch length + +## Starting the Test Validator + +To start your local validator, simply run: + +```shell +mucho validator +``` + +This command initializes a new ledger and starts the local validator running at +`http://localhost:8899`, which can be used as your Solana RPC connection url. + +## Connecting to the Test Validator + +To connect to the local test validator with the Solana CLI: + +```shell +solana config set --url localhost +``` + +This will ensure all your Solana CLI commands will be directed to your local +test validator. + +## Checking the Status of the Test Validator + +Before interacting with the test validator, it's useful to confirm its status +and ensure it is running correctly. + +```shell +solana ping +``` + +This command pings the local test validator and returns the current blockhash +and latency, verifying that it is active. + +## Deploying and Managing Programs Locally + +To deploy a compiled program (BPF) to the test validator: + +```shell +solana program deploy +``` + +This uploads and deploys a program to the blockchain. + +To check the details of a deployed program: + +```shell +solana program show +``` + +## Sending Transactions + +To transfer SOL from one account to another: + +```shell +solana transfer --from /path/to/keypair.json +``` + +This sends `AMOUNT` of SOL from the source account to the `RECIPIENT_ADDRESS`. + +## Simulating and Confirming Transactions + +Before actually sending a transaction, you can simulate it to see if it would +likely succeed: + +```shell +solana transfer --from /path/to/keypair.json \ + --simulate +``` + +To confirm the details and status of a transaction: + +```shell +solana confirm +``` + +## Viewing Recent Block Production + +To see information about recent block production, which can be useful for +debugging performance issues: + +```shell +solana block-production +``` + +## Validator Logs + +For debugging, you might want more detailed logs: + +```shell +solana logs +``` + +This streams log messages from the validator. + +### Tips for Logging + +- Increase log verbosity with the `-v` flag if you need more detailed output for + debugging. +- Use the `--rpc-port` and `--rpc-bind-address` options to customize the RPC + server settings. +- Adjust the number of CPU cores used by the validator with the `--gossip-host` + option to simulate network conditions more realistically. + +## Configuration + +View all the configuration options available for the Solana test validator: + +```shell +mucho validator --help +``` + +## Local Ledger + +By default, the ledger data is stored in a directory named `test-ledger` in your +current working directory. 
+ +### Specifying Ledger Location + +When starting the test validator, you can specify a different directory for the +ledger data using the `--ledger` option: + +```shell +mucho validator --ledger /path/to/custom/ledger +``` + +## Resetting the Ledger + +By default the validator will resume an existing ledger, if one is found. To +reset the ledger, you can either manually delete the ledger directory or restart +the validator with the `--reset` flag: + +```shell +mucho validator --reset +``` + +If the ledger exists, this command will reset the ledger to genesis, which +resets the state by deleting all existing accounts/programs and starting fresh. + +## Cloning Programs + +To add existing onchain programs to your local environment, you can clone the +program with a new ledger. This is useful for testing interactions with other +programs that already exist on any other cluster. + +To clone an account from another cluster: + +```shell +mucho validator --reset \ + --url CLUSTER_PROGRAM_IS_DEPLOYED_TO \ + --clone PROGRAM_ADDRESS +``` + +To clone an upgradeable program and its executable data from another cluster: + +```shell +mucho validator --reset \ + --url CLUSTER_PROGRAM_IS_DEPLOYED_TO \ + --clone-upgradeable-program PROGRAM_ADDRESS +``` + +> If a ledger already exists in your working directory, you must reset the +> ledger to be able to clone a program or account. + +## Cloning Accounts + +To add existing onchain accounts to your local environment, you can clone the +account with a new ledger from any other network cluster. + +To clone an account from the cluster when a ledger already exists: + +```shell +mucho validator --reset \ + --url CLUSTER_PROGRAM_IS_DEPLOYED_TO \ + --clone ACCOUNT_ADDRESS +``` + +## Reset to Specific Account Data + +To reset the state of specific accounts every time you start the validator, you +can use a combination of account snapshots and the `--account` flag. + +First, save the desired state of an account as a JSON file: + +```shell +solana account ACCOUNT_ADDRESS --output json --output-file account_state.json +``` + +Then load this state each time you reset the validator: + +```shell +mucho validator --reset \ + --account ACCOUNT_ADDRESS account_state.json +``` + +## Runtime Features + +Solana has a feature set mechanism that allows you to enable or disable certain +blockchain features when running the test validator. By default, the test +validator runs with all runtime features activated. + +To see all the runtime features available and their statuses: + +```shell +solana feature status +``` + +To query a specific runtime feature's status: + +```shell +solana feature status
+``` + +To deactivate specific features in genesis: + +> This must be done on a fresh ledger, so if a ledger already exists in your +> working directory you must add the `--reset` flag to reset to genesis. + +```shell +mucho validator --reset \ + --deactivate-feature +``` + +To deactivate multiple features in genesis: + +```shell +mucho validator --reset \ + --deactivate-feature +``` + +## Changing Versions + +To check your current `solana-test-validator` version: + +```shell +mucho validator --version +``` + +Your test validator runs on the same version as the Solana CLI installed and +configured for use. + +To test your programs against different versions of the Solana runtime, you can +install multiple versions of the Solana CLI and switch between them using the +following command: + +```shell +solana-install init +``` + +Make sure to reset your Solana test validator's ledger after changing versions +to ensure it runs a valid ledger without corruption. diff --git a/content/docs/toolkit/meta.json b/content/docs/toolkit/meta.json new file mode 100644 index 000000000..1e3b6f810 --- /dev/null +++ b/content/docs/toolkit/meta.json @@ -0,0 +1,12 @@ +{ + "title": "The Solana Toolkit", + "pages": [ + "getting-started", + "projects", + "best-practices", + "local-validator", + "test-suite", + "troubleshooting" + ], + "defaultOpen": true +} diff --git a/content/docs/toolkit/projects/anchor-init.mdx b/content/docs/toolkit/projects/anchor-init.mdx new file mode 100644 index 000000000..715f44a37 --- /dev/null +++ b/content/docs/toolkit/projects/anchor-init.mdx @@ -0,0 +1,74 @@ +--- +title: Basic Anchor +h1: Basic Anchor Smart Contracts +--- + +> This is a beta version of the [Solana Toolkit](/docs/toolkit/), and is +> still a WIP. Please post all feedback as a GitHub issue +> [here](https://github.com/solana-foundation/developer-content/issues/new?title=%5Btoolkit%5D%20). + +```shell +anchor init +``` + +## Overview + +This initializes a simplistic workspace set up for Anchor smart contract +development, with the following structure: + +- `Anchor.toml`: Anchor configuration file. +- `Cargo.toml`: Rust workspace configuration file. +- `package.json`: JavaScript dependencies file. +- `programs/`: Directory for Solana program crates. +- `app/`: Directory for your application frontend. +- `tests/`: Directory for JavaScript integration tests. +- `migrations/deploy.js`: Deploy script. + +The Anchor framework abstracts away many complexities enabling fast program +development. + +## Build and Test + +To test out this project before making any modifications, just build and test: + +```shell +anchor build +``` + +```shell +anchor test +``` + +To start writing your own Anchor smart contract, navigate to +`programs/src/lib.rs`. + +## File Structure Template + +For more complex programs, using a more structured project template would be the +best practice. This can be generated with: + +```shell +anchor init --template multiple +``` + +Which creates the following layout inside of `programs/src`: + +```shell +├── constants.rs +├── error.rs +├── instructions +│ ├── initialize.rs +│ └── mod.rs +├── lib.rs +└── state + └── mod.rs +``` + +For project file structure best practices, review this +[document](/docs/toolkit/projects/project-layout). 
+ +## Additional Resources + +- [Anchor book](https://www.anchor-lang.com/) +- [Getting Started with Anchor](/docs/programs/anchor/) +- [Program Examples](https://github.com/solana-developers/program-examples) diff --git a/content/docs/toolkit/projects/existing-project.mdx b/content/docs/toolkit/projects/existing-project.mdx new file mode 100644 index 000000000..0575bf28e --- /dev/null +++ b/content/docs/toolkit/projects/existing-project.mdx @@ -0,0 +1,88 @@ +--- +title: Existing Projects +h1: Update an Existing Project +--- + +> This is a beta version of the [Solana Toolkit](/docs/toolkit/), and is +> still a WIP. Please post all feedback as a GitHub issue +> [here](https://github.com/solana-foundation/developer-content/issues/new?title=%5Btoolkit%5D%20). + +If you have an existing Anchor program and want to use the +[`create-solana-program`](https://github.com/solana-program/create-solana-program) +tool, you can easily replace the generated program with your existing one: + + + + + +### Verify correct versions + +Ensure the installed Solana and Anchor versions are the same as the ones your +existing program requires. + + + + +### Run create-solana-program + +Scaffold a new Solana program using Anchor by running: + +```shell +npx create-solana-program --anchor +``` + + + + +### Migrate your program source code + +Replace the `program` folder with your existing program directory (not the +workspace directory). If you have more than one program, add more folders to the +root directory and update the `members` attribute of the top-level `Cargo.toml` +accordingly. + + + + +### Update each program's Cargo.toml + +Ensure your program’s `Cargo.toml` contains the following metadata: + +```toml title="Cargo.toml" +[package.metadata.solana] +program-id = "YOUR_PROGRAM_ADDRESS" +program-dependencies = [] +``` + + + + +### Build your program and clients + +Run the following commands to build your programs and generate the clients: + +```shell +npm install +npm run programs:build +npm run generate +``` + + + + +### Update the ID alias + +If you have a generated Rust client, update the `clients/rust/src/lib.rs` file +so the `ID` alias points to the correct generated constant. + + + + +### Update any client tests + +If you have any generated clients, update the scaffolded tests so they work with +your existing program. + + + + diff --git a/content/docs/toolkit/projects/meta.json b/content/docs/toolkit/projects/meta.json new file mode 100644 index 000000000..de70cf14d --- /dev/null +++ b/content/docs/toolkit/projects/meta.json @@ -0,0 +1,12 @@ +{ + "title": "Creating a Project", + "pages": [ + "overview", + "anchor-init", + "solana-program", + "web-app", + "mobile-app", + "existing-project", + "project-layout" + ] +} diff --git a/content/docs/toolkit/projects/mobile-app.mdx b/content/docs/toolkit/projects/mobile-app.mdx new file mode 100644 index 000000000..517461c00 --- /dev/null +++ b/content/docs/toolkit/projects/mobile-app.mdx @@ -0,0 +1,41 @@ +--- +title: Mobile App +h1: Mobile App with a Smart Contract Connection +--- + +> This is a beta version of the [Solana Toolkit](/docs/toolkit/), and is +> still a WIP. Please post all feedback as a GitHub issue +> [here](https://github.com/solana-foundation/developer-content/issues/new?title=%5Btoolkit%5D%20). 
+ +```shell +yarn create expo-app --template @solana-mobile/solana-mobile-expo-template +``` + +## Overview + +This initializes a new project using the [Expo](https://expo.dev) framework, +which is specifically designed for creating mobile applications that interact +with the Solana blockchain. + +Follow this template's guide for +"[Running the app](https://docs.solanamobile.com/react-native/expo#running-the-app)" +in order to launch the template as a custom development build and get it running +on your Android emulator. Once you have built the program and are running a dev +client with Expo, the emulator will automatically update every time you save +your code. + +## Prerequisites + +To use this template, you will also need to set up the following: + +- [Android Studio and Emulator](https://docs.solanamobile.com/getting-started/development-setup) +- [React Native](https://reactnative.dev/docs/environment-setup?platform=android) +- [EAS CLI and Account](https://docs.expo.dev/build/setup/) + +## Additional Resources + +- [Solana Mobile Development](https://docs.solanamobile.com/getting-started/intro) +- [Mobile App Example - Cash App Clone](/developers/guides/dapps/cash-app) +- [Anchor book](https://www.anchor-lang.com/) +- [Getting Started with Anchor](/docs/programs/anchor/) +- [Program Examples](https://github.com/solana-developers/program-examples) diff --git a/content/docs/toolkit/projects/overview.mdx b/content/docs/toolkit/projects/overview.mdx new file mode 100644 index 000000000..0eddf93f4 --- /dev/null +++ b/content/docs/toolkit/projects/overview.mdx @@ -0,0 +1,76 @@ +--- +title: Overview +altRoutes: + - /docs/toolkit/projects +h1: Smart Contract Project Templates +--- + +> This is a beta version of the [Solana Toolkit](/docs/toolkit/), and is +> still a WIP. Please post all feedback as a GitHub issue +> [here](https://github.com/solana-foundation/developer-content/issues/new?title=%5Btoolkit%5D%20). + +Choose from one of the below scaffolds to generate a new project workspace: + +- [Anchor](#anchor) - A popular Rust-based framework for creating Solana smart + contracts. +- [`create-solana-program`](#create-solana-program) - In-depth workspace + generator for either Anchor or native smart contract development, including + JavaScript and Rust clients. +- [Web App Templates](#web-app-template) - Generator for new projects that + connect a Solana smart contract to various frontend stacks, including wallet + connector setup. + +## Anchor + +```shell +anchor init +``` + +This generates a basic workspace where you can write, build, test, and deploy +Anchor Rust smart contracts. For more information, read the +[`anchor init` doc](/docs/toolkit/projects/anchor-init). + +## Create Solana Program + +```shell +npx create-solana-program +``` + +This generates an in-depth workspace for either Anchor smart contract +development or native smart contract development with either a JavaScript +client, a Rust client, or both. For more information, read the +[`create-solana-program` doc](/docs/toolkit/projects/solana-program). + +## Web App Template + +```shell +npx create-solana-dapp +``` + +This initializes a new project that connects a Solana smart contract to a +TypeScript frontend with a wallet connector. For more information, read the +[web app template doc](/docs/toolkit/projects/web-app).
+ +## Mobile App Template + +```shell +yarn create expo-app --template @solana-mobile/solana-mobile-expo-template +``` + +This initializes a new project using the Expo framework, which is specifically +designed for creating mobile applications that interact with the Solana +blockchain. + +## Update an Existing Project + +```shell +npx create-solana-program +``` + +You can add the Solana program scaffold to an existing project by following this +[guide](/docs/toolkit/projects/existing-project). + +## Standard Project Layouts + +For best practices on smart contract file structure, read this +[guide](/docs/toolkit/projects/project-layout). diff --git a/content/docs/toolkit/projects/project-layout.mdx b/content/docs/toolkit/projects/project-layout.mdx new file mode 100644 index 000000000..6f596d227 --- /dev/null +++ b/content/docs/toolkit/projects/project-layout.mdx @@ -0,0 +1,73 @@ +--- +title: Project layout +h1: Smart Contract Repo File Structure +--- + +> This is a beta version of the [Solana Toolkit](/docs/toolkit/), and is +> still a WIP. Please post all feedback as a GitHub issue +> [here](https://github.com/solana-foundation/developer-content/issues/new?title=%5Btoolkit%5D%20). + +Typically, Solana smart contract (aka [program](/docs/core/programs)) +workspaces will have the following file structure: + +```shell +. +├── app +├── migrations +├── node_modules +├── programs +├── target +└── tests +``` + +The main smart contract is the `lib.rs` file, which lives inside the `programs` +directory, as shown below: + +```shell +. +├── app +├── migrations +├── node_modules +├── programs + ├── src + ├── lib.rs +├── target +└── tests +``` + +As the smart contract grows more complex, you'll typically want to separate +the logic into multiple files, as shown below: + +```shell +├── programs + ├── src + ├── state.rs + ├── instructions + ├── instruction_1.rs + ├── instruction_2.rs + ├── instruction_3.rs + ├── lib.rs + ├── constants.rs + ├── error.rs + ├── mod.rs +``` + +For [native rust smart contract development](/docs/programs/rust/), you +need to explicitly write out the entrypoint and processor for the program, so +you'll need a few more files: + +```shell +├── program.rs +│ ├── src.rs +│ │ ├──assertions.rs +│ │ ├──entrypoint.rs +│ │ ├──error.rs +│ │ ├──instruction.rs +│ │ ├──lib.rs +│ │ ├──processor.rs +│ │ ├──state.rs +│ │ ├──utils.rs +│ ├── Cargo.toml +│ ├── keypair.json +│ ├── README.md +``` diff --git a/content/docs/toolkit/projects/solana-program.mdx b/content/docs/toolkit/projects/solana-program.mdx new file mode 100644 index 000000000..55be934d9 --- /dev/null +++ b/content/docs/toolkit/projects/solana-program.mdx @@ -0,0 +1,102 @@ +--- +title: Solana Programs +h1: Solana Program Scaffold +--- + +> This is a beta version of the [Solana Toolkit](/docs/toolkit/), and is +> still a WIP. Please post all feedback as a GitHub issue +> [here](https://github.com/solana-foundation/developer-content/issues/new?title=%5Btoolkit%5D%20). + +```shell +npx create-solana-program +``` + +[`create-solana-program`](https://github.com/solana-program/create-solana-program) +initializes an in-depth workspace with everything you need for general Solana +smart contract development. This scaffold allows you to write either native rust +smart contracts or Anchor smart contracts.
+ +## Program Frameworks + +After running this command, you'll have the option to choose between Shank and +Anchor for the program framework: + +- **Shank** creates a vanilla Solana smart contract with Shank macros to + generate IDLs. For more information on Shank, read its + [README](https://github.com/metaplex-foundation/shank). + +- **Anchor** creates a smart contract using the Anchor framework, which + abstracts away many complexities enabling fast program development. For more + information on the Anchor framework, read the + [Anchor book](https://www.anchor-lang.com/). + +### Anchor framework + +For **Anchor rust development**, chose Anchor when asked which program framework +to use. This will create a basic Anchor counter program with the following +project structure for your program: + +```shell +├── program.rs +│ ├── src.rs +│ │ ├── lib.rs +│ ├── Cargo.toml +│ ├── keypair.json +│ ├── README.md +``` + +### Native rust + +For **native rust development**, make sure you chose Shank when asked which +program framework to use. This will create a basic counter program with the +following project structure for your program: + +```shell +├── program.rs +│ ├── src.rs +│ │ ├── assertions.rs +│ │ ├──entrypoint.rs +│ │ ├──error.rs +│ │ ├──instruction.rs +│ │ ├──lib.rs +│ │ ├──processor.rs +│ │ ├──state.rs +│ │ ├──utils.rs +│ ├── Cargo.toml +│ ├── keypair.json +│ ├── README.md +``` + +## Generated Clients + +Next, you'll have the option to choose between a JavaScript client, a Rust +Client, or both. + +- **JavaScript Client** creates a typescript library compatible with + [web3.js](https://solana-labs.github.io/solana-web3.js/). + +- **Rust Client** creates a rust crate allowing consumers to interact with the + smart contract. + +For further workspace customization and additional information, check out the +`create-solana-program` +[README](https://github.com/solana-program/create-solana-program?tab=readme-ov-file). + +## Build + +After answering the above prompts, the workspace will be generated. To get +started, build your program and clients by running: + +```shell +cd +npm install +npm run generate +``` + +To update an existing Anchor project to have this scaffold, read this +[guide](/docs/toolkit/projects/existing-project). + +## Additional Resources + +- [Developing Rust Programs](/docs/programs/rust/) +- [Program Examples](https://github.com/solana-developers/program-examples) diff --git a/content/docs/toolkit/projects/web-app.mdx b/content/docs/toolkit/projects/web-app.mdx new file mode 100644 index 000000000..60c17d531 --- /dev/null +++ b/content/docs/toolkit/projects/web-app.mdx @@ -0,0 +1,87 @@ +--- +title: Web App +h1: Web App with a Smart Contract Connection +--- + +> This is a beta version of the [Solana Toolkit](/docs/toolkit/), and is +> still a WIP. Please post all feedback as a GitHub issue +> [here](https://github.com/solana-foundation/developer-content/issues/new?title=%5Btoolkit%5D%20). + +```shell +npx create-solana-dapp +``` + +This command generates a new project that connects a Solana smart contract to a +frontend with a wallet connector. It has options for multiple popular frontend +stacks and UI libraries, including: NextJS, React, Tailwind, and more. 
+ +## Build and Test + +To test out this project before making any modifications, follow these steps: + + + + + +### Build the smart contract + +```shell +npm run anchor-build +``` + + + + +### Start the local validator + +```shell +npm run anchor-localnet +``` + + + + +### Run tests + +```shell +npm run anchor-test +``` + + + + +### Deploy to the local validator + +```shell +npm run anchor deploy --provider.cluster localnet +``` + + + + +### Build the web app + +```shell +npm run build +``` + + + + +### Run the web app + +```shell +npm run dev +``` + + + + + +## Additional Resources + +- [`create-solana-dapp` README](https://github.com/solana-developers/create-solana-dapp) +- [CRUD App Example](/developers/guides/dapps/journal) +- [Anchor book](https://www.anchor-lang.com/) +- [Getting Started with Anchor](/docs/programs/anchor/) +- [Program Examples](https://github.com/solana-developers/program-examples) diff --git a/content/docs/toolkit/test-suite/basics.mdx b/content/docs/toolkit/test-suite/basics.mdx new file mode 100644 index 000000000..d68433748 --- /dev/null +++ b/content/docs/toolkit/test-suite/basics.mdx @@ -0,0 +1,51 @@ +--- +title: Testing Basics +h1: Solana Testing Basics +--- + +> This is a beta version of the [Solana Toolkit](/docs/toolkit/), and is +> still a WIP. Please post all feedback as a GitHub issue +> [here](https://github.com/solana-foundation/developer-content/issues/new?title=%5Btoolkit%5D%20). + +## Installation + +Install the Solana Toolkit by running the following command: + +```shell +npx -y mucho@latest install +``` + +## Build + +```shell +mucho build +``` + +## Start Localnet + +```shell +mucho validator +``` + +## Run Tests + +Anchor Programs: + +```shell +anchor test +``` + +Native Programs: + +```shell +cargo test +``` + +## Deploy + +```shell +mucho deploy +``` + +For more information on local validator customization and commands, read the +[Solana Test Validator Guide](/developers/guides/getstarted/solana-test-validator). diff --git a/content/docs/toolkit/test-suite/code-coverage.mdx b/content/docs/toolkit/test-suite/code-coverage.mdx new file mode 100644 index 000000000..bec248969 --- /dev/null +++ b/content/docs/toolkit/test-suite/code-coverage.mdx @@ -0,0 +1,25 @@ +--- +title: Code Coverage +h1: Solana Code Coverage Tool +--- + +> This is a beta version of the [Solana Toolkit](/docs/toolkit/), and is +> still a WIP. Please post all feedback as a GitHub issue +> [here](https://github.com/solana-foundation/developer-content/issues/new?title=%5Btoolkit%5D%20). + +```shell +mucho coverage +``` + +## Overview + +This command will run a code coverage test on all of your Rust tests and then +generates a report as an HTML page providing metrics on where additional tests +may be needed to improve your current code coverage. + +> Currently, this tool only works on tests written in Rust and is not compatible +> with a JavaScript test suite. + +## Additional Resources + +- [Source Code](https://github.com/LimeChain/zest?tab=readme-ov-file) diff --git a/content/docs/toolkit/test-suite/fuzz-tester.mdx b/content/docs/toolkit/test-suite/fuzz-tester.mdx new file mode 100644 index 000000000..7b6fa7f62 --- /dev/null +++ b/content/docs/toolkit/test-suite/fuzz-tester.mdx @@ -0,0 +1,75 @@ +--- +title: Fuzz Tester +h1: Solana Fuzz Tester +--- + +> This is a beta version of the [Solana Toolkit](/docs/toolkit/), and is +> still a WIP. 
Please post all feedback as a GitHub issue +> [here](https://github.com/solana-foundation/developer-content/issues/new?title=%5Btoolkit%5D%20). + +> The Trident fuzz tester is still a WIP, is currently only Anchor compatible, +> and may require some manual work to complete tests. + +## Initialize Fuzz Tests + +Navigate to an Anchor based workspace and run: + +```shell +trident init +``` + +This command does the following: + +- Builds the Anchor-based project. +- Reads the generated IDL. +- Based on the IDL creates the fuzzing template. + +## Define Fuzz Accounts + +Define an `AccountsStorage` type for each `Account` you would like to use: + +```rust +#[doc = r" Use AccountsStorage<T> where T can be one of:"] +#[doc = r" Keypair, PdaStore, TokenStore, MintStore, ProgramStore"] +#[derive(Default)] +pub struct FuzzAccounts { + author: AccountsStorage<Keypair>, + hello_world_account: AccountsStorage<PdaStore>, + // No need to fuzz system_program + // system_program: AccountsStorage, +} +``` + +## Implement Fuzz Instructions + +Each instruction in the fuzz test has to define the following functions: + +- `get_program_id()` specifies which program the instruction belongs to. This + function is automatically defined and should not need any updates. It is + especially important if you have multiple programs in your workspace, as it + allows Trident to generate instruction sequences corresponding to the + different programs. +- `get_data()` specifies what instruction inputs are sent to the program + instructions. +- `get_accounts()` specifies what accounts are sent to the program instructions. + +## Execute Fuzz Tests + +```shell +# Replace with the name of the +# fuzz test (for example: "fuzz_0") +trident fuzz run-hfuzz +``` + +## Debug Fuzz Tests + +```shell +# fuzzer will run the with the specified +trident fuzz debug-hfuzz +``` + +For additional documentation, go [here](https://ackee.xyz/trident/docs/latest/). + +## Additional Resources + +- [Fuzz Tester Source Code](https://github.com/Ackee-Blockchain/trident). diff --git a/content/docs/toolkit/test-suite/js-test.mdx b/content/docs/toolkit/test-suite/js-test.mdx new file mode 100644 index 000000000..04aa9dbb2 --- /dev/null +++ b/content/docs/toolkit/test-suite/js-test.mdx @@ -0,0 +1,69 @@ +--- +title: JavaScript Tests +h1: JavaScript Testing Framework for Solana +--- + +> This is a beta version of the [Solana Toolkit](/docs/toolkit/), and is +> still a WIP. Please post all feedback as a GitHub issue +> [here](https://github.com/solana-foundation/developer-content/issues/new?title=%5Btoolkit%5D%20). + +## Add Dependency + +Navigate to your smart contract directory and run: + +```shell +npm install solana-bankrun +``` + +## Bankrun Overview + +[Bankrun](https://github.com/kevinheavey/solana-bankrun) is a fast and +lightweight framework for testing Solana programs in NodeJS. + +It uses the +[`solana-program-test`](https://crates.io/crates/solana-program-test) crate +under the hood and allows you to do things that are not possible with +`solana-test-validator`, such as jumping back and forth in time or dynamically +setting account data. + +Bankrun works by spinning up a lightweight `BanksServer` that's like an RPC node +but much faster, and creating a `BanksClient` to talk to the server. This runs +the Solana +[Banks](https://github.com/solana-labs/solana/blob/master/runtime/src/bank.rs).
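For example, the "jumping in time" capability mentioned above looks roughly like the test below. This is a hedged sketch based on the Bankrun documentation: the `warpToSlot` and `getSlot` methods are assumptions about the `solana-bankrun` API for the version you install, so verify them against its docs before relying on them:

```typescript
import { start } from "solana-bankrun";

test("warp forward in time", async () => {
  const context = await start([], []);
  const client = context.banksClient;

  // Read the current slot from the in-process bank
  const slotBefore = await client.getSlot();

  // Jump the bank ahead to an arbitrary future slot, something a real
  // solana-test-validator cannot do
  context.warpToSlot(slotBefore + 1000n);

  const slotAfter = await client.getSlot();
  expect(slotAfter > slotBefore).toBe(true);
});
```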
+ +## Minimal Example + +```javascript +import { start } from "solana-bankrun"; +import { PublicKey, Transaction, SystemProgram } from "@solana/web3.js"; + +test("one transfer", async () => { + const context = await start([], []); + const client = context.banksClient; + const payer = context.payer; + const receiver = PublicKey.unique(); + const blockhash = context.lastBlockhash; + const transferLamports = 1_000_000n; + const ixs = [ + SystemProgram.transfer({ + fromPubkey: payer.publicKey, + toPubkey: receiver, + lamports: transferLamports, + }), + ]; + const tx = new Transaction(); + tx.recentBlockhash = blockhash; + tx.add(...ixs); + tx.sign(payer); + await client.processTransaction(tx); + const balanceAfter = await client.getBalance(receiver); + expect(balanceAfter).toEqual(transferLamports); +}); +``` + +## Additional Resources + +- [Bankrun Docs](https://kevinheavey.github.io/solana-bankrun/) +- [Bankrun Source Code](https://github.com/kevinheavey/solana-bankrun) +- [Official Bankrun Tutorials](https://kevinheavey.github.io/solana-bankrun/tutorial/) +- [Complete Project Example](https://github.com/solana-developers/developer-bootcamp-2024/tree/main/project-2-voting/anchor/tests) diff --git a/content/docs/toolkit/test-suite/meta.json b/content/docs/toolkit/test-suite/meta.json new file mode 100644 index 000000000..902997d62 --- /dev/null +++ b/content/docs/toolkit/test-suite/meta.json @@ -0,0 +1,12 @@ +{ + "title": "Test Suite", + "pages": [ + "overview", + "basics", + "code-coverage", + "fuzz-tester", + "security-scanner", + "js-test", + "rust-tests" + ] +} diff --git a/content/docs/toolkit/test-suite/overview.mdx b/content/docs/toolkit/test-suite/overview.mdx new file mode 100644 index 000000000..bb0701110 --- /dev/null +++ b/content/docs/toolkit/test-suite/overview.mdx @@ -0,0 +1,23 @@ +--- +title: Overview +altRoutes: + - /docs/toolkit/test-suite +h1: Solana Test Suite Overview +--- + +> This is a beta version of the [Solana Toolkit](/docs/toolkit/), and is +> still a WIP. Please post all feedback as a GitHub issue +> [here](https://github.com/solana-foundation/developer-content/issues/new?title=%5Btoolkit%5D%20). + +Within the Solana Toolkit, there are several resources for testing Solana Smart +Contracts, including: + +- A fuzz tester. +- A code coverage tool. +- A framework for testing Solana programs in NodeJS that spins up a lightweight + `BanksServer` that's like an RPC node but much faster and creates a + `BanksClient` to talk to the server. +- A fast and lightweight library for testing Solana programs in Rust, which + works by creating an in-process Solana VM optimized for program developers. +- A tool to scan your repo for common security vulnerabilities and provide + suggestions for fixes. diff --git a/content/docs/toolkit/test-suite/rust-tests.mdx b/content/docs/toolkit/test-suite/rust-tests.mdx new file mode 100644 index 000000000..3d2eb0de5 --- /dev/null +++ b/content/docs/toolkit/test-suite/rust-tests.mdx @@ -0,0 +1,62 @@ +--- +title: Rust Tests +h1: Rust Testing Framework for Solana +--- + +> This is a beta version of the [Solana Toolkit](/docs/toolkit/), and is +> still a WIP. Please post all feedback as a GitHub issue +> [here](https://github.com/solana-foundation/developer-content/issues/new?title=%5Btoolkit%5D%20). + +## Add Dependency + +Navigate to your smart contract directory and run: + +```shell +cargo add --dev litesvm +``` + +## LiteSVM Overview + +[LiteSVM](https://github.com/LiteSVM/litesvm) is a fast and lightweight library +for testing Solana programs. 
It works by creating an in-process Solana VM +optimized for program developers. This makes it much faster to run and compile +than alternatives like +[`solana-program-test`](/docs/toolkit/test-suite/basics) and +[`solana-test-validator`](/docs/toolkit/local-validator). In a further break +from tradition, it has an ergonomic API with sane defaults and extensive +configurability for those who want it. + +## Minimal Example + +```rust +use litesvm::LiteSVM; +use solana_program::{message::Message, pubkey::Pubkey, system_instruction::transfer}; +use solana_sdk::{signature::Keypair, signer::Signer, transaction::Transaction}; + +let from_keypair = Keypair::new(); +let from = from_keypair.pubkey(); +let to = Pubkey::new_unique(); + +let mut svm = LiteSVM::new(); +svm.airdrop(&from, 10_000).unwrap(); + +let instruction = transfer(&from, &to, 64); +let tx = Transaction::new( + &[&from_keypair], + Message::new(&[instruction], Some(&from)), + svm.latest_blockhash(), +); +let tx_res = svm.send_transaction(tx).unwrap(); + +let from_account = svm.get_account(&from); +let to_account = svm.get_account(&to); +assert_eq!(from_account.unwrap().lamports, 4936); +assert_eq!(to_account.unwrap().lamports, 64); + +``` + +## Additional Resources + +- [Source Code](https://github.com/LiteSVM/litesvm) +- [Complete Project Example](https://github.com/cavemanloverboy/nawnce/blob/main/src/lib.rs) +- [More Complex Project Example](https://github.com/pyth-network/per) diff --git a/content/docs/toolkit/test-suite/security-scanner.mdx b/content/docs/toolkit/test-suite/security-scanner.mdx new file mode 100644 index 000000000..e045403be --- /dev/null +++ b/content/docs/toolkit/test-suite/security-scanner.mdx @@ -0,0 +1,26 @@ +--- +title: Security Scanner +h1: Security Vulnerability Scanner +--- + +> This is a beta version of the [Solana Toolkit](/docs/toolkit/), and is +> still a WIP. Please post all feedback as a GitHub issue +> [here](https://github.com/solana-foundation/developer-content/issues/new?title=%5Btoolkit%5D%20). + +## Static Analysis Tools + +[Radar](https://github.com/Auditware/radar?tab=readme-ov-file) is static +analysis tool for Anchor rust programs. It allows you to write, share, and +utilize templates to identify security issues in rust-based smart contracts +using a powerful python based rule engine that enables automating detection of +vulnerable code patterns through logical expressions. + +[Xray](https://github.com/sec3-product/x-ray) is an open-source, cross-platform +command-line interface (CLI) tool designed for static analysis of Solana +programs and smart contracts written in Rust. + +## Common Security Exploits and Protections + +Read [Sealevel Attacks](https://github.com/coral-xyz/sealevel-attacks) for +examples of common exploits unique to the Solana programming model and +recommended idioms for avoiding these attacks using the Anchor framework. diff --git a/content/docs/toolkit/troubleshooting.mdx b/content/docs/toolkit/troubleshooting.mdx new file mode 100644 index 000000000..37c2c8f4b --- /dev/null +++ b/content/docs/toolkit/troubleshooting.mdx @@ -0,0 +1,17 @@ +--- +title: Troubleshooting +h1: Troubleshooting Solana Programs +--- + +When diagnosing problems with Solana development, you can easily gather loads of +troubleshooting information with the following command: + +```shell +npx mucho info +``` + +## Solana Stack Exchange + +The [Solana stack exchange](https://solana.stackexchange.com/) is the best place +for conversations around Solana development. 
If you ever get stuck, ask your +question [here](https://solana.stackexchange.com/questions/ask). diff --git a/content/guides/advanced/auto-approve.mdx b/content/guides/advanced/auto-approve.mdx new file mode 100644 index 000000000..a06920a4e --- /dev/null +++ b/content/guides/advanced/auto-approve.mdx @@ -0,0 +1,92 @@ +--- +date: 2024-04-25T00:00:00Z +difficulty: beginner +title: How to auto approve transactions +seoTitle: How to auto approve transactions on Solana +description: + By auto approving transactions dApps and games can create a more fluid user + experience. This is especially interesting for on-chain games. +tags: + - games + - wallets + - transactions + - unity +keywords: + - tutorial + - auto approve + - transactions + - blockchain developer + - blockchain tutorial + - web3 developer +--- + +Auto approving transactions means that the user does not have to manually +confirm every transaction. This is especially useful for on-chain games where +you want to have fluid game play. Here are some options on how this can be +achieved. + +## Wallet Auto Approve + +A few popular Solana wallet applications have transaction "auto approve" +functionality directly with in them. Some accomplish this with burner wallets. +This is a very convenient solution, but it may limit your players to using one +of these wallets. Players may also be resistant to activate the feature since it +may be seen as a security risk. + +- [Solflare auto approve](https://twitter.com/solflare_wallet/status/1625950688709644324) +- [Phantom auto approve](https://phantom.app/learn/blog/auto-confirm) + +## Local Keypair + +Another way to add transaction auto approval is to create a keypair in your +game/dApp and let the player transfer some SOL to that wallet and then use it to +pay for transaction fees. The only problem with this is that you need to handle +the security for this wallet and the keys can get lost if the users clear their +browser cache. + +Here, you can find some example source code on how to implement transaction auto +approval using local keypairs: + +- [Example Source Code](https://github.com/solana-developers/solana-game-examples/blob/main/seven-seas/unity/Assets/SolPlay/Scripts/Services/WalletHolderService.cs) +- [Example Game Seven seas](https://solplay.de/sevenseas/) + +## Server Backend + +Using a server backend for your game or dApp, you can configure a system that +will allow your backend to handle paying the [Solana fees](/docs/core/fees) +yourself. This backend would allow you to create and sign transactions for the +user, marking your secure backend keypair the `fee payer`, and enabling your +application to interact with it via an API endpoint. + +To accomplish this, your client application (i.e. game or dApp) would send +parameters to your backend server. The backend server would then authenticate +that request and the build a transaction with the required data from the user. +The backend would then sign and send this transaction to the Solana blockchain, +confirm the transaction was successful, and send a confirmation message to the +client. + +This server backend method is an easy and convenient solution, but you need to +handle the user's authentication and security. So that could add complexity to +your application's infrastructure and architecture. + +## Session Keys + +Session Keys are ephemeral (short lived) keypairs with fine-grained +program/instruction scoping for tiered access in your Solana programs. 
They +allow users to interact with apps by signing transactions locally using a +temporary keypair. The temporary keypair acts a bit like an oauth token from web +2, allowing access for a certain amount of time. + +Session keys make for a really great user experience, but they do need some +extra work to implement in the on-chain program. You can use the `session-keys` +crate maintained by [Magic Block](https://www.magicblock.gg/) using their +[official documentation](https://docs.magicblock.gg/Onboarding/Session%20Keys/integrating-sessions-in-your-program). + +## Shadow Signer + +Shadow signer is a feature within the +[Honeycomb Protocol](https://twitter.com/honeycomb_prtcl) that allows you to +sign transactions. + +- [How Shadow Signer works](https://twitter.com/honeycomb_prtcl/status/1777807635795919038) +- [Docs](https://docs.honeycombprotocol.com/services/) diff --git a/content/guides/advanced/how-to-optimize-compute.mdx b/content/guides/advanced/how-to-optimize-compute.mdx new file mode 100644 index 000000000..24274d3f1 --- /dev/null +++ b/content/guides/advanced/how-to-optimize-compute.mdx @@ -0,0 +1,232 @@ +--- +featured: true +date: 2024-03-15T00:00:00Z +difficulty: intermediate +title: "How to Optimize Compute Usage on Solana" +description: + "Minimizing the amount of compute a program uses is critical both for the + performance and cost of executing transactions. This guide will show you how + to optimize compute usage in your programs on Solana." +tags: + - rust + - compute +keywords: + - tutorial + - priority fees + - compute usage + - offline signing + - transactions + - intro to solana development + - blockchain developer + - blockchain tutorial + - web3 developer +--- + +When developing on Solana, it's important to keep in mind the compute usage of +your programs. Program compute usage has an impact on both the max performance +your users can have, as well as increase the cost of executing transactions with +priority fees. + +Optimizing your Compute Unit (CU) usage has the following benefits: + +1. A smaller transaction is more likely to be included in a block. +2. Cheaper instructions make your program more composable. +3. Lowers overall amount of block usage, enabling more transactions to be + included in a block. + +In this guide, we'll cover how to optimize your program's compute usage to +ensure it's as efficient as possible. + +## What are the Current Compute Limitations? + +Solana programs have a few compute limitations to be aware of: + +- **Max Compute per block**: 48 million CU +- **Max Compute per account per block**: 12 million CU +- **Max Compute per transaction**: 1.4 million CU + +Keeping your program's compute usage within these limits is important to ensure +your program can be executed in a timely manner and at a reasonable cost. +Especially when your program starts to get used by a large number of users, you +want to make sure that your program's compute usage is as efficient as possible +to avoid hitting the max compute per account cap. + +## How to Measure Compute Usage + +When building out your Solana program, you'll want to check how much compute +different parts of your program are using. You can use the `compute_fn` macro to +measure compute unit usage of different snippets of code. 
+ +You measure your compute usage with the following code: + +```rust +compute_fn!("My message" => { + // Your code here +}); +``` + +The output of this macro will give you the compute usage before and after your +code, helping you understand what parts of your program are using the most +compute. You can find an example of using this macro in the +[cu_optimizations repository](https://github.com/solana-developers/cu_optimizations/blob/main/counterAnchor/anchor/programs/counter/src/lib.rs#L20). + +## Optimizing your Program + +### Logging + +While logging is a great way to understand what is going on inside your program, +logging is also very expensive. You should avoid logging non-essential +information in your programs to keep your compute usage down. + +For example, both base58 encoding and concatenation are expensive operations: + +```rust +// 11962 CU !! +// Base58 encoding is expensive, concatenation is expensive +compute_fn! { "Log a pubkey to account info" => + msg!("A string {0}", ctx.accounts.counter.to_account_info().key()); +} + +// 357 CU - string concatenation is expensive +compute_fn! { "Log a pubkey simple concat" => + msg!("A string {0}", "5w6z5PWvtkCd4PaAV7avxE6Fy5brhZsFdbRLMt8UefRQ"); +} +``` + +If you do want to log a pubkey, you can use `.key()` and `.log()` to efficiently +log it with lower compute usage: + +```rust +// 262 cu +compute_fn! { "Log a pubkey" => + ctx.accounts.counter.to_account_info().key().log(); +} +``` + +### Data Types + +Larger data types use more Compute Units overall. Make sure you actually need a +larger data type such as a `u64` before you use it, as it can incur much higher +usage overall compared to a smaller data type such as a `u8`. + +```rust +// 357 CU +compute_fn! { "Push Vector u64 " => + let mut a: Vec<u64> = Vec::new(); + a.push(1); + a.push(1); + a.push(1); + a.push(1); + a.push(1); + a.push(1); +} + +// 211 CU +compute_fn! { "Vector u8 " => + let mut a: Vec<u8> = Vec::new(); + a.push(1); + a.push(1); + a.push(1); + a.push(1); + a.push(1); + a.push(1); +} +``` + +Overall, these data type differences can add up to a lot more cost throughout +your program. + +### Serialization + +Serialization and deserialization are both expensive operations depending on the +account struct. If possible, use zero copy and directly interact with the +account data to avoid these expensive operations. + +```rust +// 6302 CU +pub fn initialize(_ctx: Context) -> Result<()> { + Ok(()) +} + +// 5020 CU +pub fn initialize_zero_copy(_ctx: Context) -> Result<()> { + Ok(()) +} +``` + +```rust +// 108 CU - total CU including serialization 2600 +let counter = &mut ctx.accounts.counter; +compute_fn! { "Borsh Serialize" => + counter.count = counter.count.checked_add(1).unwrap(); +} + +// 151 CU - total CU including serialization 1254 +let counter = &mut ctx.accounts.counter_zero_copy.load_mut()?; +compute_fn! { "Zero Copy Serialize" => + counter.count = counter.count.checked_add(1).unwrap(); +} +``` + +Using the above examples, you can potentially save half or more of your total CU +usage by using zero copy within your program. + +### Program Derived Addresses + +Using Program Derived Addresses (PDAs) is a common practice within your program, +but it's important to be aware of the compute usage of `find_program_address` +and how you can optimize it. + +If `find_program_address` has to try many bump seeds before it finds a valid +address, the overall compute unit usage will be higher.
You +can optimize finding the PDAs after initialization by saving the bump into an +account and using it in the future. + +```rust +pub fn pdas(ctx: Context<PdaAccounts>) -> Result<()> { + let program_id = Pubkey::from_str("5w6z5PWvtkCd4PaAV7avxE6Fy5brhZsFdbRLMt8UefRQ").unwrap(); + + // 12,136 CUs + compute_fn! { "Find PDA" => + Pubkey::find_program_address(&[b"counter"], ctx.program_id); + } + + // 1,651 CUs + compute_fn! { "Find PDA" => + Pubkey::create_program_address(&[b"counter", &[248_u8]], &program_id).unwrap(); + } + + Ok(()) +} + +#[derive(Accounts)] +pub struct PdaAccounts<'info> { + #[account(mut)] + pub counter: Account<'info, CounterData>, + // 12,136 CUs when not defining the bump + #[account( + seeds = [b"counter"], + bump + )] + pub counter_checked: Account<'info, CounterData>, +} + +#[derive(Accounts)] +pub struct PdaAccounts<'info> { + #[account(mut)] + pub counter: Account<'info, CounterData>, + // only 1600 if using the bump that is saved in the counter_checked account + #[account( + seeds = [b"counter"], + bump = counter_checked.bump + )] + pub counter_checked: Account<'info, CounterData>, +} +``` + +## Further Compute Optimizations + +There are many other ways to optimize your program's compute usage, such as +writing in native Rust instead of Anchor, but it all comes at a cost. If you want +the absolute best compute usage on your program, you should evaluate and test +different methods to see what works best for your specific use case. diff --git a/content/guides/advanced/how-to-request-optimal-compute.mdx b/content/guides/advanced/how-to-request-optimal-compute.mdx new file mode 100644 index 000000000..54415fe5f --- /dev/null +++ b/content/guides/advanced/how-to-request-optimal-compute.mdx @@ -0,0 +1,161 @@ +--- +date: 2024-03-19T00:00:00Z +difficulty: intermediate +title: How to Request Optimal Compute Budget +description: + "Learn how to use transaction simulation to get the compute units consumed + and build an optimal transaction." +tags: + - compute +--- + +All transactions on Solana use +[Compute Units (CU)](/docs/terminology#compute-units), which +measure the computational resources your transaction uses on the network. When +you pay +[priority fees](/developers/guides/advanced/how-to-use-priority-fees) +on your transactions, you must specify the exact amount of compute units you +expect to use; otherwise, you will overpay for your transaction. This guide will +provide step-by-step instructions on optimizing the compute units for your +transaction requests. + +## How to Request Compute Budget + +For precise control over your transaction's computational resources, use the +`setComputeUnitLimit` instruction from the Compute Budget program. This +instruction allocates a specific number of compute units for your transaction, +ensuring you only pay for what you need. + +```typescript +// import { ComputeBudgetProgram } from "@solana/web3.js" + +const modifyComputeUnits = ComputeBudgetProgram.setComputeUnitLimit({ + units: 300, +}); +``` + +This instruction will allocate a specific amount of compute units for your +transaction. How do we come up with the number to use? + +The +[simulateTransaction RPC method](/docs/rpc/http/simulatetransaction) +will return the estimated compute units consumed given a transaction.
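If you want to see what that looks like without any helper library, a rough sketch using plain `@solana/web3.js` is below. The `estimateComputeUnits` wrapper is just an illustrative name for this guide, not an existing API, and the instructions are simulated against a freshly fetched blockhash:

```typescript
import {
  Connection,
  PublicKey,
  TransactionInstruction,
  TransactionMessage,
  VersionedTransaction,
} from "@solana/web3.js";

// Simulate a set of instructions and report how many compute units they consumed
async function estimateComputeUnits(
  connection: Connection,
  instructions: Array<TransactionInstruction>,
  payer: PublicKey,
): Promise<number | undefined> {
  const { blockhash } = await connection.getLatestBlockhash();

  const transaction = new VersionedTransaction(
    new TransactionMessage({
      instructions,
      payerKey: payer,
      recentBlockhash: blockhash,
    }).compileToV0Message(),
  );

  // sigVerify is disabled so the transaction does not need to be signed
  const simulation = await connection.simulateTransaction(transaction, {
    sigVerify: false,
  });

  return simulation.value.unitsConsumed;
}
```

You would then pass the returned value (ideally with some margin added, as discussed later in this guide) to `ComputeBudgetProgram.setComputeUnitLimit`.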
The +[Solana helpers npm package](https://www.npmjs.com/package/@solana-developers/helpers) +includes +[`getSimulationComputeUnits`](https://github.com/solana-developers/helpers?tab=readme-ov-file#get-simulated-compute-units-cus-for-transaction-instructions), +a small function that uses `simulateTransaction` to calculate the compute units. +You can then set the compute units in your new transaction, and send the new +transaction for an optimal result. + +``` +npm i @solana-developers/helpers +``` + +The syntax is simply: + +```typescript +getSimulationComputeUnits( + connection: Connection, + instructions: Array<TransactionInstruction>, + payer: PublicKey, + lookupTables: Array<AddressLookupTableAccount> +); +``` + +For example: + +```typescript +const units = await getSimulationComputeUnits( + connection, + instructions, + payer.publicKey, + [], // no address lookup tables in this example +); +``` + +Using `getSimulationComputeUnits`, you can build an optimal transaction that uses +an appropriate amount of compute units for what the transaction consumes: + +```typescript +// import { ... } from "@solana/web3.js" + +async function buildOptimalTransaction( + connection: Connection, + instructions: Array<TransactionInstruction>, + signer: Signer, + lookupTables: Array<AddressLookupTableAccount>, +) { + const [microLamports, units, recentBlockhash] = await Promise.all([ + 100 /* Get optimal priority fees - https://solana.com/developers/guides/advanced/how-to-use-priority-fees*/, + getSimulationComputeUnits( + connection, + instructions, + signer.publicKey, + lookupTables, + ), + connection.getLatestBlockhash(), + ]); + + instructions.unshift( + ComputeBudgetProgram.setComputeUnitPrice({ microLamports }), + ); + if (units) { + // probably should add some margin of error to units + instructions.unshift(ComputeBudgetProgram.setComputeUnitLimit({ units })); + } + return { + transaction: new VersionedTransaction( + new TransactionMessage({ + instructions, + recentBlockhash: recentBlockhash.blockhash, + payerKey: signer.publicKey, + }).compileToV0Message(lookupTables), + ), + recentBlockhash, + }; +} +``` + + + +Credit to Sammmmmy, aka [@stegaBOB](https://twitter.com/stegaBOB), for the +source code of these two functions. + + + +## Special Considerations + +Compute units for transactions are not always stable. For example, the compute +usage can change if the transaction you are executing has a call to +`find_program_address`, such as when finding a program derived address. + +If you have a variable compute usage on your transactions, you can do one of two +things: + +1. Run a test over your transactions over some time to find out the ceiling + compute unit usage and use that number. + +2. Take the compute units returned from `simulateTransaction` and add a + percentage to the total. For example, if you choose to add 10% more CU and the + result you received from `simulateTransaction` was 1000 CU, you would set + 1100 CU on your transaction. + +## Conclusion + +Requesting the optimal compute units for your transaction is essential to help +you pay less for your transaction and to help schedule your transaction better +on the network. Wallets, dApps, and other services should ensure their compute +unit requests are optimal to provide the best experience possible for their +users.
+ +## More Resources + +You can learn more about the Compute Budget and related topics with these +resources: + +- documentation for the [Compute Budget](/docs/core/fees#compute-budget) +- Guide on + [how to use priority fees](/developers/guides/advanced/how-to-use-priority-fees) +- Guide on + [how to optimize compute units in programs](/developers/guides/advanced/how-to-optimize-compute) diff --git a/content/guides/advanced/how-to-use-priority-fees.mdx b/content/guides/advanced/how-to-use-priority-fees.mdx new file mode 100644 index 000000000..c15857584 --- /dev/null +++ b/content/guides/advanced/how-to-use-priority-fees.mdx @@ -0,0 +1,160 @@ +--- +date: 2024-04-07T00:00:00Z +difficulty: intermediate +title: "How to use Priority Fees on Solana" +description: + "Priority Fees are a new feature on Solana that allow you to specify an + additional fee to your transactions. These fees help make your transaction + more economically compelling to include in a block." +tags: + - web3js +keywords: + - tutorial + - priority fees + - offline signing + - transactions + - intro to solana development + - blockchain developer + - blockchain tutorial + - web3 developer +--- + +This guide is meant to be a reference for developers who want to add priority +fees to their transactions on Solana. We will cover priority fees, how to use +them, special considerations, and best practices to estimate them. + +## What are Priority Fees? + +Prioritization Fees are an optional fee, priced in +[micro-lamports](/docs/terminology#lamport) per +[Compute Unit](/docs/terminology#compute-units) (e.g. small amounts of SOL), +appended to transactions to make them economically compelling for validator +nodes to include in blocks on the network. This additional fee will be on top of +the base [Transaction Fee](/docs/core/fees) already set, which is 5000 lamports +per signature in your transaction. + +## Why Should I Use Priority Fees? + +When a transaction journeys through a validator, one of the critical stages of +the validator is scheduling the transaction. A validator is economically +incentivized to schedule transactions with the highest fee per compute unit +associated, guaranteeing users use resources optimally. A user can still have +their transaction executed with no priority fee attached but with a lesser +guarantee. When blocks are saturated with transactions with priority fees, +validators will drop transactions without priority fees. + +## How do I Implement Priority Fees? + +When adding priority fees to a transaction, keep in mind the amount of compute +units (CU) used for your transaction. The higher the CU required for the +transaction, the more fees you will pay when adding priority fees. + +Using the [Compute Budget Program](/docs/core/fees#compute-budget), you can +change the CU requested for your transaction and add any additional priority fee +required. Do note that your CU request must be equal to or greater than the CU +needed for the transaction; otherwise, the transaction will fail. + +Let's take a simple transfer SOL transaction and add priority fees. A +[transfer SOL transaction takes 300 CU](https://explorer.solana.com/tx/5scDyuiiEbLxjLUww3APE9X7i8LE3H63unzonUwMG7s2htpoAGG17sgRsNAhR1zVs6NQAnZeRVemVbkAct5myi17). +To best optimize our transaction, request exactly 300 CU with the Compute Budget +Program when adding additional priority fees. + +```typescript +// import { ... 
} from "@solana/web3.js" + +const modifyComputeUnits = ComputeBudgetProgram.setComputeUnitLimit({ + units: 300, +}); + +const addPriorityFee = ComputeBudgetProgram.setComputeUnitPrice({ + microLamports: 20000, +}); + +const transaction = new Transaction() + .add(modifyComputeUnits) + .add(addPriorityFee) + .add( + SystemProgram.transfer({ + fromPubkey: payer.publicKey, + toPubkey: toAccount, + lamports: 10000000, + }), + ); +``` + +Viewing +[this transaction](https://explorer.solana.com/tx/5scDyuiiEbLxjLUww3APE9X7i8LE3H63unzonUwMG7s2htpoAGG17sgRsNAhR1zVs6NQAnZeRVemVbkAct5myi17) +on the Solana Explorer, see that we used +`ComputeBudgetProgram.setComputeUnitLimit` to set the Compute Unit Limit to 300 +CUs while also adding a priority fee of 20000 micro-lamports with +`ComputeBudgetProgram.setComputeUnitPrice`. + +## How Do I Estimate Priority Fees? + +The best way to estimate priority fees for a given transaction is to query the +historical priority fees required to land a transaction given the correct +accounts. The +[getRecentPrioritizationFees](/docs/rpc/http/getrecentprioritizationfees) JSON +RPC API method will retrieve the lowest priority fees used recently to land a +transaction in a block. + +When using `getRecentPrioritizationFees`, provide the accounts used in your +transaction; otherwise, you'll find the lowest fee to land a transaction +overall. Account contention within a block decides priority, and validators will +schedule accordingly. + +This RPC method will return the highest fee associated with the provided +accounts, which then becomes the base fee to consider when adding priority fees. + +```shell +curl https://api.devnet.solana.com -X POST -H "Content-Type: application/json" -d ' + { + "jsonrpc":"2.0", "id":1, + "method": "getRecentPrioritizationFees", + "params": [ + ["CxELquR1gPP8wHe33gZ4QxqGB3sZ9RSwsJ2KshVewkFY"] + ] + } +' +``` + +Different approaches to setting Priority Fees exist, and some +[third-party APIs](https://docs.helius.dev/solana-rpc-nodes/alpha-priority-fee-api) +are available to determine the best fee to apply. Given the dynamic nature of +the network, there will not be a "perfect" way to set priority fees, and careful +analysis should be used before choosing a path forward. + +## Special Considerations + +If you use priority fees with a +[Durable Nonce](/developers/guides/advanced/introduction-to-durable-nonces) +Transaction, you must ensure the `AdvanceNonce` instruction is your +transaction's first instruction. This is critical to ensure your transaction is +successful; otherwise, it will fail. 
+ +```typescript +const advanceNonce = SystemProgram.nonceAdvance({ + noncePubkey: nonceAccountPubkey, + authorizedPubkey: nonceAccountAuth.publicKey, +}); + +const modifyComputeUnits = ComputeBudgetProgram.setComputeUnitLimit({ + units: 300, +}); + +const addPriorityFee = ComputeBudgetProgram.setComputeUnitPrice({ + microLamports: 20000, +}); + +const transaction = new Transaction() + .add(advanceNonce) + .add(modifyComputeUnits) + .add(addPriorityFee) + .add( + SystemProgram.transfer({ + fromPubkey: payer.publicKey, + toPubkey: toAccount, + lamports: 10000000, + }), + ); +``` diff --git a/content/guides/advanced/introduction-to-durable-nonces.mdx b/content/guides/advanced/introduction-to-durable-nonces.mdx new file mode 100644 index 000000000..31aeb23fb --- /dev/null +++ b/content/guides/advanced/introduction-to-durable-nonces.mdx @@ -0,0 +1,623 @@ +--- +date: 2024-06-29T00:00:00Z +difficulty: advanced +title: "Durable & Offline Transaction Signing using Nonces" +description: + "One-stop shop for Solana's Durable Nonces: an easy way to power your Solana + dapps" +tags: + - cli + - web3js +keywords: + - tutorial + - durable nonces + - offline signing + - transactions + - intro to solana development + - blockchain developer + - blockchain tutorial + - web3 developer +altRoutes: + - /developers/guides/introduction-to-durable-nonces +--- + +This guide is meant to be a one-stop shop for Solana's Durable Nonces: a highly +under-utilized and under-appreciated way to power your Solana dapps and make +your user's experience more reliable and deterministic. + +> The code for this guide can be found in +> [this repository](https://github.com/0xproflupin/solana-durable-nonces), and +> it's advisable to follow along and run the examples locally to get a better +> grasp of Durable Nonces + +## Durable Nonce Applications + +Before we dive deep into Durable Nonces, its important to understand that +durable nonces provide an opportunity to create and sign a transaction that can +be submitted at any point in the future, and much more. This opens up a wide +range of use cases that are otherwise not possible or too difficult to +implement: + +1. **Scheduled Transactions**: One of the most apparent applications of Durable + Nonces is the ability to schedule transactions. Users can pre-sign a + transaction and then submit it at a later date, allowing for planned + transfers, contract interactions, or even executing pre-determined investment + strategies. + +2. **Multisig Wallets**: Durable Nonces are very useful for multi-signature + wallets where one party signs a transaction, and others may confirm at a + later time. This feature enables the proposal, review, and later execution of + a transaction within a trustless system. + +3. **Programs Requiring Future Interaction**: If a program on Solana requires + interaction at a future point (such as a vesting contract or a timed release + of funds), a transaction can be pre-signed using a Durable Nonce. This + ensures the contract interaction happens at the correct time without + necessitating the presence of the transaction creator. + +4. **Cross-chain Interactions**: When you need to interact with another + blockchain, and it requires waiting for confirmations, you could sign the + transaction with a Durable Nonce and then execute it once the required + confirmations are received. + +5. **Decentralized Derivatives Platforms**: In a decentralized derivatives + platform, complex transactions might need to be executed based on specific + triggers. 
With Durable Nonces, these transactions can be pre-signed and
+   executed when the trigger condition is met.
+
+## Introduction to Durable Nonces
+
+### Double Spend
+
+Imagine you're buying an NFT on MagicEden or Tensor. You have to sign a
+transaction that allows the marketplace's program to extract some SOL from your
+wallet.
+
+What is stopping them from reusing your signature to extract SOL again? Without
+a way to check whether the transaction was already submitted once, they could
+keep submitting the signed transaction until there's no SOL left in your wallet.
+
+This is known as the problem of Double-Spend and is one of the core issues that
+blockchains like Solana solve.
+
+A naive solution could be to crosscheck all transactions made in the past and
+see if we find the signature there. This is not practically possible, as the
+size of the Solana ledger is >80 TB.
+
+### Recent Blockhashes
+
+Solution: Crosscheck signatures within only a set period of recent time, and
+discard the transaction if it gets "too" old.
+
+Recent Blockhashes are used to achieve this. A blockhash is a 32-byte SHA-256
+hash. It is used to indicate when a client last observed the ledger. Using
+recent blockhashes, transactions are checked against the last 150 blocks. If a
+transaction's signature is found there, it is rejected. A transaction is also
+rejected if its blockhash is older than 150 blocks. The only case in which a
+transaction is accepted is if it is unique and its blockhash is more recent than
+150 blocks (~80-90 seconds).
+
+As you can imagine, a side-effect of using recent blockhashes is the forced
+mortality of a transaction even before its submission.
+
+Another issue with blockhashes is the forced non-uniqueness of signed
+transactions in very small timeframes. In some cases, if transactions are
+executed in very quick succession, some get the same recent blockhash with high
+probability, thus
+[making them duplicates and preventing their execution](https://solana.stackexchange.com/questions/1161/how-to-avoid-sendtransactionerror-this-transaction-has-already-been-processed?rq=1).
+
+To summarize:
+
+1. What if I don't want to send the transaction right away?
+2. What if I want to sign the transaction offline because I don't want to keep
+   my keys on a device that is connected to the net?
+3. What if I want to co-sign the transaction from multiple devices owned by
+   multiple people, and the co-signing takes more than 90 seconds, as in the
+   case of a multisig operated by a DAO?
+4. What if I want to sign and send a burst of transactions and don't want them
+   to fail due to duplication?
+
+The solution lies with Durable Nonces⚡️
+
+### Durable Nonces
+
+Durable Transaction Nonces, which are 32 bytes in length (usually represented as
+base58 encoded strings), are used in place of recent blockhashes to make every
+transaction unique (to avoid double-spending) while removing the mortality of
+the unexecuted transaction.
+
+> How do they make transactions unique to avoid double spending?
+>
+> If nonces are used in place of recent blockhashes, the first instruction of
+> the transaction needs to be a `nonceAdvance` instruction, which changes or
+> advances the nonce. This ensures that every transaction which is signed using
+> the nonce as the recent blockhash, irrespective of being successfully
+> submitted or not, will be unique.
+
+Let's look at a couple of accounts that are important for using durable nonces
+with Solana transactions.
+
+### Nonce Account
+
+The Nonce Account is the account that stores the value of the nonce. 
This +account is owned by the `SystemProgram` and is rent-exempt; thus needs to +maintain the minimum balance for rent exemption (around 0.0015 SOL). + +### Nonce Authority + +Nonce authority is the account that controls the Nonce Account. It has the +authority to generate a new nonce, advance the nonce or withdraw SOL from the +Nonce Account. By default, the account that creates the Nonce Account is +delegated as the Nonce Authority, but it's possible to transfer the authority +onto a different keypair account. + +## Durable Nonces with Solana CLI + +Now that we know what Durable Nonces are, it's time to use them to send durable +transactions. + +> If you do not have the Solana CLI installed, please go through +> [this tutorial](/docs/intro/installation) and set up the Solana CLI and +> create a keypair with some airdropped SOL on devnet + +### Create Nonce Authority + +Let's start with creating a new keypair which we will use as our Nonce +authority. We can use the keypair currently configured in our Solana CLI, but +it's better to make a fresh one (make sure you're on devnet). + +```shell +solana-keygen new -o nonce-authority.json +``` + +Set the current Solana CLI keypair to `nonce-authority.json` and airdrop some +SOL in it. + +```shell +solana config set -k ~//nonce-authority.json +solana airdrop 2 +``` + +Okay, we're set. Let's create our nonce account. + +### Create Nonce Account + +Create a new keypair `nonce-account` and use the `create-nonce-account` +instruction to delegate this keypair as the Nonce Account. We will also transfer +0.0015 SOL to the Nonce Account from the Nonce Authority, which is usually just +above the minimum quantity needed for rent exemption. + +```shell +solana-keygen new -o nonce-account.json +solana create-nonce-account nonce-account.json 0.0015 +``` + +Output + +```shell +Signature: skkfzUQrZF2rcmrhAQV6SuLa7Hj3jPFu7cfXAHvkVep3Lk3fNSVypwULhqMRinsa6Zj5xjj8zKZBQ1agMxwuABZ +``` + +Upon searching the +[signature](https://solscan.io/tx/skkfzUQrZF2rcmrhAQV6SuLa7Hj3jPFu7cfXAHvkVep3Lk3fNSVypwULhqMRinsa6Zj5xjj8zKZBQ1agMxwuABZ?cluster=devnet) +on the explorer, we can see that the Nonce Account was created and the +`InitializeNonce` instruction was used to initialize a nonce within the account. + +### Fetch Nonce + +We can query the value of the stored Nonce as follows. + +```shell +solana nonce nonce-account.json +``` + +Output + +```shell +AkrQn5QWLACSP5EMT2R1ZHyKaGWVFrDHJ6NL89HKtwjQ +``` + +This is the base58 encoded hash that will be used in place of recent blockhashes +while signing a transaction. + +### Display Nonce Account + +We can inspect the details of a Nonce Account in a prettier formatted version + +```shell +solana nonce-account nonce-account.json +``` + +Output + +```shell +Balance: 0.0015 SOL +Minimum Balance Required: 0.00144768 SOL +Nonce blockhash: AkrQn5QWLACSP5EMT2R1ZHyKaGWVFrDHJ6NL89HKtwjQ +Fee: 5000 lamports per signature +Authority: 5CZKcm6PakaRWGK8NogzXvj8CjA71uSofKLohoNi4Wom +``` + +### Advancing Nonce + +As discussed before, advancing the Nonce or changing the value of the nonce is +an important step for making subsequent transactions unique. The Nonce Authority +needs to sign the transaction with the `nonceAdvance` instruction. + +```shell +solana new-nonce nonce-account.json +``` + +Output + +```shell +Signature: 4nMHnedguiEtHshuMEm3NsuTQaeV8AdcDL6QSndTZLK7jcLUag6HCiLtUq6kv21yNSVQLoFj44aJ5sZrTXoYYeyS +``` + +If we check the nonce again, the value of the nonce has changed or advanced. 
+ +```shell +solana nonce nonce-account.json +``` + +Output + +```shell +DA8ynAQTGctqQXNS2RNTGpag6s5p5RcrBm2DdHhvpRJ8 +``` + +### Withdraw from Nonce Account + +We transferred 0.0015 SOL when creating the Nonce Account. The Nonce Authority +can transfer these funds back to itself or some other account. + +```shell +solana withdraw-from-nonce-account nonce-account.json nonce-authority.json 0.0000001 +``` + +Output + +```shell +Signature: 5zuBmrUpqnubdePHVgzSNThbocruJZLJK5Dut7DM6WyoqW4Qbrc26uCw3nq6jRocR9XLMwZZ79U54HDnGhDJVNfF +``` + +We can check the status of the Nonce Account after the withdrawal; the balance +should have changed. + +```shell +solana nonce-account nonce-account.json +``` + +Output + +```shell +Balance: 0.0014999 SOL +Minimum Balance Required: 0.00144768 SOL +Nonce blockhash: DA8ynAQTGctqQXNS2RNTGpag6s5p5RcrBm2DdHhvpRJ8 +Fee: 5000 lamports per signature +Authority: 5CZKcm6PakaRWGK8NogzXvj8CjA71uSofKLohoNi4Wom +``` + +## Live Example: DAO Offline Co-Signing + +We will use an example where a DAO committee needs to transfer some SOL to a new +wallet. Two co-signers are needed before sending the SOL, where `co-sender` pays +for the transaction and `sender` sends the SOL. To add to this, the `co-sender` +is very careful when it comes to connecting his device to the internet and thus +wants to sign the transaction offline. + +Let's create three new keypairs, which will act as the two members of the DAO +and the receiver. Although, for this example, we are creating the keypairs in +the same system, we will assume that these accounts are on different systems to +replicate an IRL scenario. + +```shell +solana-keygen new -o sender.json +# pubkey: H8BHbivzT4DtJxL4J4X53CgnqzTUAEJfptSaEHsCvg51 + +solana-keygen new -o co-sender.json +# pubkey: HDx43xY4piU3xMxNyRQkj89cqiF15hz5FVW9ergTtZ7S + +solana-keygen new -o receiver.json +# pubkey: D3RAQxwQBhMLum2WK7eCn2MpRWgeLtDW7fqXTcqtx9uC +``` + +Let's add some SOL to the member wallets. + +```shell +solana airdrop -k sender.json 0.5 +solana airdrop -k co-sender.json 0.5 +``` + +### Using Recent Blockhashes + +Before we try to sign and send a durable transaction, let's see how transactions +are normally submitted using recent blockhashes. + +> Its important to note that although we'll attempt to achieve the above using +> recent blockhashes, the expected outcome is failure, which will help us +> appreciate why durable nonces are necessary here. + +The first step is to build a transfer transaction from `sender` to `receiver` +and sign it with `co-sender`'s wallet. + +To sign an offline transaction, we need to use: + +1. `--sign-only`: which prevents clients from sending the transaction. +2. `--blockhash`: which lets us specify a recent blockhash so that the client + does not try to fetch for it in an offline setting. + +- We can get a recent blockhash from + [solscan](https://solscan.io/blocks?cluster=devnet). Just copy the first + blockhash from the list. +- We will also need the pubkey of `sender`: + `H8BHbivzT4DtJxL4J4X53CgnqzTUAEJfptSaEHsCvg51` +- You can even turn off your internet when you sign this transaction using the + `co-sender`'s wallet :). 
+ +```shell +solana transfer receiver.json 0.1 \ + --sign-only \ + --blockhash F13BkBgNTyyuruUQFSgUkXPMJCfPvKhhrr217eiqGfVE \ + --fee-payer co-sender.json \ + --from H8BHbivzT4DtJxL4J4X53CgnqzTUAEJfptSaEHsCvg51 \ + --keypair co-sender.json +``` + +Output + +```shell +Blockhash: F13BkBgNTyyuruUQFSgUkXPMJCfPvKhhrr217eiqGfVE +Signers (Pubkey=Signature): + HDx43xY4piU3xMxNyRQkj89cqiF15hz5FVW9ergTtZ7S=2gUmcb4Xwm3Dy9xH3a3bePsWVKCRMtUghqDS9pnGZDmX6hqtWMfpubEbgcai5twncoAJzyr9FRn3yuXVeSvYD4Ni +Absent Signers (Pubkey): + H8BHbivzT4DtJxL4J4X53CgnqzTUAEJfptSaEHsCvg51 +``` + +The transaction is signed by `co-sender`'s wallet, who will pay the tx fee. +Also, we are notified about the pending signature from the `sender`'s wallet +(`H8BHbivzT4DtJxL4J4X53CgnqzTUAEJfptSaEHsCvg51`). + +In a real-world scenario, `co-sender` can share their `Pubkey=Signature` pair +with the `sender` who will need this sign and submit the transaction. This share +may take more than a minute to happen. Once the `sender` receives this pair, +they can initiate the transfer. + +```shell +solana transfer receiver.json 0.1 \ + --allow-unfunded-recipient \ + --blockhash F13BkBgNTyyuruUQFSgUkXPMJCfPvKhhrr217eiqGfVE \ + --from sender.json \ + --keypair sender.json \ + --signer HDx43xY4piU3xMxNyRQkj89cqiF15hz5FVW9ergTtZ7S=2gUmcb4Xwm3Dy9xH3a3bePsWVKCRMtUghqDS9pnGZDmX6hqtWMfpubEbgcai5twncoAJzyr9FRn3yuXVeSvYD4Ni +``` + +Output + +```shell +Error: Hash has expired F13BkBgNTyyuruUQFSgUkXPMJCfPvKhhrr217eiqGfVE +``` + +The transfer is not successful because the hash has expired. How do we overcome +this issue of expired blockhashes? Using Durable Nonces! + +### Using Durable Nonces + +We will use the `nonce-account.json` and `nonce-authority.json` keypairs that we +created earlier. We already have a nonce initialized in the `nonce-account`. +Let's advance it to get a new one first, just to be sure that the `nonce` isn't +already used. + +```shell +solana new-nonce nonce-account.json +solana nonce-account nonce-account.json +``` + +Output + +```shell +Signature: 3z1sSU7fmdRoBZynVLiJEqa97Ja481nb3r1mLu8buAgwMnaKdF4ZaiBkzrLjPRzn1HV2rh4AHQTJHAQ3DsDiYVpF + +Balance: 0.0014999 SOL +Minimum Balance Required: 0.00144768 SOL +Nonce blockhash: HNUi6La2QpGJdfcAR6yFFmdgYoCvFZREkve2haMBxXVz +Fee: 5000 lamports per signature +Authority: 5CZKcm6PakaRWGK8NogzXvj8CjA71uSofKLohoNi4Wom +``` + +Perfect, now let's start with offline co-signing the transaction with +`co-signer`'s wallet, but this time, we'll use the `Nonce blockhash` printed +above, which is basically the `nonce` stored in the `nonce-account` as the +blockhash for the transfer transaction. + +```shell +solana transfer receiver.json 0.1 \ + --sign-only \ + --nonce nonce-account.json \ + --blockhash HNUi6La2QpGJdfcAR6yFFmdgYoCvFZREkve2haMBxXVz \ + --fee-payer co-sender.json \ + --from H8BHbivzT4DtJxL4J4X53CgnqzTUAEJfptSaEHsCvg51 \ + --keypair co-sender.json +``` + +Output + +```shell +Blockhash: HNUi6La2QpGJdfcAR6yFFmdgYoCvFZREkve2haMBxXVz +Signers (Pubkey=Signature): + HDx43xY4piU3xMxNyRQkj89cqiF15hz5FVW9ergTtZ7S=5tfuPxsXchbVFU745658nsQr5Gqhb5nRnZKLnnovJ2PZBHbqUbe7oB5kDbnq7tjeJ2V8Mywa4gujUjT4BWKRcAdi +Absent Signers (Pubkey): + H8BHbivzT4DtJxL4J4X53CgnqzTUAEJfptSaEHsCvg51 +``` + +This is very similar to the one we signed using the recent blockhash. Now we'll +sign and send the transaction with the `sender`'s wallet. 
+ +```shell +solana transfer receiver.json 0.1 \ + --nonce nonce-account.json \ + --nonce-authority nonce-authority.json \ + --blockhash HNUi6La2QpGJdfcAR6yFFmdgYoCvFZREkve2haMBxXVz \ + --from sender.json \ + --keypair sender.json \ + --signer HDx43xY4piU3xMxNyRQkj89cqiF15hz5FVW9ergTtZ7S=5tfuPxsXchbVFU745658nsQr5Gqhb5nRnZKLnnovJ2PZBHbqUbe7oB5kDbnq7tjeJ2V8Mywa4gujUjT4BWKRcAdi +``` + +Output + +```shell +Signature: anQ8VtQgeSMoKTnQCubTenq1J7WKxAa1dbFMDLsbDWgV6GGL135G1Ydv4QTNd6GptP3TxDQ2ZWi3Y5qnEtjM7yg +``` + +The transaction is successfully submitted! + +If we check it on the +[explorer](https://solscan.io/tx/anQ8VtQgeSMoKTnQCubTenq1J7WKxAa1dbFMDLsbDWgV6GGL135G1Ydv4QTNd6GptP3TxDQ2ZWi3Y5qnEtjM7yg?cluster=devnet), +we can see that an `AdvanceNonce` instruction was prepended to the transaction, +as we discussed before. This is done to avoid using the same nonce again. + +Voila, we've gone through a very real-life use case of Durable Nonces. Now let's +see how to use them in transactions using JavaScript and the +[`@solana/web3.js`](https://solana-labs.github.io/solana-web3.js/v1.x/) package. + +## Durable Nonces with Solana Web3.js + +We'll use a similar example of making a simple transfer to demonstrate how to +send transactions using durable nonces. + +### Create Nonce Authority (Web3.js) + +```ts +const nonceAuthKP = Keypair.generate(); +``` + +_If you need SOL, you can use the +[faucet.solana.com](https://faucet.solana.com/)_ to get some. + +### Create Nonce Accounts (Web3.js) + +```ts +const nonceKeypair = Keypair.generate(); +const tx = new Transaction(); + +// the fee payer can be any account +tx.feePayer = nonceAuthKP.publicKey; + +// to create the nonce account, you can use fetch the recent blockhash +// or use a nonce from a different, pre-existing nonce account +tx.recentBlockhash = (await connection.getLatestBlockhash()).blockhash; + +tx.add( + // create system account with the minimum amount needed for rent exemption. 
+ // NONCE_ACCOUNT_LENGTH is the space a nonce account takes + SystemProgram.createAccount({ + fromPubkey: nonceAuthKP.publicKey, + newAccountPubkey: nonceKeypair.publicKey, + lamports: 0.0015 * LAMPORTS_PER_SOL, + space: NONCE_ACCOUNT_LENGTH, + programId: SystemProgram.programId, + }), + // initialise nonce with the created nonceKeypair's pubkey as the noncePubkey + // also specify the authority of the nonce account + SystemProgram.nonceInitialize({ + noncePubkey: nonceKeypair.publicKey, + authorizedPubkey: nonceAuthKP.publicKey, + }), +); + +// sign the transaction with both the nonce keypair and the authority keypair +tx.sign(nonceKeypair, nonceAuthKP); + +// send the transaction +const sig = await sendAndConfirmRawTransaction( + connection, + tx.serialize({ requireAllSignatures: false }), +); +console.log("Nonce initiated: ", sig); +``` + +### Fetch Nonce Account (Web3.js) + +```ts +const accountInfo = await connection.getAccountInfo(nonceKeypair.publicKey); +const nonceAccount = NonceAccount.fromAccountData(accountInfo.data); +``` + +### Sign Transaction using Durable Nonce + +```ts +// make a system transfer instruction +const ix = SystemProgram.transfer({ + fromPubkey: publicKey, + toPubkey: publicKey, + lamports: 100, +}); + +// make a nonce advance instruction +const advanceIX = SystemProgram.nonceAdvance({ + authorizedPubkey: nonceAuthKP.publicKey, + noncePubkey: noncePubKey, +}); + +// add them to a transaction +const tx = new Transaction(); +tx.add(advanceIX); +tx.add(ix); + +// use the nonceAccount's stored nonce as the recentBlockhash +tx.recentBlockhash = nonceAccount.nonce; +tx.feePayer = publicKey; + +// sign the tx with the nonce authority's keypair +tx.sign(nonceAuthKP); + +// make the owner of the publicKey sign the transaction +// this should open a wallet popup and let the user sign the tx +const signedTx = await signTransaction(tx); + +// once you have the signed tx, you can serialize it and store it +// in a database, or send it to another device. You can submit it +// at a later point, without the tx having a mortality +const serialisedTx = bs58.encode( + signedTx.serialize({ requireAllSignatures: false }), +); +console.log("Signed Durable Transaction: ", serialisedTx); +``` + +## Live Example: Poll Simulation App + +The Poll Simulation app simulates a real-life poll mechanism, wherein voters are +allowed to vote for a given set of times. Once the time comes for determining +the results of the poll: the votes are counted, the count is publicly announced +to everyone, and the winner is declared. + +This is tough to build on-chain, as changing the state of an account on-chain is +a public action, and hence if a user votes for someone, others would know, and +hence the count won't be hidden from the public until the voting has been +completed. + +Durable nonces can be used to partially fix this. Instead of signing and sending +the transaction when voting for your candidate, the dapp can let the user sign +the transaction using durable nonces, serialize the transaction as shown above +in the web3.js example, and save the serialized transactions in a database until +the time comes for counting. + +For counting the votes, the dapp then needs to sync, send, or submit all the +signed transactions one by one. With each submitted transaction, the state +change will happen on-chain, and the winner can be decided. 
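+
+As a rough sketch, the counting step could then look something like this
+(assuming the base58 strings produced in the web3.js example above were saved to
+a database; `countVotes` and `storedVotes` are hypothetical names used only for
+illustration):
+
+```ts
+import bs58 from "bs58";
+import { Connection, sendAndConfirmRawTransaction } from "@solana/web3.js";
+
+// `storedVotes` holds the base58-encoded, signed durable-nonce transactions
+// that were collected during the voting phase
+async function countVotes(connection: Connection, storedVotes: string[]) {
+  for (const serialisedTx of storedVotes) {
+    // because each transaction uses a durable nonce as its blockhash, it has
+    // no expiry and can be submitted whenever the counting starts
+    const signature = await sendAndConfirmRawTransaction(
+      connection,
+      Buffer.from(bs58.decode(serialisedTx)),
+    );
+    console.log("Vote counted: ", signature);
+  }
+}
+```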
+ +### Live App + +- The app is live on: + [**https://durable-nonces-demo.vercel.app/**](https://durable-nonces-demo.vercel.app/) + +- Information on how to use the dapp can be found + [here](https://github.com/0xproflupin/solana-durable-nonces/blob/main/durable-nonces-demo/README.md#how-to-use-the-dapp) + +- Information on how to build the dapp locally can be found + [here](https://github.com/0xproflupin/solana-durable-nonces/blob/main/durable-nonces-demo/README.md#how-to-build-the-dapp-locally) + +## References + +- [Neodyme Blog: Nonce Upon a Time, or a Total Loss of Funds](https://neodyme.io/blog/nonce-upon-a-time/) +- [Solana Durable Nonces CLI](https://docs.anza.xyz/cli/examples/durable-nonce) +- [Solana Durable Transaction Nonces Proposal](https://docs.anza.xyz/implemented-proposals/durable-tx-nonces) diff --git a/content/guides/advanced/meta.json b/content/guides/advanced/meta.json new file mode 100644 index 000000000..811237af8 --- /dev/null +++ b/content/guides/advanced/meta.json @@ -0,0 +1,13 @@ +{ + "title": "Advanced", + "pages": [ + "auto-approve", + "how-to-optimize-compute", + "how-to-request-optimal-compute", + "how-to-use-priority-fees", + "introduction-to-durable-nonces", + "stake-weighted-qos", + "testing-with-jest-and-bankrun", + "verified-builds" + ] +} diff --git a/content/guides/advanced/stake-weighted-qos.mdx b/content/guides/advanced/stake-weighted-qos.mdx new file mode 100644 index 000000000..1ddfcd21d --- /dev/null +++ b/content/guides/advanced/stake-weighted-qos.mdx @@ -0,0 +1,160 @@ +--- +featured: false +date: 2024-03-20T00:00:00Z +difficulty: intermediate +seoTitle: "Stake-weighted Quality of Service on Solana" +title: "A Guide to Stake-weighted Quality of Service on Solana" +description: + "Stake-weighed QoS (Quality-of-Service) is an implementation feature which, + when enabled, allows leaders (block producers) to identify and prioritize + transactions proxied through a staked validator as an additional sybil + resistance mechanism." +tags: + - rust +keywords: + - guide + - stake-weighted QoS + - Quality-of-Service + - intro to solana development + - blockchain developer + - blockchain tutorial + - web3 developer +altRoutes: + - /developers/guides/advanced/stake-weighted-qos-guide +--- + +## What is Stake-weighted Quality of Service (QoS)? + +Stake-weighed QoS (Quality-of-Service) is an implementation feature which, when +enabled, allows leaders (block producers) to identify and prioritize +transactions proxied through a staked validator as an additional sybil +resistance mechanism. Given that Solana is a proof of stake network, it is +natural to extend the utility of stake-weighting to transaction quality of +service. Under this model, a validator with 0.5% stake would have the right to +transmit up to 0.5% of the packets to the leader and will be capable of +resisting sybil attacks from the rest of the network. + +Operators who enable this feature will improve the security and performance of +the network by reducing the likelihood that low-or-no-stake (lower quality) +validators are able to “drown out” transactions emanating from higher-quality +(higher stake) validators (aka enhanced Sybil Resistance). + +One potential benefit of implementing Stake-weighted QoS could be realized if +certain agreements between Validators and RPC nodes are in place. RPC nodes may +land more transactions in blocks by agreeing to peer with Validators, and +Validators may sell more capacity to RPC nodes. 
These agreements must be made +directly between RPC operators and Validators and include the implementation of +the steps captured below in this doc to complete the peering. + +## Who does Stake-weighted QoS benefit? + +Commercial RPC infrastructure operators and exchanges will likely be among the +major beneficiaries of Stake-weighted QoS. RPC operators will be in an ideal +position to acquire or negotiate deals with staked validators enabling them to +achieve an improved percentage of transactions landed in blocks overall. +Exchanges (or other entities) who host their own validator nodes and RPC nodes +on the same infrastructure will be able to enable the feature internally, +comfortable that the RPC nodes running on their own infrastructure can be +trusted. + +## Why is Stake-weighted QoS important? + +With Stake-weighted QoS enabled, a validator holding 1% stake will have the +right to transmit up to 1% of the packets to the leader. In this way, validators +with higher stake are guaranteed to receive higher quality of service which +prevents lower-quality validator (with less at stake) from maliciously flooding +out these transactions, increasing overall Sybil Resistance. + +Put another way, imagine what the world would be like if cars with one passenger +could ride in the carpool lane uninhibited. Soon the carpool lane, which is +designed to move more human beings using the same stretch of highway, would be +rendered useless. The overall functionality of the highway would be impaired and +fewer commuters would be able to reach their destinations. This effect is +similar to what happens when low-staked validators are allowed to submit +transactions to the Leader with the same priority as high-staked validators. + +## Who should enable Stake-weighted QoS? + +Stake-weighted QoS should be enabled by Validator nodes paired with highly +trusted RPC nodes. This is helpful in situations such as running an RPC and +Validator in the same infrastructure where the trust level is already high. +Stake-weighted QoS works best for high trust configurations and requires the +Validator and RPC to come to an agreement in advance prior to enabling the +feature. It is strongly recommended that Validators not attempt to enable +Stake-weighted QoS with untrusted RPCs. + +Stake should be applied to block producing validators. It is not necessary, +recommended or effective to delegate stake to RPC servers. + +## How does Stake-weighted QoS work? + +With Stake-weighted QoS enabled, RPC nodes paired with a validator gain a +“virtual” stake in regards to how that leader treats inbound TPU (Transaction +Processing Unit) traffic from that RPC node, something which is not normally +possible. By definition, RPC nodes are “unstaked” and “non-voting” aka +“non-consensus” and are unable to access the benefits of prioritized +transactions by way of staking in the same way that consensus nodes do. How do +you use Stake-weighted QoS to land transactions? Enabling Stake-weighted QoS +requires configuring a validator node and an RPC node to form a trusted peer +relationship. This involves separate configuration steps for both the validator +node and the RPC node listed below. Operators wanting to enable Stake-weighted +QoS will need the following before starting: + +A validator with stake running on the network AND A RPC peered to the validator + +Stake-weighted QoS will not work unless BOTH sides are properly configured. 
+
+### Configuring the Validator node
+
+On the validator, you'll have to enable
+`--staked-nodes-overrides /path/to/overrides.yml`. The
+`--staked-nodes-overrides` flag tells the validator to apply stake to
+transactions sent from known sources. This lets a validator prioritize
+transactions from known hosts over others, enabling the use of Stake-weighted
+QoS with RPCs. RPCs should not be staked in any way.
+
+Today, Stake-weighted QoS gives a stake-weighted priority to 80% of a leader's
+TPU capacity. However, there are configuration options which can be used to
+virtually assign different stake-weights to TPU peers, including assigning
+unstaked peers virtual stake.
+
+The overrides file for `--staked-nodes-overrides` looks like this:
+
+```yml
+staked_map_id:
+  pubkey1: 1000000000000000
+  pubkey2: 4000000000000000
+```
+
+`staked_map_id` contains a map of identity public key to the stake amount in
+lamports to apply to each RPC. When set, the validator will prioritize QUIC
+connections with the RPC found at that identity publicKey, assigning an amount
+of stake to their transactions. This 80% of the leader's TPU capacity will be
+split proportionally based on the lamport amounts specified in the
+`staked-nodes-overrides` file and existing cluster stake.
+
+### Configuring the RPC node
+
+On the RPC you will have to use `--rpc-send-transaction-tpu-peer` to forward
+transactions to a specific leader. The exact usage would be
+`--rpc-send-transaction-tpu-peer HOST:PORT`. The host is the IP address of the
+leader you have `--staked-nodes-overrides` enabled on, and the port is the QUIC
+TPU port of that host. The QUIC TPU port for a leader can be identified by
+making an RPC call to [getClusterNodes](/docs/rpc/http/getclusternodes).
+
+The peering would look like the following:
+
+![Diagram of RPCs peering with Validator for Stake-weighted Qos](/assets/guides/stake-weighted-qos-guide/peered-RPCs-guide.png)
+
+## Conclusion
+
+Stake-weighted QoS is an optional feature which arrived in v1.14 of the Solana
+client, now known as Agave. Agave is a forked version of the Solana Labs client
+which has become the active branch used by the Anza team, a spin-out
+organization composed of the former Solana Labs engineering team.
+
+The Stake-weighted QoS feature will most likely be useful for RPC infrastructure
+operators who are in a position to establish trusted relationships with staked
+node operators. It will also be useful for exchanges, who run both RPC nodes and
+validator nodes and are able to establish high trust connections internally.
diff --git a/content/guides/advanced/testing-with-jest-and-bankrun.mdx b/content/guides/advanced/testing-with-jest-and-bankrun.mdx
new file mode 100644
index 000000000..5a10fb4fb
--- /dev/null
+++ b/content/guides/advanced/testing-with-jest-and-bankrun.mdx
@@ -0,0 +1,396 @@
+---
+featured: false
+date: 2024-08-08T00:00:00Z
+difficulty: intermediate
+title: "Speed up Solana program tests with Jest and Bankrun"
+description:
+  "Testing programs is important. Bankrun offers a lightweight alternative to
+  the local validator for testing Solana programs and enables features like
+  custom account data and time travel. Tests can be up to 10 times faster using
+  bankrun."
+tags:
+  - typescript
+  - testing
+keywords:
+  - tutorial
+  - testing
+  - bankrun
+  - jest
+  - intro to solana development
+  - blockchain developer
+  - blockchain tutorial
+  - web3 developer
+---
+
+Testing your Solana programs is a critical part of the development process to
+ensure that your program behaves as expected, and it can even speed up your
+development. This guide will walk you through how you can test your Solana
+programs using [Bankrun](https://kevinheavey.github.io/solana-bankrun/), a super
+fast test runner for Solana programs.
+
+Most Solana tests use the [Mocha framework](https://mochajs.org/) for writing
+the tests and [Chai](https://www.chaijs.com/) for assertions. However, you can
+use any testing framework that you are comfortable with. In this guide we will
+have a look at [Jest](https://jestjs.io/) and
+[Bankrun](https://kevinheavey.github.io/solana-bankrun/). With Bankrun, you can
+accelerate your tests by almost 10x, gain the ability to modify program time,
+and write custom account data.
+
+## Presets
+
+There are a few presets that will set you up with a basic testing environment
+for your Solana programs, for example:
+
+```bash
+npx create-solana-dapp my-dapp
+```
+
+`create-solana-dapp` will set you up with a Solana web project with various
+configuration options, including a Next.js or React client, the Tailwind UI
+Library, and a simple Anchor program. The tests are written using Jest and can
+be run with the `anchor test` command.
+
+```bash
+npx create-solana-game my-game
+```
+
+`create-solana-game` will set you up with a Solana game project that includes
+[Jest](https://jestjs.io/), [Mocha](https://mochajs.org/) and
+[Bankrun](https://kevinheavey.github.io/solana-bankrun/) tests, a NextJS app,
+and an additional Unity game engine client using the Solana Wallet Adapter. The
+Mocha and Bankrun tests can both be run using the `anchor test` command.
+
+You can also find many test examples in the
+[Solana Program Examples](https://github.com/solana-developers/program-examples).
+
+## Anchor test
+
+Using the Anchor framework, you can run the `anchor test` command to execute the
+pre-configured `test` command within the `Anchor.toml` file.
+
+```toml title="Anchor.toml"
+test = "yarn run ts-mocha -p ./tsconfig.json -t 1000000 tests/**/*.ts"
+```
+
+This will run the tests in the tests directory using the `ts-mocha` command with
+a timeout of 1,000,000 milliseconds.
+
+What Anchor does here is start up a local validator, deploy the program from
+your Anchor workspace, and run the tests against the network defined in the
+`Anchor.toml`.
+
+> Tip: You can also run `anchor test --detach` to let the validator continue
+> running after the tests have finished, which lets you inspect your
+> transactions in the [Solana Explorer](https://explorer.solana.com/?cluster=custom).
+
+You can also define your own test command in the `Anchor.toml` file. For
+example, you can first run the Mocha tests against the local validator and then
+run the Jest tests using Bankrun by combining them with `&&`:
+
+```toml title="Anchor.toml"
+test = "yarn run ts-mocha -p ./tsconfig.json -t 1000000 tests/**/*.ts && yarn run jest"
+```
+
+This would run the Mocha tests first and then the Jest tests.
+
+You can also define additional custom test commands. 
For example:
+
+```toml title="Anchor.toml"
+super_test = "yarn run ts-mocha -p ./tsconfig.json -t 1000000 only_super_test.ts"
+jest_tests = "yarn test"
+```
+
+You would then run these using:
+
+```bash
+anchor run super_test
+anchor run jest_tests
+```
+
+> Note though that in this case Anchor does not start a local validator for you,
+> so you will need to deploy the program to your local cluster yourself or run
+> the tests against a public test network. Also, Anchor environment variables
+> will only be available when running through the `anchor test` or `anchor run`
+> commands, not when running tests with `yarn test` directly.
+
+## Migrating from Mocha to Jest
+
+In this part we will learn how to migrate from [Mocha](https://mochajs.org/) to
+[Jest](https://jestjs.io/). Jest is another JavaScript testing framework similar
+to Mocha. It comes with its own assertions built in, so you don't need to use
+Chai anymore.
+
+First you need to install Jest:
+
+```bash
+yarn add --dev jest
+```
+
+Then you add a new command to your `package.json`:
+
+```json title="package.json"
+{
+  "scripts": {
+    "test": "jest"
+  }
+}
+```
+
+Then you can run the tests with:
+
+```bash
+yarn test
+```
+
+Since we want to run our tests with TypeScript, we need to install the `ts-jest`
+package and also create a Jest configuration file:
+
+```bash
+yarn add --dev ts-jest @jest/globals @types/jest
+yarn ts-jest config:init
+```
+
+This will create a `jest.config.js` file in your project. Now you can update
+your `Anchor.toml` file to run the Jest tests:
+
+```toml title="Anchor.toml"
+test = "yarn test"
+```
+
+### Jest Troubleshooting
+
+1. In case you get a
+   `SyntaxError: Cannot use import statement outside a module` error, you either
+   did not create a Jest config or you need to add the following to your
+   `jest.config.js` file:
+
+```js title="jest.config.js"
+module.exports = {
+  transform: {
+    "^.+\\.tsx?$": "ts-jest",
+  },
+  testEnvironment: "node",
+  moduleFileExtensions: ["ts", "tsx", "js", "jsx", "json", "node"],
+};
+```
+
+2. Running tests against the local validator can take quite some time, so if you
+   get an error saying that you cannot log after a test has finished, your test
+   is probably timing out. Unlike the Mocha command above, which sets a very
+   generous timeout, Jest's default timeout is only five seconds. You can set a
+   longer timeout in your `jest.config.js` file:
+
+```js title="jest.config.js"
+module.exports = {
+  testTimeout: 10000,
+};
+```
+
+Or you can set a timeout for a single test:
+
+```js title="your.test.js"
+test("test name", async () => {
+  // your test code
+}, 10000);
+```
+
+3. If you get an error saying that Anchor environment variables are missing, you
+   are probably trying to use the AnchorProvider without running the tests
+   through `anchor test` or `anchor run test`. Just point the `test` command in
+   your `Anchor.toml` to `yarn test` so the Jest tests run in the Anchor
+   environment with all the environment variables set.
+
+## Bankrun
+
+Instead of using `solana-test-validator` you can also use
+[Solana Bankrun](https://kevinheavey.github.io/solana-bankrun/). It acts
+similarly to the local validator but is more lightweight and faster. Some really
+helpful features are
+[writing custom account data](https://kevinheavey.github.io/solana-bankrun/tutorial/#writing-arbitrary-accounts)
+and
+[time travel](https://kevinheavey.github.io/solana-bankrun/tutorial/#time-travel),
+which make it much easier to test time-based programs and programs that rely on
+specific account data.
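+
+As a minimal sketch of what time travel can look like (based on the Bankrun
+tutorial; the exact `Clock` constructor fields are an assumption you should
+check against the version you install):
+
+```js
+import { start, Clock } from "solana-bankrun";
+
+test("one year later", async () => {
+  const context = await start([], []);
+  const client = context.banksClient;
+
+  // read the current clock sysvar from the test bank
+  const currentClock = await client.getClock();
+
+  // jump roughly one year into the future by overwriting the unix timestamp
+  context.setClock(
+    new Clock(
+      currentClock.slot,
+      currentClock.epochStartTimestamp,
+      currentClock.epoch,
+      currentClock.leaderScheduleEpoch,
+      currentClock.unixTimestamp + 365n * 24n * 60n * 60n,
+    ),
+  );
+  // any instruction processed after this point sees the new timestamp
+});
+```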
+
+To use Bankrun and Bankrun Anchor you need to add them to your project:
+
+```bash
+yarn add solana-bankrun anchor-bankrun
+```
+
+To switch from a Mocha Anchor test to a
+[Bankrun Anchor](https://github.com/kevinheavey/anchor-bankrun) test you only
+need to change the provider to be a
+[`BankrunProvider`](https://kevinheavey.github.io/solana-bankrun/tutorial/#anchor-integration)
+and create a context using `startAnchor` within each of your test files:
+
+```js title="my-test.test.ts" /startAnchor/
+// ... imports here
+describe('My test', () => {
+  test('Test allocate counter + increment tx', async () => {
+    const context = await startAnchor(".", [], []);
+    const client = context.banksClient;
+
+    const provider = new BankrunProvider(context);
+    anchor.setProvider(provider);
+
+    // ... testing logic
+  });
+});
+```
+
+`startAnchor` will automatically add your program from your Anchor workspace to
+the Bankrun bank. You can also
+[add additional accounts and programs](https://kevinheavey.github.io/solana-bankrun/tutorial/#writing-arbitrary-accounts)
+to the bank by passing them in the `startAnchor` function. There are a few
+things that are different from running tests against the local validator,
+though, which we will cover in the next section.
+
+### Bankrun differences to the local validator
+
+Bankrun uses a BanksServer (a simplified version of
+[Solana's bank](https://github.com/solana-labs/solana/blob/master/runtime/src/bank.rs)
+that processes transactions and manages accounts) and a BanksClient to simulate
+the Solana network, enabling advanced testing features like time manipulation
+and dynamic account data setting. Unlike the `solana-test-validator`, Bankrun
+allows efficient testing by running against a local instance of the network
+state, making it ideal for testing Solana programs without the overhead of a
+full validator. The behaviour for transactions is pretty similar, but there are
+a few differences to the local validator:
+
+#### Airdrops
+
+- Bankrun does not support airdrops. The standard signer used in the
+  BankrunProvider will be automatically funded with some SOL. If you need
+  another funded account you can create one by passing in an additional account
+  in the `startAnchor` function.
+
+```js
+let secondKeypair: Keypair = new anchor.web3.Keypair();
+
+context = await startAnchor(
+  "",
+  [],
+  [
+    {
+      address: secondKeypair.publicKey,
+      info: {
+        lamports: 1_000_000_000, // 1 SOL equivalent
+        data: Buffer.alloc(0),
+        owner: SYSTEM_PROGRAM_ID,
+        executable: false,
+      },
+    },
+  ],
+);
+provider = new BankrunProvider(context);
+```
+
+#### Confirming transactions
+
+Since Bankrun works directly on the bank, you will not need to confirm your
+transactions. So the `connection.confirmTransaction()` function will not be
+available. You can just leave it out.
+
+#### Getting account data
+
+While you can still use `connection.getAccountInfo` to retrieve account data,
+the preferred method in the Bankrun framework is to use `client.getAccount`,
+which returns a `Promise`. This method aligns better with the testing
+framework's design. However, if you prefer consistency with how accounts are
+retrieved in the rest of your Solana codebase, you can continue using
+`connection.getAccountInfo`. Choose the method that best fits your specific use
+case. 
+ +```js +await client.getAccount(playerPDA).then(info => { + const decoded = program.coder.accounts.decode( + "playerData", + Buffer.from(info.data), + ); + console.log("Player account info", JSON.stringify(decoded)); + expect(decoded).toBeDefined(); + expect(parseInt(decoded.energy)).toEqual(99); +}); +``` + +#### Signing transactions with another keypair + +By default when using `program.function.rpc()` the transaction will be +automatically signed with the `provider.wallet` keypair. If you want to sign the +transaction with another keypair you can create a second provider and then use +that one to sign transaction with another keypair. + +```js +let secondKeypair: Keypair = new anchor.web3.Keypair(); + +let context = await startAnchor( +"",[], +[ + { + address: secondKeypair.publicKey, + info: { + lamports: 1_000_000_000, + data: Buffer.alloc(0), + owner: SYSTEM_PROGRAM_ID, + executable: false, + }, + }, + ] +); +beneficiaryProvider = new BankrunProvider(context); +beneficiaryProvider.wallet = new NodeWallet(secondKeypair); + +secondProgram = new Program(IDL as Vesting, beneficiaryProvider); +``` + +### Using Bankrun for native programs + +You can also use Bankrun for +[native programs](/developers/guides/getstarted/intro-to-native-rust). The main +difference is that you use `start` instead of `startAnchor` to start the Bankrun +bank. You can then use the `client` to interact with the bank. + +```js +const context = await start( + [{ name: "counter_solana_native", programId: PROGRAM_ID }], + [], +); +const client = context.banksClient; +``` + +Instead of using `program.instruction().rpc()` you can use the +`await client.processTransaction(tx)`. + +In the Solana program examples you can find a +[full native Bankrun example](https://github.com/solana-developers/program-examples/blob/main/basics/counter/native/tests/counter.test.ts). + +### Bankrun trouble shooting + +1. If you encounter an `Unknown action 'undefined'` error when sending a + transaction using Bankrun, you are likely trying to send two identical + transactions with the same blockhash. Request a new recent blockhash before + sending the second transaction or add some seed or parameter to your + instructions to make sure they will result in different transaction hashes. + +2. If you encounter `Clock handle timeout` error you can just restart your + terminal and run the tests again. + +## Conclusion + +Testing your Solana programs is essential for ensuring they behave as expected. +Bankrun offers a lightweight and fast alternative to the local validator, making +your tests up to 10 times faster. It enables powerful features like custom +account data and time travel, which can significantly enhance your testing +capabilities. Additionally, Jest is a great alternative to Mocha for writing +tests and can be easily integrated with Bankrun. + +However, it's important to note a few disadvantages of using Bankrun compared to +the local validator: + +1. Environment Representation: Tests run with Bankrun may not fully represent a + live or testnet environment. +2. Code Reusability: Some code used in local validator tests might not be + reusable with Bankrun. +3. Dependency: Using Bankrun and Bankrun Anchor introduces dependencies specific + to these tools. + +Despite these drawbacks, Bankrun is a valuable tool that can greatly improve +your development workflow. 
diff --git a/content/guides/advanced/verified-builds.mdx b/content/guides/advanced/verified-builds.mdx
new file mode 100644
index 000000000..f9c41a9be
--- /dev/null
+++ b/content/guides/advanced/verified-builds.mdx
@@ -0,0 +1,832 @@
+---
+date: 2024-09-26T00:00:00Z
+difficulty: intermediate
+title: "How to Verify a Program"
+description:
+  "Verified builds are a way to link your program to its source code and let
+  everyone independently verify that the program was indeed built from that
+  provided source code."
+tags:
+  - web3js
+keywords:
+  - tutorial
+  - verified builds
+  - security.txt
+  - verified source code
+  - find a programs source code
+  - security
+  - blockchain tutorial
+---
+
+This guide is meant to be a reference for developers who want to implement
+verified builds for their programs on Solana. We will cover what verified builds
+are, how to use them, special considerations, and best practices to ensure the
+authenticity of your program onchain.
+
+# What are verified builds?
+
+Verified builds ensure that the executable program you deploy to Solana’s
+network matches the source code in your repository. By doing this, developers
+and users can have confidence that the program running onchain corresponds
+exactly to the public codebase, promoting transparency and security.
+
+The verification process involves comparing the hash of the onchain program with
+the hash of the locally built program from the source code. This ensures there
+are no discrepancies between the two versions.
+
+> While a verified build should not be considered more secure than an unverified
+> build, the build enables developers to self-verify that the source code
+> matches what is deployed onchain. Using the source code, a developer can then
+> validate what the code executes when sending a transaction.
+
+The verified builds pipeline was designed and is maintained by
+[Ellipsis Labs](https://ellipsislabs.xyz/) and [OtterSec](https://osec.io/). For
+more details, follow the guide in the
+[original verified builds](https://github.com/Ellipsis-Labs/solana-verifiable-build)
+repository, and keep an eye on the verified build process being integrated
+directly into the [Anza](https://www.anza.xyz/) tool suite once it is supported
+there.
+
+# How does it work?
+
+The verification process is done by comparing the hash of the onchain program
+with the hash of the locally built program from the source code. You build your
+program in a controlled environment using the Solana Verify CLI and Docker. This
+ensures that the build process is deterministic and consistent across different
+systems. Once you have the executable, you can deploy it to the Solana network.
+During the build process a
+[PDA](https://explorer.solana.com/address/63XDCHrwZu3mXsw2RUFb4tbNpChuUHx4eA5aJMnHkpQQ/anchor-account)
+of the [verify program](https://github.com/otter-sec/otter-verify) will be
+created. This PDA contains all the data necessary to verify the program: the
+program address, git URL, commit hash, and the arguments used to build the
+program.
+
+Using the data in the PDA, everyone can run the verify program command locally
+and check whether the program was built from the provided source code. Everyone
+can therefore verify for themselves completely trustlessly, or they can run
+their own instance of the
+[verify API](https://github.com/otter-sec/solana-verified-programs-api),
+maintained by [OtterSec](https://github.com/otter-sec), to provide an easy
+access point for users to check the verification. 
You can already see these +[API calls](https://verify.osec.io/status/PhoeNiXZ8ByJGLkxNfZRnkUfjvmuYqLR89jjFHGqdXY) +being used in the +[Solana Explorer](https://explorer.solana.com/address/E1fcPKuV1UJEsTJPpab2Jr8y87ZN73i4CHTPLbWQE6CA/verified-build) +and +[SolanaFM](https://solana.fm/address/E1fcPKuV1UJEsTJPpab2Jr8y87ZN73i4CHTPLbWQE6CA/transactions?cluster=mainnet-alpha), +among other places. + +# Why should I use verified builds? + +Using verified builds provides the following benefits: + +- Security: Guarantee that the program running onchain matches the source code, + preventing malicious alterations. + +- Transparency: Allows other users and developers to validate that the onchain + program is trustworthy by comparing it with the public codebase. + +- Trust: Increase user confidence, as verified builds demonstrate that your + program's onchain behavior is aligned with your public code. When building + verifiable programs, you minimize risks associated with running unauthorized + or malicious code. It also ensures you comply with best practices and give + security researchers an easy way to contact you. Also wallets and other tools + can allow transactions from your program more easily as long as it is + verified. + +- Discoverability: When you provide a verified build of you program everyone can + find your source code, docs, program SDK or IDL and they can also easily + contact you via github in case there is an issue. + +# How do I create verified builds? + +To create verified builds, you'll need to follow these steps: + +Summary: + +- Commit your code to a public repository +- Build a verified build in docker +- Deploy the verified build +- Verify the deployed program against public API + +If you verify your program which is not build in a docker container it will most +likely fail because Solana program builds are not deterministic across different +systems. + + + + + +### Install Docker and Cargo + +Install the necessary tools ensure you have Docker and Cargo installed. Docker +provides a controlled build environment to ensure consistency, and Cargo is used +for managing Rust packages. + +- Docker: Follow the steps on the + [Docker website](https://docs.docker.com/engine/install/) to install Docker + for your platform. Once installed, ensure the Docker service is running + following this guide further. +- Cargo: If you don’t already have Cargo installed, you can install it by + running the following command: + +```bash +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +``` + + + + +### Install the Solana Verify CLI + +The Solana Verify CLI is the primary tool used to verify builds. Solana Verify +CLI is currently maintained by [Ellipsis Labs](https://ellipsislabs.xyz/) and +can be installed using Cargo. + +You can install it by running: + +```bash +cargo install solana-verify +``` + +If you need a specific version of the CLI, you can pin the version with: + +```bash +cargo install solana-verify --version $VERSION +``` + +If desired, you can install a version directly from a specific commit: + +```bash +cargo install solana-verify --git https://github.com/Ellipsis-Labs/solana-verifiable-build --rev 13a1db2 +``` + + + + +### Prepare project + +To verify against a repository it needs to have a `Cargo.lock` file in the root +directory of your repository. If you only have one program in your repository +and a `cargo.lock` file in your root you can directly go to the next step and +build your program. 
+ +If your program is in a subfolder and you have a rust workspace you need to +create a workspace `Cargo.toml` file in the root directory of your repository. + +You can use this `Cargo.toml` example as a preset: + +```toml title="Cargo.toml" +[workspace] +members = ["program/programs/*"] +resolver = "2" + +[profile.release] +overflow-checks = true +lto = "fat" +codegen-units = 1 + +[profile.release.build-override] +opt-level = 3 +incremental = false +codegen-units = 1 +``` + +Make sure that your program is in the `workspace/members` array and that the +`Cargo.toml` of your program has the correct `lib` name configured. + +> Important is the `lib name` not the package name! + +Something like this: + +```toml title="waffle/Cargo.toml" +[package] +name = "waffle" +version = "0.1.0" +edition = "2021" + +[lib] +name = "waffle" +crate-type = ["cdylib", "lib"] + +[dependencies] +solana-program = "2.1.0" +``` + +In this [repository](https://github.com/solana-developers/verified-program) you +can see an example of a workspace with a program in a subfolder. Notice also +that when the program is in a subfolder you later need to add this folder as +`--mount-path` to the `verify-from-repo` command. + +In this [repository](https://github.com/solana-developers/solana-game-preset) +you can find an anchor example. In this +[repository](https://github.com/solana-developers/verified-program-root) you can +find a native rust example. + +With this `Cargo.toml` file in place you can then run `cargo generate-lockfile` +to create a lock file and continue to building your program. + + + + +### Building Verifiable Programs + +To verifiably build your Solana program, navigate to the directory containing +your workspace's `Cargo.toml` file and run: + +```bash +solana-verify build +``` + +This will copy your environment into a docker container and build it in a +deterministic way. + +> Make sure that you actually deploy the verified build and don't accidentally +> overwrite it with `anchor build` or `cargo build-sbf` since these will most +> likely not result into the same hash and though your verification will fail. + +For projects with multiple programs, you can build a specific program by using +the library name (not the package name): + +```bash +solana-verify build --library-name $PROGRAM_LIB_NAME +``` + +This process ensures deterministic builds and can take some time, especially on +certain systems (e.g., M1 MacBook) because it is running within a docker +container. For faster builds, using a Linux machine running x86 architecture is +recommended. + +Once the build completes, you can retrieve the hash of the executable using the +following command: + +```bash +solana-verify get-executable-hash target/deploy/$PROGRAM_LIB_NAME.so +``` + + + + +### Deploying Verifiable Programs + +Once you have built your program and retrieved its hash, you can deploy it to +the Solana network. It is recommended to use a multi-signature or governance +solution like [Squads Protocol](https://squads.so/protocol) for safe +deployments, but you can also directly deploy with: + +```bash +solana program deploy -u $NETWORK_URL target/deploy/$PROGRAM_LIB_NAME.so --program-id $PROGRAM_ID --with-compute-unit-price 50000 --max-sign-attempts 100 --use-rpc +``` + +A currently fitting low priority fee you can request from your rpc provider for +example [Quicknode](https://www.quicknode.com/gas-tracker/solana). 
+ +To verify the deployed program matches the built executable, run: + +```bash +solana-verify get-program-hash -u $NETWORK_URL $PROGRAM_ID +``` + +> You may have different versions deployed on different +> [Solana clusters](/docs/core/clusters) (i.e. devnet, testnet, mainnet). +> Ensure you use the correct network URL for the desired Solana cluster you want +> to verify a program against. Remote verification will only work on mainnet. + +Now you can already get the hash of your program and compare it to your binary +hash from earlier if you want: + +```bash +solana-verify get-program-hash $PROGRAM_ID +``` + + + + +### Verifying against repositories + +To verify a program against its public repository, use: + +```bash +solana-verify verify-from-repo -u $NETWORK_URL --program-id $PROGRAM_ID https://github.com/$REPO_PATH --commit-hash $COMMIT_HASH --library-name $PROGRAM_LIB_NAME --mount-path $MOUNT_PATH +``` + +> While you run the verified build in your program directory, when running +> `verify-from-repo` you need to add the `--mount-path` flag. This will be the +> path to the folder containing the `Cargo.toml` that contains your program's +> library name. + +This command compares the onchain program hash with the executable hash built +from the source at the specified commit hash. + +At the end the command will ask you if you want to upload your verification data +onchain. If you do that the Solana Explorer will immediately show your program's +verification data. Until it was verified by a remote build it will show as +unverified. Learn how you can verify your program against a public API in the +next step. + +If you want to lock the verification to a certain release, you can append the +`--commit-hash` flag to the command. + + + + +### Verify against public API + +Finally you can also directly verify the program against anyone that is running +the verify API:: + +```bash +solana-verify verify-from-repo --remote -um --program-id PhoeNiXZ8ByJGLkxNfZRnkUfjvmuYqLR89jjFHGqdXY https://github.com/Ellipsis-Labs/phoenix-v1 +``` + +> It is recommended to use a payed RPC Url because otherwise you may run into +> rate limits of the free RPCs. So instead of `-um` you should use +> `--url yourRpcUrl` for a more reliable verification. + +The `--remote` flag sends a build request to the OtterSec API, which triggers a +remote build of your program. Once the build is complete, the system verifies +that the onchain hash of your program matches the hash of the generated build +artifact from your repository. + +The default is the +[OtterSec API](https://github.com/otter-sec/solana-verified-programs-api). + +Make sure to pick yes when you are asked to upload the verification data +onchain. This is used by the API to verify that you uploaded the verification +data. + +You can also trigger a remote job manually by using: + +```bash +solana-verify remote submit-job --program-id --uploader
```

Where the uploader is the address that has the authority to write to the PDA.
That should be the program authority in most cases. If your program is
controlled by a multisig, please continue in the
[multisig verification](#how-to-verify-your-program-when-its-controlled-by-a-multisig-like-squads)
part of this guide below.

This will submit a job to the OtterSec API and you can then check the job
status with:

```bash
solana-verify remote get-job-status --job-id $JOB_ID
```

Once the verification has completed successfully, which may take a while, you
will be able to see your program as verified in the
[OtterSec API for single programs](https://verify.osec.io/status/PhoeNiXZ8ByJGLkxNfZRnkUfjvmuYqLR89jjFHGqdXY)
and in the
[Solana Explorer](https://explorer.solana.com/address/PhoeNiXZ8ByJGLkxNfZRnkUfjvmuYqLR89jjFHGqdXY/verified-build),
[SolanaFM](https://solana.fm/address/PhoeNiXZ8ByJGLkxNfZRnkUfjvmuYqLR89jjFHGqdXY?cluster=mainnet-alpha) and
[SolScan](https://solscan.io/account/PhoeNiXZ8ByJGLkxNfZRnkUfjvmuYqLR89jjFHGqdXY#programVerification),
and eventually also on the community-run website
[SolanaVerify.org](https://www.solanaverify.org/) maintained by
[0xDeep](https://x.com/0xDeep), in the
[OtterSec verified programs API](https://verify.osec.io/verified-programs), and
finally in the
[Verified Programs Dune Dashboard](https://dune.com/jonashahn/verified-programs/dedf21e1-9b71-42c8-89f9-02ed94628657),
all contributing to a healthier Solana ecosystem.

## How to verify your program when it's controlled by a multisig like Squads

For the remote verification to work, you need to write the verification data
into a PDA signed by the program authority. If your program is controlled by a
multisig, you can export this write-PDA transaction and submit it through
[Squads Protocol](https://squads.so/protocol) or another multisig solution of
your choice.

### 1. Build the verifiable program

First build the program:

```bash
solana-verify build
```

This creates a verifiable build in a Docker container, using the Solana version
specified in the `Cargo.lock` file.

### 2. Deploy the program

```bash
solana config set --url "PaidMainnetRPCAddress" # the public endpoint will be rate limited too much
solana program deploy target/deploy/verify_squads.so
```

For the rest of this multisig guide, we will use an example program ID of
`6XBGfP17P3KQAKoJb2s5M5fR4aFTXzPeuC1af2GYkvhD`.

### 3. Commit and verify against repository

Once that is done, commit the project to GitHub. Here is an example:
https://github.com/solana-developers/verify-squads

Optional: See if you can verify locally first, just to make sure your
parameters are correct (this command uses the example program ID
`6XBGfP17P3KQAKoJb2s5M5fR4aFTXzPeuC1af2GYkvhD`):

```bash
solana-verify verify-from-repo https://github.com/solana-developers/verify-squads --program-id 6XBGfP17P3KQAKoJb2s5M5fR4aFTXzPeuC1af2GYkvhD
```

### 4. Transfer program authority to multisig

If you have not done so yet, transfer your program's authority to the multisig
and copy the multisig authority address. You will need it in the next step.

### 5. Export PDA transaction

When you hold the program authority locally, you are prompted to upload the
build data onchain when using the `solana-verify verify-from-repo` command.

Since you cannot do that when the authority is a multisig, you need to export
the PDA transaction manually and then trigger the transaction through Squads.
+ +```bash +solana-verify export-pda-tx https://github.com/solana-developers/verify-squads --program-id 6XBGfP17P3KQAKoJb2s5M5fR4aFTXzPeuC1af2GYkvhD --uploader --encoding base58 --compute-unit-price 0 +``` + +This will return you a base58 transaction. If you want a base64 encoded +transaction for use in a transaction inspector, you can use `--encoding base64`. + +```bash +P6vBfcPaaXb8fZoT3NBAYEcdtEj7tubA1k2gBxmFKZ3UWF5YyrmDMFTvLKALCJoUuRsPAjMckudYruCu3eeWQtuDrFbEMLxLFutnKXac974fnkMivcwUdY66VLjbxQT6ATmcy7F4hBtz1G4P1h6iBJLhb8WtrtgY3i4qq45MUEb7RjuMEfUFXKrNgPdGxkz5xvMHq3dxKRcpmEK5k2DkeW6SUQYBVe19Ga3B9GyhTX8k3CMt9JCEah13WyRnQd8GjoK6sTEvGJym6xDNvmd8yiJYSNcaYwEJsjHEUf4Yh6kAC7ki2KRvVAr3NVe1gjqK9McrwSQjtUatvydTG8Zovcr7PPUEMf3yPMgKXjZLB2QpkH63yTTYdNAnWFuv9E6b6nYRqye5XcNi436yKw5U14fXh65yK34bgYLi9328UT1huJELsJU9BRGnGUmb6GWp6c2WL5BhnzgNTSnt9TXFfEgUMzhvKzpVBxLP44hwqqBdyUhHFysCF37531PnmiESq8x1xou23xJ6FcQbc199754MkqQd7tX9CUznGzAEqHGkzn3VBoJnojsKtgYmiTYbdRsT1CU18MbYEE7WvGAvXyxxbpNzbAcc94HrnM6cqRGmwhEBroPfFghTdmzg9D +``` + + + + +### 6. Submit transaction through Squads + +Go to the squads transaction builder and import the base58 encoded transaction. +Make sure that in the simulation the transaction only has a call to the osec +verify program and the computer budget program and nothing else! + + + + +### 7. Submit remote verification job + +Once the transaction to squads was successful you can submit the remote job: + +```bash +solana-verify remote submit-job --program-id 6XBGfP17P3KQAKoJb2s5M5fR4aFTXzPeuC1af2GYkvhD +--uploader +``` + +This is it! You have verified your program against a public repository and +submitted a remote job to the OtterSec API. You should be able to see it reflect +in the solana explorer and other places now. + + + + +### 8. Updating the program (Optional) + +When you update your program you need to export a new PDA transaction and submit +it through Squads again. + +Doing an update to the program: + +```bash +solana-verify build +solana program write-buffer target/deploy/verify_squads.so --with-compute-unit-price 50000 --max-sign-attempts 50 +``` + +Then transfer that buffer authority to the multisig or directly create the +buffer with the authority of the multisig. + +```bash +solana program set-buffer-authority Fu3k79g53ZozAj47uq1tXrFy4QbQYh7y745DDsxjtyLR --new-buffer-authority 3JG6ULvZVCrkKtSSskKNJGe8RNZGFe8Ruev9KUhxzK5K +``` + + + + +### 9. Export and submit new PDA transaction + +Don't forget to commit your changes to github. Export the PDA upgrade +transaction again: + +```bash +solana-verify export-pda-tx https://github.com/solana-developers/verify-squads --program-id 6XBGfP17P3KQAKoJb2s5M5fR4aFTXzPeuC1af2GYkvhD --uploader 3JG6ULvZVCrkKtSSskKNJGe8RNZGFe8Ruev9KUhxzK5K +``` + +Submit the transaction through Squads again. + +You can see an example transaction here: + +https://solana.fm/tx/4zJ1vK2KToAwxuEYzTMLqPkcebjoi9rdeeyxtEEx9L5Q4vWDA8h6Rr4kPRuRxcV7ZLKMr6qx1LTWb6x3ZpUJaFUW?cluster=mainnet-alpha + +Then submit for another remote build: + +```bash +solana-verify remote submit-job --program-id 6XBGfP17P3KQAKoJb2s5M5fR4aFTXzPeuC1af2GYkvhD --uploader 3JG6ULvZVCrkKtSSskKNJGe8RNZGFe8Ruev9KUhxzK5K +``` + +Should result in something like this: + +```shell +Verification request sent with request id: b63339d2-163e-49ac-b55d-3454c1c2b5b3 +Verification in progress... ⏳ [00:18:02] ✅ Process completed. (Done in 18 +minutes) Program 6XBGfP17P3KQAKoJb2s5M5fR4aFTXzPeuC1af2GYkvhD has been verified. +✅ The provided GitHub build matches the on-chain hash. 
On Chain Hash: +96f8c3d9400258f7759408d1f6f8435b4a24d9b52f5a0340d97907e567cb8773 Executable +Hash: 96f8c3d9400258f7759408d1f6f8435b4a24d9b52f5a0340d97907e567cb8773 Repo URL: +https://github.com/Woody4618/verify-squads/tree/0fb0a2e30c15c51732c0ad5e837975a6f7bbc7ed +Check the verification status at: +https://verify.osec.io/status/6XBGfP17P3KQAKoJb2s5M5fR4aFTXzPeuC1af2GYkvhD Job +url: https://verify.osec.io/job/b63339d2-163e-49ac-b55d-3454c1c2b5b3 +``` + +Congratulations you have verified your program after a multisig upgrade! + + + + + +## Verify from docker image + +You can also verify your program against a docker image by running the following +command: + +```bash +solana-verify verify-from-image -e +examples/hello_world/target/deploy/hello_world.so -i +ellipsislabs/hello_world_verifiable_build:latest -p +2ZrriTQSVekoj414Ynysd48jyn4AX6ZF4TTJRqHfbJfn +``` + +This command loads up the image stored at +`ellipsislabs/hello_world_verifiable_build:latest`, and verifies that the hash +of the executable path in the container is the same as the hash of the on-chain +program supplied to the command. Because the build was already uploaded to an +image, there is no need for a full rebuild of the executable which can take a +long time. + +The Dockerfile that creates the image +`ellipsislabs/hello_world_verifiable_build:latest` can be found in the ellipsis +labs repository +[/examples/hello_world](https://github.com/Ellipsis-Labs/solana-verifiable-build/tree/master/examples/hello_world). + +Below is the expected output: + +```bash +Verifying image: "ellipsislabs/hello_world_verifiable_build:latest", on network +"https://api.mainnet-beta.solana.com" against program ID +2ZrriTQSVekoj414Ynysd48jyn4AX6ZF4TTJRqHfbJfn Executable path in container: +"examples/hello_world/target/deploy/hello_world.so" + +Executable hash: +08d91368d349c2b56c712422f6d274a1e8f1946ff2ecd1dc3efc3ebace52a760 Program hash: +08d91368d349c2b56c712422f6d274a1e8f1946ff2ecd1dc3efc3ebace52a760 Executable +matches on-chain program data ✅ +``` + +## Example verified build + +Here’s an example of verifying an example program with the ID +`FWEYpBAf9WsemQiNbAewhyESfR38GBBHLrCaU3MpEKWv` using the source code from this +[repository](https://github.com/solana-developers/verified-program): + +```bash +solana-verify verify-from-repo https://github.com/solana-developers/verified-program --url YOUR-RPC-URL --program-id FWEYpBAf9WsemQiNbAewhyESfR38GBBHLrCaU3MpEKWv --mount-path waffle --library-name waffle --commit-hash 5b82b86f02afbde330dff3e1847bed2d42069f4e +``` + +By default the `verify-from-repo` command takes the last commit on the main +branch. You can also define a certain commit in case you want to continue +working on the repository by using the `commit-hash` parameter: +`--commit-hash 5b82b86f02afbde330dff3e1847bed2d42069f4e` + +Finally you can also directly verify the program against the OtterSec API: + +```bash +solana-verify verify-from-repo https://github.com/solana-developers/verified-program --url YOUR-RPC-URL --remote --program-id FWEYpBAf9WsemQiNbAewhyESfR38GBBHLrCaU3MpEKWv --mount-path waffle --library-name waffle --commit-hash 5b82b86f02afbde330dff3e1847bed2d42069f4e +``` + +The `--remote` command sends a build request to the OtterSec API, which triggers +a remote build of your program. Once the build is complete, the system verifies +that the onchain hash of your program matches the hash of the generated build +artifact from your repository. 
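If you prefer to check a remote verification from the command line instead of
the explorer links above, you can query the OtterSec status endpoint directly.
A minimal sketch (requires `curl` and `jq`; the response is printed raw here
because the exact JSON fields are not guaranteed by this guide):

```bash
# Program ID from the example verified build above.
PROGRAM_ID=FWEYpBAf9WsemQiNbAewhyESfR38GBBHLrCaU3MpEKWv

# Pretty-print whatever status JSON the OtterSec API returns for this program.
curl -s "https://verify.osec.io/status/$PROGRAM_ID" | jq .
```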
+ +## Popular programs that are already verified + +### Phoenix + +```shell +solana-verify verify-from-repo -um --program-id PhoeNiXZ8ByJGLkxNfZRnkUfjvmuYqLR89jjFHGqdXY https://github.com/Ellipsis-Labs/phoenix-v1 +``` + +Final Output: + +```shell +Executable Program Hash from repo: 6877a5b732b3494b828a324ec846d526d962223959534dbaf4209e0da3b2d6a9 +On-chain Program Hash: 6877a5b732b3494b828a324ec846d526d962223959534dbaf4209e0da3b2d6a9 +Program hash matches ✅ +``` + +### Squads V3 + +```shell +solana-verify verify-from-repo https://github.com/Squads-Protocol/squads-mpl --commit-hash c95b7673d616c377a349ca424261872dfcf8b19d --program-id SMPLecH534NA9acpos4G6x7uf3LWbCAwZQE9e8ZekMu -um --library-name squads_mpl --bpf +``` + +> Notice we needed to specify the `library-name` because the Squads repo +> includes multiple programs. We use the `--bpf` flag because `squads_mpl` was +> previously verified with Anchor. + +Final Output: + +```shell +Executable Program Hash from repo: 72da599d9ee14b2a03a23ccfa6f06d53eea4a00825ad2191929cbd78fb69205c +On-chain Program Hash: 72da599d9ee14b2a03a23ccfa6f06d53eea4a00825ad2191929cbd78fb69205c +Program hash matches ✅ +``` + +### Drift V2 + +```shell +solana-verify verify-from-repo -um --program-id dRiftyHA39MWEi3m9aunc5MzRF1JYuBsbn6VPcn33UH https://github.com/drift-labs/protocol-v2 --commit-hash 110d3ff4f8ba07c178d69f9bfc7b30194fac56d6 --library-name drift +``` + +Final Output: + +```shell +Executable Program Hash from repo: e31d58edeabc3c30bf6f2aa60bfaa5e492b41ec203e9006404b463e5adee5828 +On-chain Program Hash: e31d58edeabc3c30bf6f2aa60bfaa5e492b41ec203e9006404b463e5adee5828 +Program hash matches ✅ +``` + +### Marginfi V2 + +```shell +solana-verify verify-from-repo -um --program-id MFv2hWf31Z9kbCa1snEPYctwafyhdvnV7FZnsebVacA https://github.com/mrgnlabs/marginfi-v2 --commit-hash d33e649e415c354cc2a1e3c49131725552d69ba0 --library-name marginfi -- --features mainnet-beta +``` + +Final Output: + +```shell +Executable Program Hash from repo: 890d68f48f96991016222b1fcbc2cc81b8ef2dcbf280c44fe378c523c108fad5 +On-chain Program Hash: 890d68f48f96991016222b1fcbc2cc81b8ef2dcbf280c44fe378c523c108fad5 +Program hash matches ✅ +``` + +# Conclusion + +Using [verified builds on Solana](/developers/guides/advanced/verified-builds) +ensures the integrity and trustworthiness of your programs on the network and +allow developers to find your SDKs directly from a Solana Explorer. By +leveraging tools like the Solana Verify CLI and Docker, you can maintain +verifiable and secure builds that align with your source code. Always take the +necessary precautions to use consistent environments, and consider governance +solutions for safe upgrades and deployments. + +## Security + Disclaimer + +While verified builds are a powerful tool for ensuring the integrity of your +Solana programs it is not completely trustless in the default setup. The docker +images are built and hosted by the Solana Foundation. + +Be aware that you are building your project in a downloaded docker image and +that your whole setup gets copied into that docker image for building including +potentially sensitive information. + +If you want to have a completely trustless setup you can build the docker images +yourself and host them on your own infrastructure. This way you can be sure that +the docker images are not tampered with. 
You can find the scripts to create your +own docker images in the +[Verified builds repository](https://github.com/Ellipsis-Labs/solana-verifiable-build) +and you can fork it and run the github actions yourself or validate that they +are correct. + +Furthermore for the remote verification you are trusting the OtterSec API and +the +[Solana Explorer](https://explorer.solana.com/address/PhoeNiXZ8ByJGLkxNfZRnkUfjvmuYqLR89jjFHGqdXY) +to a certain degree. + +The API or Solana Explorer may potentially display incorrect information if +compromised. + +If you want to have a completely trustless setup you can run the +[Verify API](https://github.com/otter-sec/solana-verified-programs-api) yourself +or run the program verification locally yourself using the `verify-from-repo` +command using the on chain verify data that is saved in a +[PDA](https://explorer.solana.com/address/63XDCHrwZu3mXsw2RUFb4tbNpChuUHx4eA5aJMnHkpQQ/anchor-account) +that is derived from the programs deploy authority and the +[verify program](https://explorer.solana.com/address/verifycLy8mB96wd9wqq3WDXQwM4oU6r42Th37Db9fC). + +The verify program is deployed by the [OtterSec team](https://osec.io/) and is +not yet frozen so it can be upgraded at any time. + +The Solana Foundation, OtterSec and the Ellipsis Labs team are not responsible +for any losses or damages that may occur from using the verified builds +pipeline. + +# Security.txt for Solana programs + +In addition to verified builds you can also add a `security.txt` file to your +program. In the future, once implemented, the `security.txt` will hold the +verifier public key for easy access to the verification data stored in the +verification PDA. The PDA containing all the information needed to build and +verify a program is derived from the programs address and the verifier pubkey. +By default this is the same pubkey that built and deployed the program. But it +can also be another pubkey that can be specified in the `security.txt`. + +The `security.txt` feature allows developers to embed contact and security +information directly within their Solana smart contracts. Inspired by +[securitytxt.org](https://securitytxt.org), this approach provides a +standardized way for security researchers to reach out to project maintainers, +even if they only know the contract's address. + +## Why use security.txt? + +For many projects, especially smaller or private ones, identifying the +developers from just the contract address can be difficult and time-consuming. +Embedding a `security.txt` file within the program ensures that security +researchers can easily contact the correct people, potentially preventing +exploits and ensuring timely bug reports. + +## How to implement security.txt + +To add a `security.txt` to your Solana program, include the following steps: + +Add the `solana-security-txt` dependency to your `Cargo.toml`: + +```toml title="Cargo.toml" +[dependencies] +solana-security-txt = "1.1.1" +``` + +Use the `security_txt!` macro in your contract to define your security +information. You can include contact details, project URLs, and even a security +policy. Here's an example: + +```rust +#[cfg(not(feature = "no-entrypoint"))] +use {default_env::default_env, solana_security_txt::security_txt}; + +#[cfg(not(feature = "no-entrypoint"))] +security_txt! 
{ + name: "MyProject", + project_url: "https://myproject.com", + contacts: "email:security@myproject.com,discord:security#1234", + policy: "https://myproject.com/security-policy", + + // Optional Fields + preferred_languages: "en,de", + source_code: "https://github.com/solana-developers/solana-game-preset", + source_revision: "5vJwnLeyjV8uNJSp1zn7VLW8GwiQbcsQbGaVSwRmkE4r", + source_release: "", + encryption: "", + auditors: "Verifier pubkey: 5vJwnLeyjV8uNJSp1zn7VLW8GwiQbcsQbGaVSwRmkE4r", + acknowledgements: "Thank you to our bug bounty hunters!" +} +``` + +Once the `security.txt` information is embedded in your program, it can be +easily queried via tools like the Solana Explorer, ensuring that your contact +and security details are available to anyone looking to report potential issues. + +## Best practices + +- Use Links: For information likely to change (e.g., contact details), it's + recommended to link to a web page rather than hard-coding them into the + contract. This avoids the need for frequent program upgrades. + +- Verification: Before deploying, verify the format and content using the + `query-security-txt` tool, which can validate both onchain programs and local + binaries: + +```bash +query-security-txt target/bpfel-unknown-unknown/release/my_contract.so +``` + +By embedding security contact information directly into your contract, you make +it easier for researchers to reach you, fostering better security and +communication within the Solana ecosystem. + +This is +[an example of how security.txt looks in the Solana Explorer](https://explorer.solana.com/address/HPxKXnBN4vJ8RjpdqDCU7gvNQHeeyGnSviYTJ4fBrDt4/security?cluster=devnet) + +The `security.txt` project is maintained by +[Neodyme Labs](https://github.com/neodyme-labs) diff --git a/content/guides/dapps/cash-app.mdx b/content/guides/dapps/cash-app.mdx new file mode 100644 index 000000000..2abd7b142 --- /dev/null +++ b/content/guides/dapps/cash-app.mdx @@ -0,0 +1,2524 @@ +--- +date: March 18, 2024 +difficulty: Intermediate +title: "Cash App on Solana" +description: + "Solana developer quickstart guide to learn how to create a React Native + mobile app that is both Android and iOS compatible. This app will mimic a cash + app experience but run on the Solana blockchain, showcasing that web3 products + can have the same user experience as web2 products. To build this, we will + need to write an Anchor program, integrate the Solana Name Service SDK, and + integrate Solana pay." +tags: + - quickstart + - dApp + - mobile + - anchor + - rust + - react-native + - expo +keywords: + - solana dapp + - on-chain + - rust + - anchor program + - mobile dapp + - create dapp + - create solana dapp + - tutorial + - intro to solana development + - blockchain developer + - blockchain tutorial + - web3 developer +--- + +In this guide, you will learn how to create a react-native mobile app that is +both Android and iOS compatible. This app will mimic a Cash App experience but +run on the Solana blockchain, showcasing that web3 products can have the same +user experience as web2 products. To build this, we will need to write an Anchor +program, integrate the Solana Name Service SDK, and integrate Solana Pay. 
+ +## What You Will Learn + +- Setting up your environment +- Creating a Solana mobile dApp +- Anchor program development +- Anchor PDAs and accounts +- Deploying a Solana program +- Testing a Solana program +- Connecting an onchain program to a mobile React Native UI +- Solana Pay +- Solana Name Service + +## What You Will Build + +You will learn to build a finance application similar to Cash App. This will be +a web3 mobile app with a wallet adaptor, devnet deployed anchor program, and +custom UI to interact with the anchor program. + +### Home Screen + +![Cash Balance](/assets/guides/cash-app/HomeScreen.png) +![Cash Out Modal](/assets/guides/cash-app/CashOutModal.png) + +### Payment Screen + +![Payment Screen](/assets/guides/cash-app/PaymentScreen.png) +![Request Screen](/assets/guides/cash-app/RequestScreen.png) + +### QR Screen + +![QR Screen](/assets/guides/cash-app/QRScreen.png) +![QR Modal](/assets/guides/cash-app/QRModal.png) + +### Activity Screen + +![Activity Screen](/assets/guides/cash-app/ActivityScreen.png) + +## Prerequisites + +Setup the following tools on your local development environment: + +- [Rust](https://www.rust-lang.org/tools/install) +- [Node.js](https://nodejs.org/en/download) +- [Solana CLI & Anchor](/docs/intro/installation) +- [Android Studio and emulator set up](https://docs.solanamobile.com/getting-started/development-setup) +- [React Native Setup](https://reactnative.dev/docs/environment-setup?platform=android) +- [EAS CLI and Account Setup](https://docs.expo.dev/build/setup/) + +For an introduction to Solana program development with the Anchor framework, +review this guide: + +- [Basic CRUD dApp on Solana](https://github.com/solana-foundation/developer-content/blob/main/content/guides/dapps/journal.md#writing-a-solana-program-with-anchor) + +For an introduction to Solana Mobile development, review the Solana Mobile docs: + +- [Solana Mobile Introduction](https://docs.solanamobile.com/getting-started/intro) + +## Project Design Overview + +Let's start by quickly mapping out the entire dApp design. To create a clone of +Cash App, we want to have the following features: + +1. Account creation +2. Deposit and withdraw funds +3. User-to-user money transfer +4. QR code generation +5. Connect with friends +6. Activity tracking +7. Send payment requests to friends + +To enable these functionalities, we will do the following: + +1. Write a Solana program that allows for users to initialize a new account + on-chain and set up a user name _(similar to $Cashtag)_ with + [Solana Name Service](https://sns.guide/). With the username being set via + SNS, you can then get public key information directly from an account's + username. +2. Add instructions to the Solana program for a user to be able to deposit funds + from their wallet into their cash account and withdrawal funds from their + cash account into their wallet. +3. Add instructions for a user to be able to directly send funds from their own + cash account to another cash account, request funds from a specified cash + account, and accept or decline payment requests. +4. Integrate [Solana Pay](https://docs.solanapay.com/) to enable QR code + generation. Solana pay also allows you to specify the amount and memo for the + requested transaction directly in the QR code. +5. Add an instruction for a user to be able to add friends by pushing the user + provided public key to a friends vector saved to the user's account state, + which can then be displayed on the front end similar to Cash App. +6. 
Add an activity tab that queries the cash account state of the connected user
   to show pending requests and pending payments.
7. Add in an additional account type for payment requests and write instructions
   for creating a request, accepting a request and processing the payment
   transfer, and declining the request and closing the pending request account.

## Solana Mobile App Template Set Up

Since this project will be a mobile app, we can get started with the Solana
Mobile Expo app template:

```shell
yarn create expo-app --template @solana-mobile/solana-mobile-expo-template
```

This initializes a new project using the Expo framework that is specifically
designed for creating mobile applications that interact with the Solana
blockchain.

Name the project `cash-app-clone` then navigate into the directory.

Follow the
[Running the app](https://docs.solanamobile.com/react-native/expo#running-the-app)
guide to launch the template as a custom development build and get it running
on your Android emulator. Once you have built the program and are running a dev
client with Expo, the emulator will automatically update every time you save
your code.

You must have
[fake wallet](https://github.com/solana-mobile/mobile-wallet-adapter/tree/main/android/fakewallet)
running on the same Android emulator to be able to test out transactions, as
explained in the
[Solana mobile development set up docs](https://docs.solanamobile.com/getting-started/development-setup),
or you must have a real wallet app, like Phantom or Solflare, installed and set
up on your emulator.

## Writing a Solana Program with Cash App Functionalities

### Initialize the Anchor Workspace

An Anchor workspace needs to be initialized to enable Solana program
development, deployment, and testing within this repository.

```shell
cd cash-app-clone

anchor init cash-app
```

Once the Anchor workspace has been initialized, navigate to
`cash-app/programs/cash-app/src/lib.rs` to start writing the program code.

Your Anchor program is already defined from initializing the Anchor workspace
and should look as follows:

```rust
use anchor_lang::prelude::*;

declare_id!("3dQeymKBEWf32Uzyzxm3Qyopt6uyHJdXxtvrpJdk7vCE");

#[program]
pub mod cash_app {
    use super::*;

    pub fn initialize(ctx: Context<Initialize>) -> Result<()> {
        Ok(())
    }
}

#[derive(Accounts)]
pub struct Initialize {}
```

### Define Your Account State

```rust
#[account]
#[derive(InitSpace)]
pub struct CashAccount {
    pub owner: Pubkey,
    #[max_len(100)]
    pub friends: Vec<Pubkey>,
}
```

Since we are able to directly query the SOL balance of PDA accounts, we don't
have to keep track of the user's account balance here.

### Write Instructions

Now that the state is defined, we need to create an instruction to initialize an
account when a new user signs up for Cash App. This will initialize a new
`cash_account`, and the PDA of this account will be derived from the string
`"cash-account"` and the public key of the user's wallet.
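As a point of reference for later, the client will be able to derive this same
address off-chain from identical seeds. A minimal sketch (the
`CASH_APP_PROGRAM_ID` constant is a hypothetical placeholder for your deployed
program ID; the full client-side hook is covered later in this guide):

```tsx
import { PublicKey } from "@solana/web3.js";

// Hypothetical placeholder: replace with your deployed program ID.
const CASH_APP_PROGRAM_ID = new PublicKey("11111111111111111111111111111111");

// Mirrors the on-chain seeds: ["cash-account", user wallet public key].
export function deriveCashAccountPDA(user: PublicKey): PublicKey {
  const [pda] = PublicKey.findProgramAddressSync(
    [Buffer.from("cash-account"), user.toBuffer()],
    CASH_APP_PROGRAM_ID,
  );
  return pda;
}
```

The on-chain instruction that creates this account, along with its account
validation struct, is shown below.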
+ +```rust +#[program] +pub mod cash_app { + use super::*; + + pub fn initialize_account(ctx: Context) -> Result<()> { + let cash_account = &mut ctx.accounts.cash_account; + cash_account.owner = *ctx.accounts.signer.key; + cash_account.friends = Vec::new(); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct InitializeAccount<'info> { + #[account( + init, + seeds = [b"cash-account", signer.key().as_ref()], + bump, + payer = signer, + space = 8 + CashAccount::INIT_SPACE + )] + pub cash_account: Account<'info, CashAccount>, + #[account(mut)] + pub signer: Signer<'info>, + pub system_program: Program<'info, System>, +} + +``` + +Because the `InitSpace` macro was used when defining the `CashAccount` state, it +can be called to calculate the space that this program will take up on chain. +The space is needed in order to calculate how much rent the payer will need to +pay to hold the program on chain. + +Next we will need to add an instruction to this program that allows a user to +deposit funds into their cash account: + +```rust +#[program] +pub mod cash_app { + use super::*; + + //... + + pub fn deposit_funds(ctx: Context, amount: u64) -> Result<()> { + require!(amount > 0, ErrorCode::InvalidAmount); + + let ix = system_instruction::transfer( + &ctx.accounts.signer.key(), + ctx.accounts.cash_account.to_account_info().key, + amount, + ); + + invoke( + &ix, + &[ + ctx.accounts.signer.clone(), + ctx.accounts.cash_account.to_account_info(), + ], + )?; + + Ok(()) + } +} + +#[derive(Accounts)] +pub struct DepositFunds<'info> { + #[account( + mut, + seeds = [b"cash-account", signer.key().as_ref()], + bump, + )] + pub cash_account: Account<'info, CashAccount>, + #[account(mut)] + /// CHECK: This account is only used to transfer SOL, not for data storage. + pub signer: AccountInfo<'info>, + pub system_program: Program<'info, System>, +} + +#[error_code] +pub enum ErrorCode { + #[msg("The provided amount must be greater than zero.")] + InvalidAmount, +} + +``` + +The `deposit_funds` function constructs a system instruction to transfer SOL +from the user's wallet to the user's cash account PDA. Solana programs are +designed to be isolated for security reasons; they don't have direct access to +each other's state or functions. If one program needs to run an instruction +handler that is part of another program, it must do so through a cross-program +invocation (CPI). Since the funds are coming from the signer's wallet, which is +an account owned by the signer not the program, the function has to interact +with the System Program to modify the balance of the accounts. The transfer +instruction from the System Program is then executed using `invoke`, which +safely performs the CPI by taking in the transfer instruction and a slice of +accounts that the instruction will interact with. + +`invoke` ensures that all operations are performed securely and in compliance +with the rules set by the Solana network and the specific programs involved. It +verifies that: + +- Only authorized modifications to account data are performed. +- The necessary signatures for operations that require them are present. +- The operation does not violate the program's constraints or Solana's network + rules. + +Next, we need to add an instruction that allows a user to withdraw funds from +their cash account: + +```rust +#[program] +pub mod cash_app { + use super::*; + + //... 

    pub fn withdraw_funds(ctx: Context<WithdrawFunds>, amount: u64) -> Result<()> {
        require!(amount > 0, ErrorCode::InvalidAmount);

        let cash_account = &mut ctx.accounts.cash_account.to_account_info();
        let wallet = &mut ctx.accounts.signer.to_account_info();

        require!(
            ctx.accounts.cash_account.owner == ctx.accounts.signer.key(),
            ErrorCode::InvalidSigner
        );

        **cash_account.try_borrow_mut_lamports()? -= amount;
        **wallet.try_borrow_mut_lamports()? += amount;

        Ok(())
    }
}

#[derive(Accounts)]
pub struct WithdrawFunds<'info> {
    #[account(
        mut,
        seeds = [b"cash-account", signer.key().as_ref()],
        bump,
    )]
    pub cash_account: Account<'info, CashAccount>,
    #[account(mut)]
    pub signer: Signer<'info>,
    pub system_program: Program<'info, System>,
}

#[error_code]
pub enum ErrorCode {
    #[msg("The provided amount must be greater than zero.")]
    InvalidAmount,

    #[msg("Signer does not have access to call this instruction.")]
    InvalidSigner,
}
```

Unlike the `deposit_funds` instruction, the `withdraw_funds` instruction
directly adjusts the [Lamports](/docs/terminology#lamport) of the
`cash_account` and the user's wallet by using `try_borrow_mut_lamports()`. This
transfer of funds can be done without the overhead of a CPI because the
`cash_account` is owned by the same program executing the function. This is
more efficient but requires careful handling to ensure security.

When a Solana program transfers lamports from an account that it owns, the
sender account must be owned by the program, but the recipient account does not
have to be. Since lamports cannot be created or destroyed when changing account
balances, any decrement performed needs to be balanced with an equal increment
somewhere else; otherwise you will get an error. In the above `withdraw_funds`
instruction, the program is transferring the exact same amount of lamports from
the cash account into the user's wallet.

Since we are directly manipulating the lamports in an account, we want to ensure
that the signer of the instruction is the same as the owner of the account so
that only the owner can call this instruction. This is why the following
validation check was implemented:
`require!(ctx.accounts.cash_account.owner == ctx.accounts.signer.key(), ErrorCode::InvalidSigner)`.

For error handling, the `#[error_code]` Anchor macro is used, which generates
`Error` and `type Result<T> = Result<T, Error>` types to be used as return
types from Anchor instruction handlers. Importantly, the attribute implements
`From` on the `ErrorCode` to support converting from the user defined error
enum into the generated `Error`.

Now let's create an instruction for transferring funds from one user to another.

```rust
#[program]
pub mod cash_app {
    use super::*;

    //...

    pub fn transfer_funds(
        ctx: Context<TransferFunds>,
        _recipient: Pubkey,
        amount: u64,
    ) -> Result<()> {
        require!(amount > 0, ErrorCode::InvalidAmount);

        let from_cash_account = &mut ctx.accounts.from_cash_account.to_account_info();
        let to_cash_account = &mut ctx.accounts.to_cash_account.to_account_info();

        require!(
            ctx.accounts.from_cash_account.owner == ctx.accounts.signer.key(),
            ErrorCode::InvalidSigner
        );

        **from_cash_account.try_borrow_mut_lamports()? -= amount;
        **to_cash_account.try_borrow_mut_lamports()?
+= amount; + + Ok(()) + } +} + +#[derive(Accounts)] +#[instruction(recipient: Pubkey)] +pub struct TransferFunds<'info> { + #[account( + mut, + seeds = [b"cash-account", signer.key().as_ref()], + bump, + )] + pub from_cash_account: Account<'info, CashAccount>, + + #[account( + mut, + seeds = [b"cash-account", recipient.key().as_ref()], + bump, + )] + pub to_cash_account: Account<'info, CashAccount>, + pub system_program: Program<'info, System>, + pub signer: Signer<'info>, +} +``` + +In the above instruction, the `TransferFunds` Context data structure consists of +an additional account. The `Context` is a struct that includes references to all +the accounts needed for the operation. Since we need information from both the +sender and recipient accounts for this instruction, we need to include both +accounts in the `Context`. + +We are once again directly transferring lamports between accounts, since the +program owns the `cash_account` account. The seeds for the cash account PDAs are +created from the public key of the cash account owner so the instruction needs +to take the recipient's public key as a parameter and pass that to the +`TransferFunds` Context data structure. Then the `cash_account` PDA can be +derived for both the `from_cash_account` and the `to_cash_account`. + +Since both of the accounts are listed in the `#[derive(Accounts)]` macro, they +are deserialized and validated so you can simply call both of the accounts with +the Context `ctx` to get the account info and update the account balances from +there. + +To be able to send funds to another user, similar to Cash App, both users must +have created an account. We're sending funds to the user's `cash_account` PDA, +not the user's wallet. So each user needs to initialize a cash account by +calling the `initialize_account` instruction to create their unique PDA derived +from their wallet public key. We'll need to keep this in mind when designing the +UI/UX of the onboarding process for this dApp later on to ensure every user +calls the `initialize_account` instruction when signing up for an account. + +Now that the basic payment functionality is enabled, we want to be able to +interact with friends. So we need to add instructions for adding friends, +requesting payments from friends, and accepting/rejecting payment requests. + +Adding a friend is as simple as just pushing a new public key to the `friends` +vector in the `CashAccount` state. + +```rust +#[program] +pub mod cash_app { + use super::*; + + //... + pub fn add_friend(ctx: Context, pubkey: Pubkey) -> Result<()> { + let cash_account = &mut ctx.accounts.cash_account; + cash_account.friends.push(pubkey); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct AddFriend<'info> { + #[account( + mut, + seeds = [b"cash-account", signer.key().as_ref()], + bump, + )] + pub cash_account: Account<'info, CashAccount>, + #[account(mut)] + /// CHECK: This account is only used to transfer SOL, not for data storage. + pub signer: AccountInfo<'info>, + pub system_program: Program<'info, System>, +} +``` + +In the `add_friend` function, there is a design limitation. The `vec` of friends +has a limit to how many friends a user to can add. To enhance this program to +allow limitless friends, the way that friends are stored must be changed. + +### Multiple Accounts Types in One Program + +There are several different ways to approach requesting payments from friends. 
+In this example, we will make each payment request its own PDA account in order +to simplify querying active requests, deleting completed requests, and updating +both the sender and recipient cash accounts. + +Each time a new payment request is created, the instruction will create a new +PDA account that holds data for the payment's sender, recipient, and amount. + +To have multiple account types within one program, you just need to define the +data structure for each account type and have instructions to be able to +initialize each account type. We already have the state data structure and init +account instruction for the cash account, now we'll just add this for the +pending request account. + +```rust +#[account] +#[derive(InitSpace)] +pub struct PendingRequest { + pub sender: Pubkey, + pub recipient: Pubkey, + pub amount: u64, +} + +#[derive(Accounts)] +pub struct InitializeRequest<'info> { + #[account( + init, + seeds = [b"pending-request", signer.key().as_ref()], + bump, + payer = signer, + space = 8 + PendingRequest::INIT_SPACE + )] + pub pending_request: Account<'info, PendingRequest>, + #[account(mut)] + pub signer: Signer<'info>, + pub system_program: Program<'info, System>, +} + +#[program] +pub mod cash_app { + use super::*; + + //... + + pub fn new_request(ctx: Context, sender: Pubkey, amount: u64) -> Result<()> { + let pending_request = &mut ctx.accounts.pending_request; + pending_request.recipient = *ctx.accounts.signer.key; + pending_request.sender = sender; + pending_request.amount = amount; + Ok(()) + } +} +``` + +Now that we are able to send payment requests, we need to be able to accept or +decline those payments. So let's add in those instructions now. + +```rust +#[program] +pub mod cash_app { + use super::*; + + //... + + pub fn decline_request(_ctx: Context) -> Result<()> { + Ok(()) + } +} + +#[derive(Accounts)] +pub struct DeclineRequest<'info> { + #[account( + mut, + seeds = [b"pending-request", signer.key().as_ref()], + bump, + close = signer, + )] + pub pending_request: Account<'info, PendingRequest>, + #[account(mut)] + pub signer: Signer<'info>, + pub system_program: Program<'info, System>, +} +``` + +To decline a request, the `pending_request` account needs to be closed. By +specifying the `close` constraint in the account macro for the `DeclineRequest` +data structure, the account simply closes when the correct signer signs the +`decline_request` instruction. + +For `accept_request`, we also want the account to close upon completion of the +instruction but the requested funds need to be transferred to the correct +recipient first. + +```rust +#[program] +pub mod cash_app { + use super::*; + + //... + + pub fn accept_request(ctx: Context) -> Result<()> { + let amount = ctx.accounts.pending_request.amount; + + let from_cash_account = &mut ctx.accounts.from_cash_account.to_account_info(); + let to_cash_account = &mut ctx.accounts.to_cash_account.to_account_info(); + + **from_cash_account.try_borrow_mut_lamports()? -= amount; + **to_cash_account.try_borrow_mut_lamports()? 
+= amount; + + Ok(()) + } +} + +#[derive(Accounts)] +pub struct AcceptRequest<'info> { + #[account( + mut, + seeds = [b"pending-request", signer.key().as_ref()], + bump, + close = signer, + )] + pub pending_request: Account<'info, PendingRequest>, + #[account( + mut, + seeds = [b"cash-account", pending_request.sender.key().as_ref()], + bump, + )] + pub from_cash_account: Account<'info, CashAccount>, + #[account( + mut, + seeds = [b"cash-account", pending_request.recipient.key().as_ref()], + bump, + )] + pub to_cash_account: Account<'info, CashAccount>, + #[account(mut)] + pub signer: Signer<'info>, + pub system_program: Program<'info, System>, +} +``` + +The `AcceptRequest` struct has references to three accounts since we need all +three to complete the request. The `recipient` and `sender` public keys are +pulled from the `pending_request` account state and used to derive the two +`cash_account` accounts needed for this transaction. + +We're now able to deposit funds, withdraw funds, send funds to another user, +request funds from another user, add friends, and accept/decline requests, which +covers all of the functionality in Cash App. We'll just add one optimization to +this program before testing. + +### Integrating a Counter for Unique PDAs + +Since a user can have multiple pending requests, we want each request to have a +unique PDA. So we can update the above code to include a counter in the PDA. The +counter will need to be tracked in the user's cash account state, so now +`ProcessRequest` needs to take in the cash account along with the pending +request account. So lets first update both account data structures. + +```rust +#[account] +#[derive(InitSpace)] +pub struct CashAccount { + pub signer: Pubkey, + pub friends: Vec, + pub request_counter: u64, +} + +#[account] +#[derive(InitSpace)] +pub struct PendingRequest { + pub sender: Pubkey, + pub recipient: Pubkey, + pub amount: u64, + pub id: u64, +} +``` + +Now we need to update the `InitializeRequest`, `DeclineRequest`, and +`AcceptRequest` structs to include the requester's cash account so that the +counter can be queried and incremented and the `pending_request` account can use +the value of the counter in its PDA generation. 
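On the client side, a specific pending request PDA can then be derived from the
same seeds by encoding the request id as an 8-byte little-endian buffer. A
rough sketch (the `CASH_APP_PROGRAM_ID` constant is a hypothetical placeholder
for your deployed program ID, and `requestId` is the counter value stored on
the cash account):

```tsx
import { PublicKey } from "@solana/web3.js";
import { BN } from "@coral-xyz/anchor";

// Hypothetical placeholder: replace with your deployed program ID.
const CASH_APP_PROGRAM_ID = new PublicKey("11111111111111111111111111111111");

// Mirrors the on-chain seeds: ["pending-request", signer pubkey, counter as u64 LE bytes].
export function derivePendingRequestPDA(
  signer: PublicKey,
  requestId: number | bigint,
): PublicKey {
  const idBytes = new BN(requestId.toString()).toArrayLike(Buffer, "le", 8);
  const [pda] = PublicKey.findProgramAddressSync(
    [Buffer.from("pending-request"), signer.toBuffer(), idBytes],
    CASH_APP_PROGRAM_ID,
  );
  return pda;
}
```

With that in place, the updated account validation structs look like this: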
+ +```rust +#[derive(Accounts)] +pub struct InitializeRequest<'info> { + #[account( + init, + seeds = [b"pending-request", signer.key().as_ref(), cash_account.pending_request_counter.to_le_bytes().as_ref()], + bump, + payer = signer, + space = 8 + PendingRequest::INIT_SPACE + )] + pub pending_request: Account<'info, PendingRequest>, + #[account( + mut, + seeds = [b"cash-account", signer.key().as_ref()], + bump, + close = signer, + )] + pub cash_account: Account<'info, CashAccount>, + #[account(mut)] + pub signer: Signer<'info>, + pub system_program: Program<'info, System>, +} + +#[derive(Accounts)] +pub struct DeclineRequest<'info> { + #[account( + mut, + seeds = [b"pending-request", signer.key().as_ref(), pending_request.id.to_le_bytes().as_ref()], + bump, + close = signer, + )] + pub pending_request: Account<'info, PendingRequest>, + #[account(mut)] + pub signer: Signer<'info>, + pub system_program: Program<'info, System>, +} + +#[derive(Accounts)] +pub struct AcceptRequest<'info> { + #[account( + mut, + seeds = [b"pending-request", signer.key().as_ref(), pending_request.id.to_le_bytes().as_ref()], + bump, + close = signer, + )] + pub pending_request: Account<'info, PendingRequest>, + #[account( + mut, + seeds = [b"cash-account", pending_request.sender.key().as_ref()], + bump, + )] + pub from_cash_account: Account<'info, CashAccount>, + #[account( + mut, + seeds = [b"cash-account", pending_request.recipient.key().as_ref()], + bump, + )] + pub to_cash_account: Account<'info, CashAccount>, + #[account(mut)] + pub signer: Signer<'info>, + pub system_program: Program<'info, System>, +} +``` + +Lastly, we need to update the initialization of each account. The +`pending_request_counter` should start at 0 and increment with each new request +sent from that specific cash account. + +```rust +#[program] +pub mod cash_app { + use super::*; + + //... + + pub fn initialize_account(ctx: Context) -> Result<()> { + let cash_account = &mut ctx.accounts.cash_account; + cash_account.signer = *ctx.accounts.signer.key; + cash_account.friends = Vec::new(); + cash_account.request_counter = 0; + Ok(()) + } + + pub fn new_request(ctx: Context, recipient: Pubkey, amount: u64) -> Result<()> { + let cash_account = &mut ctx.accounts.cash_account; + let pending_request = &mut ctx.accounts.pending_request; + pending_request.sender = *ctx.accounts.signer.key; + pending_request.recipient = recipient; + pending_request.amount = amount; + pending_request.id = cash_account.request_counter; + cash_account.request_counter += 1; + Ok(()) + } +} +``` + +Now your Solana program should match the final version +[here](https://github.com/solana-developers/cash-app-clone/blob/main/cash-app/anchor/cash-app/src/lib.rs): + +### Build and Deploy the Program + +First, we need to deploy the Anchor program. For testing purposes, you can +either deploy to your localnet or to devnet. + +- `Devnet` is a public test network provided by Solana that more closely + resembles mainnet. It operates with a broader set of validators and easily + enables testing CPIs, oracles, and wallet services. +- `Localnet` is a private instance of the Solana blockchain running locally on + your machine. It enables more control of the environment but doesn't + completely mimic real-world conditions of the blockchain. + +In the next section of this guide, you'll need the program deployed to localnet +to run the Anchor test suite, so deploy to localnet now. + +Navigate to `cash-app-clone/cash-app` in your terminal. 
+ +```shell +solana-test-validator +``` + +This runs a local test validator to simulate the Solana blockchain environment +on your own machine. Note: You cannot deploy to localnet unless your test +validator is running. + +```shell +anchor build +``` + +This builds your program's workspace. It targets Solana's BPF runtime and emits +each program's IDL in the `target/idl` folder and the corresponding typescript +types in the `target/types` folder. If your program is doesn't build, then there +is an error in your code that needs to be addressed. + +```shell +anchor deploy --provider.cluster localnet +``` + +This command deploys your program to the specified cluster and generates a +program ID public key. If you choose to deploy to localnet, you must be running +`solana-test-validator` to be able to deploy. + +```shell +anchor keys sync +``` + +This syncs the program's `declare_id!` pubkey with the program's actual pubkey. +It specifically updates the `lib.rs` and `Anchor.toml` files. + +### Testing an Anchor Program + +Testing Solana Anchor programs involves simulating the behavior of the Solana +program and ensuring it operates as expected. For the below test, we'll cover +the following: + +- Creates Accounts for User A and User B +- Deposits funds into User A's account +- Withdraws funds from User A's account +- Transfers funds from User A's account to User B's account +- User A adds User B as a friend +- User A requests funds from User B +- User B accepts the request +- User A requests funds again from User B +- User B declines the request + +When initializing an Anchor workspace, a file for TypeScript tests is generated. +Navigate to `cash-app-clone/cash-app/tests/cash-app.ts` to find the testing +template, which will already have the required modules imported. + +First we need to set up our environment to interact with the Solana blockchain. + +```tsx +describe("cash-app", () => { + const provider = anchor.AnchorProvider.env(); + const program = anchor.workspace.CashApp as Program; +}); +``` + +`provider` enables you to facilitate interactions between your application +(client-side) and the Solana blockchain, which includes a wallet that holds the +keypair used to sign transactions. + +`program` now represents your Anchor program and can be used to call functions +defined in your on-chain program, pass in required accounts, and handle the +program's data. It simplifies interacting with the Solana blockchain by +abstracting many of the lower-level details. + +Next, we need to define the wallet accounts that will be interacting with the +Solana program as well as their `cash_account` PDAs. `myWallet` is the +provider's wallet, meaning that it is already integrated with the +`AnchorProvider` and is configured when the `provider` is initialized. Since +`yourWallet` is a new wallet being generated, it will need to be funded with SOL +by requesting an airdrop. 
+ +```tsx +it("A to B user flow", async () => { + const myWallet = provider.wallet as anchor.Wallet; + const yourWallet = new anchor.web3.Keypair(); + + const [myAccount] = await anchor.web3.PublicKey.findProgramAddress( + [Buffer.from("cash-account"), myWallet.publicKey.toBuffer()], + program.programId, + ); + + const [yourAccount] = await anchor.web3.PublicKey.findProgramAddress( + [Buffer.from("cash-account"), yourWallet.publicKey.toBuffer()], + program.programId, + ); + + console.log("requesting airdrop"); + const airdropTx = await provider.connection.requestAirdrop( + yourWallet.publicKey, + 5 * anchor.web3.LAMPORTS_PER_SOL, + ); + await provider.connection.confirmTransaction(airdropTx); + + let yourBalance = await program.provider.connection.getBalance( + yourWallet.publicKey, + ); + console.log("Your wallet balance:", yourBalance); +}); +``` + +Now we can interact with the Solana program. First we need to initialize each +user's `cash_account`. + +```tsx +const initMe = await program.methods + .initializeAccount() + .accounts({ + cashAccount: myAccount, + signer: myWallet.publicKey, + systemProgram: anchor.web3.SystemProgram.programId, + }) + .rpc(); +console.log(`Use 'solana confirm -v ${initMe}' to see the logs`); + +await anchor.getProvider().connection.confirmTransaction(initMe); + +const initYou = await program.methods + .initializeAccount() + .accounts({ + cashAccount: yourAccount, + signer: yourWallet.publicKey, + systemProgram: anchor.web3.SystemProgram.programId, + }) + .signers([yourWallet]) + .rpc(); +console.log(`Initialized your account : ${initYou}' `); + +await anchor.getProvider().connection.confirmTransaction(initYou); +``` + +By calling the program namespace `program.methods`, you're able to interact with +the instructions handlers of that program. When a transaction is sent using the +`provider` _(or methods derived from it, such as `program.rpc()`)_, the signing +by `myWallet` is implicitly handled. The `provider` automatically includes the +wallet configured with it _(myWallet in this case)_ as a signer for any +transactions it sends. This means you do not need to manually specify `myWallet` +in the `.signers()` array when constructing a transaction, because it's +inherently assumed to be a signer through the provider's configuration. However, +`yourWallet` is a new keypair which is not automatically associated with the +`provider`, so must explicitly tell Anchor to use yourWallet for signing any +transaction where it's required. + +Since any other instruction call is handled exactly as described above, you can +complete this test example independently. + +Lastly, run your test suite against your localnet. + +```shell +anchor test +``` + +## Connecting a Solana Program to a React-Native Expo App + +Now that we have a working Solana program, we need to integrate this with the UI +of the dApp. + +### Android Emulator + +Lets get the Android emulator running so we can see in real time the UI updates +that we will make throughout this guide. + +You must have an EAS account and be logged into your account in the EAS CLI, to +set this up follow [the expo documentation](https://docs.expo.dev/build/setup/). + +Navigate to the `cash-app-clone` directory in your terminal and run: + +```shell +eas build --profile development --platform android +``` + +Then in a new terminal window run: + +```shell +npx expo start --dev-client +``` + +Install the build on your Android emulator and keep it running in a separate +window. Every time you save a file, the emulator will refresh. 
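If you need to tweak the build profile used above, it lives in the `eas.json`
file at the project root. The Solana Mobile template ships its own; the snippet
below is only a typical sketch of what a development profile looks like, not
the template's authoritative config:

```json
{
  "cli": {
    "version": ">= 5.9.0"
  },
  "build": {
    "development": {
      "developmentClient": true,
      "distribution": "internal",
      "android": {
        "buildType": "apk"
      }
    }
  }
}
```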
+ +### Initial Program Connection + +We can create a custom hook that accepts the public key of the user as a +parameter that is designed to interact with our deployed Solana program. By +providing the program ID, the rpc endpoint that the program was deployed to, the +IDL of the program, and the PDA of a specified user, we can create the logic +required to manage interactions with the Solana program. Create a new file under +`utils/useCashAppProgram.tsx`, to implement this function. + +Since we want this app to be publicly available, deploy your program to devnet +and use that public key instead of `11111111111111111111111111111111`. + +```tsx +export function UseCashAppProgramAccount(user: PublicKey) { + const cashAppProgramId = new PublicKey("11111111111111111111111111111111"); + + const [connection] = useState( + () => new Connection("https://api.devnet.solana.com"), + ); + + const [cashAppPDA] = useMemo(() => { + const accountSeed = [Buffer.from("cash_account"), user.toBuffer()]; + return PublicKey.findProgramAddressSync(accountSeed, cashAppProgramId); + }, [cashAppProgramId]); + + const cashAppProgram = useMemo(() => { + return new Program( + idl as CashAppProgram, + cashAppProgramId, + { connection }, + ); + }, [cashAppProgramId]); + + const value = useMemo( + () => ({ + cashAppProgram: cashAppProgram, + cashAppProgramId: cashAppProgramId, + cashAppPDA: cashAppPDA, + }), + [cashAppProgram, cashAppProgramId, cashAppPDA], + ); + + return value; +} +``` + +Since there is only one `cash_account` account per public key, it is easy to +calculate the `cashAccountPDA` by taking in the user's public key as a parameter +and using that to calculate what the public key of the Cash App PDA for each +individual user is. + +Since the IDL is generated as a JSON file when building the program, we can just +import it to this file. + +This function returns: + +- `cashAppPDA` - The connect user's Program Derived Address (PDA) for their cash + account +- `cashAppProgramID` - The public key of the deployed Solana program on devnet +- `cashAppProgram` - The Cash App program which provides the IDL deserialized + client representation of an Anchor program. + +The `Program` class provides the IDL deserialized client representation of an +Anchor program. This API is a one stop shop for all things related to +communicating with on-chain programs. It enables sending transactions, +deserializing accounts, decoding instruction data, listening to events, etc. + +The `cashAppProgram` object, created from the `Program` class, provides a set of +dynamically generated properties, known as `namespaces`. `Namespaces` map +one-to-one to program methods and accounts, which we will be using a lot later +in this project. The `namespace` is generally used as follows: +`program..` + +To get information for specific `pending_request` accounts associated with a +specific public key, we'll need to take in the pending request ID as a +parameter. 
+ +```tsx +export function UsePendingRequestAccount( + user: PublicKey, + count: number, + cashAppProgramId: PublicKey, +) { + const [connection] = useState( + () => new Connection("https://api.devnet.solana.com"), + ); + + bigNumber = new BN(count); + const [pendingRequestPDA] = useMemo(() => { + const pendingRequestSeed = [ + Buffer.from("cash_account"), + user.toBuffer(), + bigNumber.toBuffer(), + ]; + return PublicKey.findProgramAddressSync( + pendingRequestSeed, + cashAppProgramId, + ); + }, [cashAppProgramId]); + + const value = useMemo( + () => ({ + pendingRequestPDA: pendingRequestPDA, + }), + [pendingRequestPDA], + ); + + return value; +} +``` + +### Styling and Themes + +React Native uses a styling system that is based on the standard CSS properties, +but it's specifically tailored for mobile development. Styles are defined using +JavaScript objects, which enables dynamic generation of styles by leveraging +JavaScript's capabilities. To achieve a design that mimics the look and feel of +Cash App, we'll create a StyleSheet Object that will be use throughout this +dApp. This style sheet will feature a monochrome grayscale color palette, bold +text, and rounded shapes. + +```jsx +import { StyleSheet, Dimensions } from "react-native"; + +const { width } = Dimensions.get("window"); + +const styles = StyleSheet.create({ + container: { + flex: 1, + backgroundColor: "#141414", + alignItems: "center", + justifyContent: "center", + }, + header: { + backgroundColor: "#1b1b1b", + width: "100%", + padding: 20, + alignItems: "center", + }, + headerText: { + color: "#fff", + fontSize: 24, + fontWeight: "bold", + }, + button: { + width: 80, + height: 80, + justifyContent: "center", + alignItems: "center", + backgroundColor: "#333", + borderRadius: 40, + }, + buttonGroup: { + flexDirection: "column", + paddingVertical: 4, + }, + buttonText: { + color: "#fff", + fontSize: 18, + fontWeight: "600", + textAlign: "center", + }, + cardContainer: { + width: width - 40, + backgroundColor: "#222", + borderRadius: 20, + padding: 20, + marginVertical: 10, + shadowColor: "#000", + shadowOffset: { width: 0, height: 1 }, + shadowOpacity: 0.22, + shadowRadius: 2.22, + elevation: 3, + }, + modalView: { + backgroundColor: "#444", + padding: 35, + alignItems: "center", + borderTopLeftRadius: 50, + borderTopRightRadius: 50, + shadowColor: "#000", + shadowOffset: { + width: 0, + height: -2, + }, + shadowOpacity: 0.8, + shadowRadius: 4, + elevation: 5, + width: "100%", + height: "40%", + }, + cardTitle: { + fontSize: 20, + fontWeight: "bold", + marginBottom: 5, + }, + cardContent: { + fontSize: 16, + color: "#666", + }, +}); + +export default styles; +``` + +Along with setting the `StyleSheet`, we also need to update the theme. A theme +creates a more uniform look and feel throughout the entire application. Navigate +to `App.tsx`, and update the code to only use `DarkTheme`. + +### Navigation Bar and Pages Set up + +To follow the UI/UX of Cash App, we'll need the following screens: Home, Pay, +Scan, and Activity. + +Navigate to `HomeNavigator.tsx` and update the `` to include the +following screens: + +```tsx + + ({ + header: () => , + tabBarIcon: ({ focused, color, size }) => { + switch (route.name) { + case "Home": + return ( + + ); + case "Pay": + return ( + + ); + case "Scan": + return ( + + ); + case "Activity": + return ( + + ); + } + }, + })} + > + + + + + + +``` + +In addition to this, you'll need to create new files for each of these screens. 
+Navigate to `src/screens` and create a file for `PayScreen.tsx`, +`ScanScreen.tsx`, and `ActivityScreen.tsx`. + +Each file needs to have a function correlating to the screen name that follows +the same format as the HomeScreen in this template. + +```tsx +export function HomeScreen() { + return ; +} +``` + +### Creating Components + +Throughout this guide, we'll be using a modular approach to building features, +so we can focus on one component at a time. + +#### Account Balance Component + +Let's start with the home screen. To mimic Cash App, all we need is a container +that displays your account balance, a button to deposit funds into your account, +and a button to withdraw funds from your account. + +In the expo template we are using, there is already similar functionality. +However, this code is for your connected wallet balance rather than the cash +account's balance. So we need to connect this feature to our deployed Solana +program and query the balance of the user's `cash_account` instead. + +First simplify the home screen to just: + +```tsx +export function HomeScreen() { + const { selectedAccount } = useAuthorization(); + + return ( + + {selectedAccount ? ( + <> + + + ) : ( + <> + Solana Cash App + + {" "} + Sign in with Solana (SIWS) to link your wallet. + + + + )} + + ); +} +``` + +Then click into `AccountDetailFeature` and update the styling to use +`cardContainer`, add in a "Cash Balance" label for the card container, and +delete the `AccountTokens` component, as shown below: + +```tsx +export function AccountDetailFeature() { + const { selectedAccount } = useAuthorization(); + + if (!selectedAccount) { + return null; + } + const theme = useTheme(); + + return ( + <> + + + Cash Balance + + + + + + + + ); +} +``` + +NOTE: The `StyleSheet` that we created earlier should be imported to every page. + +Now click into the `AccountBalance` function. To update this query, we simply +need to change the public key that is being passed through the `useGetBalance` +function. We can grab the `cashAppPDA` from the `UseCashAppProgram` function we +created earlier. + +```tsx +export function AccountBalance({ address }: { address: PublicKey }) { + const { cashAppPDA } = UseCashAppProgram(address); + + const query = useGetBalance(cashAppPDA); + const theme = { + ...MD3DarkTheme, + ...DarkTheme, + colors: { + ...MD3DarkTheme.colors, + ...DarkTheme.colors, + }, + }; + + return ( + <> + + + ${query.data ? lamportsToSol(query.data) : "0.00"} + + + + ); +} +``` + +#### Deposit and Withdraw Components + +Next, we need to update the buttons to deposit and withdraw funds. Go to the +`AccountButtonGroup` function. + +To be able to call and execute an instruction from the deployed Solana program, +we can use the program namespaces which map one-to-one to program methods and +accounts. 
+
+```tsx
+const [connection] = useState(
+  () => new Connection("https://api.devnet.solana.com"),
+);
+
+const depositFunds = useCallback(
+  async (program: Program) => {
+    // Open a Mobile Wallet Adapter session to build and sign the transaction.
+    let signedTransactions = await transact(
+      async (wallet: Web3MobileWallet) => {
+        // Authorize the wallet and fetch a recent blockhash in parallel.
+        const [authorizationResult, latestBlockhash] = await Promise.all([
+          authorizeSession(wallet),
+          connection.getLatestBlockhash(),
+        ]);
+
+        // Build the deposit instruction through the program's methods
+        // namespace.
+        const depositInstruction = await program.methods
+          .depositFunds(pubkey, newDepositAmount)
+          .accounts({
+            user: authorizationResult.publicKey,
+            fromCashAccount: cashAppPDA,
+          })
+          .instruction();
+
+        const depositTransaction = new Transaction({
+          ...latestBlockhash,
+          feePayer: authorizationResult.publicKey,
+        }).add(depositInstruction);
+
+        // Sign the transaction with the connected mobile wallet.
+        const signedTransactions = await wallet.signTransactions({
+          transactions: [depositTransaction],
+        });
+
+        return signedTransactions[0];
+      },
+    );
+
+    let txSignature = await connection.sendRawTransaction(
+      signedTransactions.serialize(),
+      {
+        skipPreflight: true,
+      },
+    );
+
+    const confirmationResult = await connection.confirmTransaction(
+      txSignature,
+      "confirmed",
+    );
+
+    if (confirmationResult.value.err) {
+      throw new Error(JSON.stringify(confirmationResult.value.err));
+    } else {
+      console.log("Transaction successfully submitted!");
+    }
+  },
+  [authorizeSession, connection, cashAppPDA],
+);
+```
+
+This function uses React's `useCallback` hook to create a memoized callback
+that handles depositing funds through the deployed Solana program. It accepts a
+`Program` parameter, which is the Anchor program interface for the `CashApp`
+dApp.
+
+Since a namespace is generally used as
+`program.<namespace>.<instructionOrAccountName>`, in the above code we are
+creating an instruction for `depositFunds` with the specified `accounts`.
+
+This instruction can then be added to a `Transaction` and signed with the
+connected wallet.
+
+Lastly, the signed transaction is sent using the `sendRawTransaction` method of
+the `connection` object.
+
+The `connection` object is an instance of the `Connection` class from the
+`@solana/web3.js` library, which is a connection to a fullnode JSON RPC
+endpoint.
+
+Now that we have the `depositFunds` function, you'll need to follow the same
+format to create a `withdrawFunds` function using the program namespace for the
+`withdrawFunds` instruction.
+
+```tsx
+const withdrawInstruction = await program.methods
+  .withdrawFunds(pubkey, newWithdrawAmount)
+  .accounts({
+    user: authorizationResult.publicKey,
+    fromCashAccount: cashAppPDA,
+  })
+  .instruction();
+```
+
+**Additional documentation:**
+
+- [Transactions and Instructions](/docs/core/transactions)
+- [Connection Class](https://solana-labs.github.io/solana-web3.js/v1.x/classes/Connection.html)
+- Library for
+  [wallets](https://github.com/solana-mobile/mobile-wallet-adapter/tree/main/android/walletlib)
+  to provide the Mobile Wallet Adapter transaction signing services to dApps
+
+NPM packages to install and import:
+
+- `@solana-mobile/mobile-wallet-adapter-protocol-web3js`
+- `@coral-xyz/anchor`
+- `@solana/web3.js`
+
+Now we can connect these functions to buttons on the UI.
+
+We'll follow a structure very similar to the current `AccountButtonGroup`
+function, but we need different functionality, so delete everything within the
+function.
+
+Since Cash App also uses modals when clicking on the "Add Cash" and "Cash Out"
+buttons, we'll have a withdraw and a deposit modal. We'll also need to take in
+a user input value for the amount to be deposited or withdrawn. 
Lastly, we'll need +to call the `depositFunds` and `withdrawFunds` functions we just created. + +```tsx +export function AccountButtonGroup({ address }: { address: PublicKey }) { + const [showWithdrawModal, setShowWithdrawModal] = useState(false); + const [showDepositModal, setShowDepositModal] = useState(false); + const [genInProgress, setGenInProgress] = useState(false); + const [depositAmount, setDepositAmount] = useState(new anchor.BN(0)); + const newDepositAmount = new anchor.BN(depositAmount * 1000000000); + const [withdrawAmount, setWithdrawAmount] = useState(new anchor.BN(0)); + const newWithdrawAmount = new anchor.BN(withdrawAmount * 1000000000); + const { authorizeSession, selectedAccount } = useAuthorization(); + const { cashAppProgram } = UseCashAppProgram(address); + + const [connection] = useState( + () => new Connection("https://api.devnet.solana.com"), + ); + + const DepositModal = () => ( + { + setShowDepositModal(!showDepositModal); + }} + > + + + Add Cash + + + setDepositModalVisible(false)} + > + + + + + + ); + + const WithdrawModal = () => ( + { + setShowWithdrawModal(!showWithdrawModal); + }} + > + + + Cash Out + + + setShowWithdrawModal(false)} + > + + + + + + ); + return ( + <> + + + + + + ); +} +``` + +That wraps up all the functionality we need on the home screen for a Cash App +clone. Now we can move onto the pay screen, which involves transferring funds +from one user to another. + +#### Pay Component + +For the pay page, we'll need to call the `transferFunds` function from the cash +app Solana program. To do this, we'll be using the same process that was +described for `depositFunds`. However, the `TransferFunds` struct described in +the CashApp Solana Program requires 2 `cash_account` accounts rather than the +one account that is required for `depositFunds`. So what needs to change is +simply to add calculations of the PDAs of both the sender account and the +recipient's account, as shown below: + +```tsx +const [recipientPDA] = useMemo(() => { + const recipientSeed = [Buffer.from("cash-account"), recipient.toBuffer()]; + return PublicKey.findProgramAddressSync([recipientSeed], cashAppProgramId); +}, [cashAppProgramId]); + +const transferInstruction = await program.methods + .transferFunds(pubkey, newTransferAmount) + .accounts({ + user: authorizationResult.publicKey, + fromCashAccount: cashAppPDA, + toCashAccount: recipientPDA, + }) + .instruction(); +``` + +In order to calculate the recipient's PDA, the public key of the recipient must +be passed through as a parameter of the `transferFunds` function, along with the +amount to transfer and the public key of the signer. + +#### Request Component + +For the request page, we'll need to call the `newRequest` function from the cash +app Solana program. This function also requires multiple accounts. Here you'll +need the `pending_request` account and the `cash_account` of the signer. + +```tsx +const [pendingRequestPDA] = useMemo(() => { + const pendingRequestSeed = [ + Buffer.from("pending-request"), + requester.toBuffer(), + ]; + return PublicKey.findProgramAddressSync( + [pendingRequestSeed], + cashAppProgramId, + ); +}, [cashAppProgramId]); + +const requestInstruction = await program.methods + .newPendingRequest(pubkey, requestAmount) + .accounts({ + user: authorizationResult.publicKey, + pendingRequest: pendingRequestPDA, + cashAccount: cashAppPDA, + }) + .instruction(); +``` + +#### Accept and Decline Request Components + +A user will interact with their pending payment requests on the activity page. 
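+
+Before a request can be accepted or declined, the activity page first needs to
+load the user's pending requests. The snippet below is a hedged sketch of one
+way to do this with Anchor's account namespace; it assumes the generated IDL
+exposes a `pendingRequest` account, that `CashAppProgram` is the IDL type
+generated by your build, and that the recipient's public key is stored
+immediately after the 8-byte account discriminator, so adjust the `memcmp`
+offset and imports to match your program:
+
+```tsx
+import { Program } from "@coral-xyz/anchor";
+import { PublicKey } from "@solana/web3.js";
+
+// Sketch: fetch every pending request account whose stored recipient matches
+// the connected user.
+export async function fetchPendingRequests(
+  program: Program<CashAppProgram>,
+  user: PublicKey,
+) {
+  return program.account.pendingRequest.all([
+    {
+      memcmp: {
+        offset: 8, // assumed layout: discriminator, then recipient pubkey
+        bytes: user.toBase58(),
+      },
+    },
+  ]);
+}
+```
+
+Accepting or declining a selected request then uses the same `program.methods`
+namespace pattern as before: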
+ +```tsx +const acceptInstruction = await program.methods + .acceptRequest() + .accounts({ + user: authorizationResult.publicKey, + pendingRequest: pendingRequestPDA, + toCashAccount: recipientPDA, + fromCashAccount: cashAppPDA, + }) + .instruction(); + +const declineInstruction = await program.methods + .declineRequest() + .accounts({ + user: authorizationResult.publicKey, + pendingRequest: pendingRequestPDA, + }) + .instruction(); +``` + +### Creating Screens + +#### Payment Screen + +In Cash App, the payment screen is simply a key pad with `request` and `pay` +buttons that take the user input value and redirects you to another screen. + +So the pay screen is mainly some UI work. We need to be able to type in a +numerical value via a keyboard, handle the input value, select currency via a +small modal, and navigate to the request and pay pages via buttons. Here is the +code below: + +```tsx +type HomeScreenNavigationProp = NativeStackNavigationProp< + RootStackParamList, + "Home" +>; + +type Props = { + navigation: HomeScreenNavigationProp; +}; + +const App: React.FC = ({ navigation }) => { + const [inputValue, setInputValue] = useState(""); + const [modalVisible, setModalVisible] = useState(false); + + const handleInput = (value: string) => { + setInputValue(inputValue + value); + }; + + const handleBackspace = () => { + setInputValue(inputValue.slice(0, -1)); + }; + + type NumberButtonProps = { + number: string; + }; + + const NumberButton: React.FC = ({ number }) => ( + handleInput(number)}> + {number} + + ); + + const CurrencySelectorModal = () => ( + { + setModalVisible(!modalVisible); + }} + > + + + Select Currency + + setModalVisible(false)} + > + + {" "} + + US Dollars + + + setModalVisible(false)} + > + + {" "} + + Bitcoin + + + + setModalVisible(false)} + > + Close + + + + + ); + + return ( + + + + ${inputValue || "0"} + setModalVisible(true)} + > + + USD{" "} + + + + + + + {[1, 2, 3].map(number => ( + + ))} + + + {[4, 5, 6].map(number => ( + + ))} + + + {[7, 8, 9].map(number => ( + + ))} + + + + + + + + + + + + + + + ); +}; +``` + +In the above code, the `Request` and `Pay` buttons redirect you to new pages to +complete your transaction, similar to the Cash App UX. + +#### Request and Pay Screens + +The `Request` and `Pay` Screens need to take in your input value from the +previous Payment screen and use it to execute the `transferFunds` and +`newPaymentRequest` instructions. 
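+
+One way to hand the keypad value from the Payment screen to these screens is
+through React Navigation route params. The types below are an assumption about
+how your navigator could be set up; the param name `inputValue` matches what
+the screens read from `route.params`:
+
+```tsx
+// Assumed param list: both screens receive the keypad value as `inputValue`.
+export type RootStackParamList = {
+  Home: undefined;
+  Pay: { inputValue: string };
+  Request: { inputValue: string };
+};
+
+// In the Payment screen's button handlers (navigation object assumed):
+// navigation.navigate("Pay", { inputValue });
+// navigation.navigate("Request", { inputValue });
+```
+
+With that wiring in place, the `PayScreen` below reads `inputValue` from
+`route.params` and uses it to build and submit the transfer: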
+ +```tsx +const PayScreen: React.FC = ({ route, navigation }) => { + const [reason, setReason] = useState(""); + const { inputValue } = route.params; + const [genInProgress, setGenInProgress] = useState(false); + const [userName, setUserName] = useState(""); + const newAmount = new anchor.BN(inputValue); + + const [connection] = useState( + () => new Connection("https://api.devnet.solana.com"), + ); + const { authorizeSession, selectedAccount } = useAuthorization(); + const user = selectedAccount.publicKey; + const { cashAppProgram, cashAppPDA } = UseCashAppProgram(user); + + const transferFunds = useCallback( + async (program: Program) => { + let signedTransactions = await transact( + async (wallet: Web3MobileWallet) => { + const [authorizationResult, latestBlockhash] = await Promise.all([ + authorizeSession(wallet), + connection.getLatestBlockhash(), + ]); + + const { pubkey } = getDomainKeySync(userName); + console.log(pubkey); + console.log(newAmount); + + const transferInstruction = await program.methods + .transferFunds(pubkey, newAmount) + .accounts({ + user: authorizationResult.publicKey, + fromCashAccount: cashAppPDA, + }) + .instruction(); + + const transferTransaction = new Transaction({ + ...latestBlockhash, + feePayer: authorizationResult.publicKey, + }).add(transferInstruction); + + const signedTransactions = await wallet.signTransactions({ + transactions: [transferTransaction], + }); + + return signedTransactions[0]; + }, + ); + + let txSignature = await connection.sendRawTransaction( + signedTransactions.serialize(), + { + skipPreflight: true, + }, + ); + + const confirmationResult = await connection.confirmTransaction( + txSignature, + "confirmed", + ); + + if (confirmationResult.value.err) { + throw new Error(JSON.stringify(confirmationResult.value.err)); + } else { + console.log("Transaction successfully submitted!"); + } + }, + [authorizeSession, connection, cashAppPDA], + ); + + return ( + + + navigation.goBack()}> + + + ${inputValue} + { + if (genInProgress) { + return; + } + setGenInProgress(true); + try { + if (!cashAppProgram || !selectedAccount) { + console.warn( + "Program/wallet is not initialized yet. Try connecting a wallet first.", + ); + return; + } + const deposit = await transferFunds(cashAppProgram); + + alertAndLog( + "Funds deposited into cash account ", + "See console for logged transaction.", + ); + console.log(deposit); + } finally { + setGenInProgress(false); + } + }} + > + Pay + + + + To: + + + + For: + + + + Enable purchase protection: + + + + ); +}; + +export default PayScreen; +``` + +For the RequestScreen, you'll follow the same process except you will use the +`newPaymentRequest` instruction instead of the `transferFunds` instruction. + +Try this out, then check your work here: + +#### Activity Screen + +The Activity Screen will allow you to add friends, see pending payment requests, +accept requests, and decline requests. + +For the Add Friend Feature, you'll want a text box for a user to input the +pubkey of the friend they want to add and a button that calls the add friend +instruction. 
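+
+Because the text box hands you a raw string, it is worth validating it before
+building the instruction; `new PublicKey(...)` throws on anything that is not a
+valid base58 address. A small sketch of a guard you could add (the helper name
+is ours, not part of the template):
+
+```tsx
+import { PublicKey } from "@solana/web3.js";
+
+// Returns a PublicKey for valid base58 input, or null so the UI can show an
+// error message instead of failing mid-transaction.
+export function parseFriendAddress(input: string): PublicKey | null {
+  try {
+    return new PublicKey(input.trim());
+  } catch {
+    return null;
+  }
+}
+```
+
+With that in place, the component below wires the text input and button to the
+`addFriend` instruction: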
+ +```tsx +export function AddFriend({ address }: { address: PublicKey }) { + const [pubkey, setPubkey] = useState(""); + const [signingInProgress, setSigningInProgress] = useState(false); + const [connection] = useState( + () => new Connection("https://api.devnet.solana.com"), + ); + const { authorizeSession, selectedAccount } = useAuthorization(); + const { cashAppProgram, cashAppPDA, friends } = UseCashAppProgram(address); + const user = friends.data?.balance; + + const addFriend = useCallback( + async (program: Program) => { + let signedTransactions = await transact( + async (wallet: Web3MobileWallet) => { + const [authorizationResult, latestBlockhash] = await Promise.all([ + authorizeSession(wallet), + connection.getLatestBlockhash(), + ]); + + const addFriendIX = await program.methods + .addFriend(pubkey) + .accounts({ + user: authorizationResult.publicKey, + cashAccount: cashAppPDA, + }) + .instruction(); + + const addFriendTX = new Transaction({ + ...latestBlockhash, + feePayer: authorizationResult.publicKey, + }).add(addFriendIX); + + const signedTransactions = await wallet.signTransactions({ + transactions: [addFriendTX], + }); + + return signedTransactions[0]; + }, + ); + + let txSignature = await connection.sendRawTransaction( + signedTransactions.serialize(), + { + skipPreflight: true, + }, + ); + + const confirmationResult = await connection.confirmTransaction( + txSignature, + "confirmed", + ); + + if (confirmationResult.value.err) { + throw new Error(JSON.stringify(confirmationResult.value.err)); + } else { + console.log("Transaction successfully submitted!"); + } + }, + [authorizeSession, connection, cashAppPDA], + ); + + return ( + + + {" "} + Add New Friend: + + + + + + + ); +} +``` + +To accept and decline requests, you'll follow a very similar method. Try this +out yourself and then check the code here to review your work +[here](https://github.com/solana-developers/cash-app-clone/blob/main/cash-app/src/components/solana-pay/solana-pay-ui.tsx): + +## Enabling QR Code functionality with Solana Pay + +To mimic the QR code functionality in Cash App, you can simply use the +`@solana/pay` JavaScript SDK. For more information, refer to the +[Solana Pay API Reference](https://docs.solanapay.com/api/core). + +The `encodeURL` function takes in an amount and a memo to encode a Solana Pay +URL for a specific transaction. + +Typically, this function is paired with `createQR` to generate a QR code with +the Solana Pay URL. As of today, Solana Pay's current version of the `createQR` +function is not compatible with react-native, so we will need to use a different +QR code generator that is react-native compatible. In this guide, we'll input +the url into `QRCode` from `react-native-qrcode-svg`. It does not have the same +QR code styling as the Solana Pay `createQR`, but it still correctly generates +the needed QR code. + +For simplicity, this functionality will live on its own screen, which we already +defined earlier as the Scan Screen. Similarly to the home screen, navigate to +`ScanScreen.tsx` and set up the following function: + +```tsx +export function ScanScreen() { + const { selectedAccount } = useAuthorization(); + + return ( + + {selectedAccount ? ( + + + + ) : ( + <> + Solana Cash App +
+ + + )} + + ); +} +``` + +Now we need to create the `SolanaPayButton` component. Create a file under +`src/components/solana-pay/solana-pay-ui.tsx`. In Cash App, the QR code is just +a link to the users Cash App profile and is a static image in the app. However, +the solana pay QR code is actually uniquely generated for each requested +transaction, so the QR displayed includes the amount, memo, and the recipient's +public key information. So our UI/UX will function slightly different than cash +app in this section. + +To still follow the look and feel of Cash App, we'll allow most of the screen to +display the QR code and have a button at the bottom for a modal that has amount +and memo input fields and a generate QR code button. On clicking the "Create QR" +button, we'll want to generate a new Solana Pay URL and send that value outside +of the modal to the Scan Screen so that the screen will render and display the +new QR code. + +We can do this with the Solana Pay API, state handling, conditional rendering, +and data submission between the two components, as shown below: + +```tsx +export function SolanaPayButton({ address }: { address: PublicKey }) { + const [showPayModal, setShowPayModal] = useState(false); + + const [url, setUrl] = useState(""); + + return ( + <> + + + {url ? ( + <> + + + + + ) : ( + + Generate a QR Code to display. + + )} + Scan to Pay + + setShowPayModal(false)} + show={showPayModal} + address={address} + setParentUrl={setUrl} + /> + + + + ); +} + +export function SolPayModal({ + hide, + show, + address, + setParentUrl, +}: { + hide: () => void; + show: boolean; + address: PublicKey; + setParentUrl: (url: string) => void; +}) { + const [memo, setMemo] = useState(""); + const [amount, setAmount] = useState(""); + + const handleSubmit = () => { + const number = BigNumber(amount); + const newUrl = encodeURL({ + recipient: address, + amount: number, + memo, + }).toString(); + setParentUrl(newUrl); + hide(); + }; + + return ( + + + + + + + ); +} +``` + +## Connecting User Names with Public Keys via Solana Name Service + +Solana Name Service _(SNS)_ enables a human-readable name to be mapped to a +Solana address. By implementing SNS, we can easily prompt a user to create a +user name _(which will become their SNS name behind the scenes)_ and that name +will directly map to the users wallet address. + +Solana Name Service has two functions that we can implement throughout this dapp +to simplify a lot of the front end: + +- `getDomainKeySync` - a function that returns the public key associated with + the provided domain name. This can be implemented anywhere there is a user + input for a public key. Now the user only needs to type in a username when + searching for an account, exactly as you do with Cash App. This is what SNS + calls a + [direct lookup](https://sns.guide/domain-name/domain-direct-lookup.html). + +- `reverseLookup` - an asynchronous function that returns the domain name of the + provided public key.This can be implemented anywhere in the UI where you want + to display the username. This is what SNS calls a + [reverse lookup](https://sns.guide/domain-name/domain-reverse-lookup.html) + +To showcase this, lets update the transfers funds function to now accept a user +name as a parameter rather than a public key and integrate the SNS API. 
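+
+In isolation, the two lookups are small. The sketch below assumes the
+`@bonfida/spl-name-service` package and a devnet connection, and it omits error
+handling for names that have not been registered:
+
+```tsx
+import { getDomainKeySync, reverseLookup } from "@bonfida/spl-name-service";
+import { Connection } from "@solana/web3.js";
+
+const connection = new Connection("https://api.devnet.solana.com");
+
+// Direct lookup: returns the key associated with the domain (pass the name
+// without the ".sol" suffix). "alice" here is just an example name.
+const { pubkey } = getDomainKeySync("alice");
+
+// Reverse lookup: resolves that key back to the human-readable name, useful
+// anywhere the UI displays a username.
+const username = await reverseLookup(connection, pubkey);
+console.log(`${username} -> ${pubkey.toBase58()}`);
+```
+
+The updated `transferFunds` callback below resolves the recipient from
+`userName` with `getDomainKeySync` instead of taking a public key directly: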
+ +```tsx +const transferFunds = useCallback( + async (program: Program) => { + let signedTransactions = await transact( + async (wallet: Web3MobileWallet) => { + const [authorizationResult, latestBlockhash] = await Promise.all([ + authorizeSession(wallet), + connection.getLatestBlockhash(), + ]); + + const { pubkey } = getDomainKeySync(userName); + + const [recipientPDA] = useMemo(() => { + const recipientSeed = pubkey.toBuffer(); + return PublicKey.findProgramAddressSync( + [recipientSeed], + cashAppProgramId, + ); + }, [cashAppProgramId]); + + const transferInstruction = await program.methods + .transferFunds(pubkey, newTransferAmount) + .accounts({ + user: authorizationResult.publicKey, + fromCashAccount: cashAppPDA, + toCashAccount: recipientPDA, + }) + .instruction(); + + const transferTransaction = new Transaction({ + ...latestBlockhash, + feePayer: authorizationResult.publicKey, + }).add(transferInstruction); + + const signedTransactions = await wallet.signTransactions({ + transactions: [transferTransaction], + }); + + return signedTransactions[0]; + }, + ); + + let txSignature = await connection.sendRawTransaction( + signedTransactions.serialize(), + { + skipPreflight: true, + }, + ); + + const confirmationResult = await connection.confirmTransaction( + txSignature, + "confirmed", + ); + + if (confirmationResult.value.err) { + throw new Error(JSON.stringify(confirmationResult.value.err)); + } else { + console.log("Transaction successfully submitted!"); + } + }, + [authorizeSession, connection, cashAppPDA], +); +``` + +This implementation can be integrated everywhere in the application where an +input requires a public key, enabling the user experience to be identical to +that of a web2 application. + +## Final thoughts + +Congrats on completing a a web3 mobile application! By completing this tutorial +you have learned how to build an expo mobile app with a wallet adapter, write +and deploy an anchor solana program, and connect a mobile UI to a deployed +solana program. + +To build on this knowledge here are a few more resources to look into: + +- [The Anchor Book](https://book.anchor-lang.com/) +- [Solana Guides](/developers/guides) +- [Program Examples](https://github.com/solana-developers/program-examples) diff --git a/content/guides/dapps/journal.mdx b/content/guides/dapps/journal.mdx new file mode 100644 index 000000000..cf1d56d9b --- /dev/null +++ b/content/guides/dapps/journal.mdx @@ -0,0 +1,586 @@ +--- +date: 2024-03-18T00:00:00Z +difficulty: intro +title: "How to create a CRUD dApp on Solana" +description: + "Solana developer quickstart guide to learn how to create a basic CRUD dApp on + the Solana blockchain with a simple journal program and interact with the + program via a UI." +tags: + - quickstart + - dApp + - crud + - anchor + - rust + - react + - program +keywords: + - solana dapp + - on-chain + - rust + - anchor program + - crud dapp + - create dapp + - create solana dapp + - tutorial + - intro to solana development + - blockchain developer + - blockchain tutorial + - web3 developer + - web3 crud app +--- + +In this guide, you will learn how to create and deploy both the Solana program +and UI for a basic on-chain CRUD dApp. This dApp will allow you to create +journal entries, update journal entries, read journal entries, and delete +journal entries all through on-chain transactions. 
+ +## What you will learn + +- Setting up your environment +- Using `npx create-solana-dapp` +- Anchor program development +- Anchor PDAs and accounts +- Deploying a Solana program +- Testing an on-chain program +- Connecting an on-chain program to a React UI + +## Prerequisites + +For this guide, you will need to have your local development environment setup +with a few tools: + +- [Rust](https://www.rust-lang.org/tools/install) +- [Node JS](https://nodejs.org/en/download) +- [Solana CLI & Anchor](/docs/intro/installation) + +## Setting up the project + +```shell +npx create-solana-dapp +``` + +This CLI command enables quick Solana dApp creation. You can find the source +code [here](https://github.com/solana-developers/create-solana-dapp). + +Now respond to the prompts as follows: + +- Enter project name: `my-journal-dapp` +- Select a preset: `Next.js` +- Select a UI library: `Tailwind` +- Select an Anchor template: `counter` program + +By selecting `counter` for the Anchor template, a simple counter +[program](/docs/terminology#program), written in rust using the Anchor +framework, will be generated for you. Before we start editing this generated +template program, let's make sure everything is working as expected: + +```shell +cd my-journal-dapp + +npm install + +npm run dev +``` + +## Writing a Solana program with Anchor + +If you're new to Anchor, +[The Anchor Book](https://book.anchor-lang.com/introduction/introduction.html) +and [Anchor Examples](https://examples.anchor-lang.com/) are great references to +help you learn. + +In `my-journal-dapp`, navigate to `anchor/programs/journal/src/lib.rs`. There +will already be template code generated in this folder. Let's delete it and +start from scratch so we can walk through each step. + +### Define your Anchor program + +```rust +use anchor_lang::prelude::*; + +// This is your program's public key and it will update automatically when you build the project. +declare_id!("7AGmMcgd1SjoMsCcXAAYwRgB9ihCyM8cZqjsUqriNRQt"); + +#[program] +pub mod journal { + use super::*; +} +``` + +### Define your program state + +The state is the data structure used to define the information you want to save +to the account. Since Solana onchain programs do not have storage, the data is +stored in accounts that live on the blockchain. + +When using Anchor, the `#[account]` attribute macro is used to define your +program state. + +```rust +#[account] +#[derive(InitSpace)] +pub struct JournalEntryState { + pub owner: Pubkey, + #[max_len(50)] + pub title: String, + #[max_len(1000)] + pub message: String, +} +``` + +For this journal dApp, we will be storing: + +- the journal's owner +- the title of each journal entry, and +- the message of each journal entry + +Note: Space must be defined when initializing an account. The `InitSpace` macro +used in the above code will help calculate the space needed when initializing an +account. For more information on space, read +[here](https://www.anchor-lang.com/docs/space#the-init-space-macro). + +### Create a journal entry + +Now, let's add an +[instruction handler](/docs/terminology#instruction-handler) to this program +that creates a new journal entry. To do this, we will update the `#[program]` +code that we already defined earlier to include an instruction for +`create_journal_entry`. + +When creating a journal entry, the user will need to provide the `title` and +`message` of the journal entry. So we need to add those two variables as +additional arguments. 
+
+When calling this instruction handler function, we want to save the `owner` of
+the account, the journal entry `title`, and the journal entry `message` to the
+account's `JournalEntryState`.
+
+```rust
+#[program]
+mod journal {
+    use super::*;
+
+    pub fn create_journal_entry(
+        ctx: Context<CreateEntry>,
+        title: String,
+        message: String,
+    ) -> Result<()> {
+        msg!("Journal Entry Created");
+        msg!("Title: {}", title);
+        msg!("Message: {}", message);
+
+        let journal_entry = &mut ctx.accounts.journal_entry;
+        journal_entry.owner = ctx.accounts.owner.key();
+        journal_entry.title = title;
+        journal_entry.message = message;
+        Ok(())
+    }
+}
+```
+
+With the Anchor framework, every instruction handler takes a `Context` type as
+its first argument. The type parameter of `Context` is a struct, defined with
+the `#[derive(Accounts)]` macro, that encapsulates the accounts passed to that
+instruction handler. Therefore, each instruction handler needs its own accounts
+struct. In our case, we need to define a `CreateEntry` struct:
+
+```rust
+#[derive(Accounts)]
+#[instruction(title: String, message: String)]
+pub struct CreateEntry<'info> {
+    #[account(
+        init_if_needed,
+        seeds = [title.as_bytes(), owner.key().as_ref()],
+        bump,
+        payer = owner,
+        space = 8 + JournalEntryState::INIT_SPACE
+    )]
+    pub journal_entry: Account<'info, JournalEntryState>,
+    #[account(mut)]
+    pub owner: Signer<'info>,
+    pub system_program: Program<'info, System>,
+}
+```
+
+In the above code, we used the following macros:
+
+- `#[derive(Accounts)]` macro is used to deserialize and validate the list of
+  accounts specified within the struct
+- `#[instruction(...)]` attribute macro is used to access the instruction data
+  passed into the instruction handler
+- `#[account(...)]` attribute macro then specifies additional constraints on
+  the accounts
+
+Each journal entry is a Program Derived Address ([PDA](/docs/core/pda)) account
+that stores the entry's state on-chain. Since we are creating a new journal
+entry here, it needs to be initialized using the `init_if_needed` constraint.
+
+With Anchor, a PDA is initialized with the `seeds`, `bump`, and
+`init_if_needed` constraints. The `init_if_needed` constraint also requires the
+`payer` and `space` constraints to define who is paying the
+[rent](/docs/terminology#rent) to hold this account's data on-chain and how
+much space needs to be allocated for that data.
+
+Note: By using the `InitSpace` macro in the `JournalEntryState`, we are able to
+calculate space by using the `INIT_SPACE` constant and adding `8` to the space
+constraint for Anchor's internal discriminator.
+
+### Updating a journal entry
+
+Now that we can create a new journal entry, let's add an `update_journal_entry`
+instruction handler with a context that has an `UpdateEntry` type.
+
+To do this, the instruction handler needs to update the data stored in the
+`JournalEntryState` of a specific PDA when the owner of that journal entry
+calls the `update_journal_entry` instruction.
+
+```rust
+#[program]
+mod journal {
+    use super::*;
+
+    ...
+
+    pub fn update_journal_entry(
+        ctx: Context<UpdateEntry>,
+        title: String,
+        message: String,
+    ) -> Result<()> {
+        msg!("Journal Entry Updated");
+        msg!("Title: {}", title);
+        msg!("Message: {}", message);
+
+        let journal_entry = &mut ctx.accounts.journal_entry;
+        journal_entry.message = message;
+
+        Ok(())
+    }
+}
+
+#[derive(Accounts)]
+#[instruction(title: String, message: String)]
+pub struct UpdateEntry<'info> {
+    #[account(
+        mut,
+        seeds = [title.as_bytes(), owner.key().as_ref()],
+        bump,
+        realloc = 8 + 32 + 1 + 4 + title.len() + 4 + message.len(),
+        realloc::payer = owner,
+        realloc::zero = true,
+    )]
+    pub journal_entry: Account<'info, JournalEntryState>,
+    #[account(mut)]
+    pub owner: Signer<'info>,
+    pub system_program: Program<'info, System>,
+}
+```
+
+In the above code, you should notice that it is very similar to creating a
+journal entry, but there are a couple of key differences. Since
+`update_journal_entry` is editing an already existing PDA, we do not need to
+initialize it. However, the message being passed to the instruction handler may
+require a different amount of space to store it (i.e. the `message` could be
+shorter or longer), so we will need to use a few specific `realloc` constraints
+to reallocate the space for the account on-chain:
+
+- `realloc` - sets the new space required
+- `realloc::payer` - defines the account that will either pay or be refunded
+  based on the newly required lamports
+- `realloc::zero` - when set to `true`, the newly allocated memory is
+  zero-initialized, so the account can safely be resized multiple times
+
+The `seeds` and `bump` constraints are still needed to be able to find the
+specific PDA we want to update.
+
+The `mut` constraint allows us to mutate/change the data within the account.
+Because the Solana runtime handles reading from accounts and writing to
+accounts differently, we must explicitly define which accounts will be mutable
+so it can process them correctly.
+
+Note: In Solana, when you perform a reallocation that changes the account's
+size, the transaction must cover the rent for the new account size. The
+`realloc::payer = owner` constraint indicates that the `owner` account will pay
+for the rent. For an account to be able to cover the rent, it typically needs
+to be a signer (to authorize the deduction of funds), and in Anchor, it also
+needs to be mutable so that the runtime can deduct the lamports to cover the
+rent from the account.
+
+### Delete a journal entry
+
+Lastly, we will add a `delete_journal_entry` instruction handler with a context
+that has a `DeleteEntry` type.
+
+To do this, we will simply need to close the account for the specified journal
+entry.
+
+```rust
+#[program]
+mod journal {
+    use super::*;
+
+    ...
+
+    pub fn delete_journal_entry(_ctx: Context<DeleteEntry>, title: String) -> Result<()> {
+        msg!("Journal entry titled {} deleted", title);
+        Ok(())
+    }
+}
+
+#[derive(Accounts)]
+#[instruction(title: String)]
+pub struct DeleteEntry<'info> {
+    #[account(
+        mut,
+        seeds = [title.as_bytes(), owner.key().as_ref()],
+        bump,
+        close = owner,
+    )]
+    pub journal_entry: Account<'info, JournalEntryState>,
+    #[account(mut)]
+    pub owner: Signer<'info>,
+    pub system_program: Program<'info, System>,
+}
+```
+
+In the above code, we use the `close` constraint to close out the account
+on-chain and refund the rent back to the journal entry's owner.
+
+The `seeds` and `bump` constraints are needed to validate the account.
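+
+Before wiring up the UI, it is worth exercising the three instruction handlers
+from a test. The sketch below assumes the default Anchor test setup generated
+by `create-solana-dapp` (a `Journal` IDL type and an `anchor.workspace` entry);
+the exact account names come from your generated client, so adjust them as
+needed:
+
+```typescript
+import * as anchor from "@coral-xyz/anchor";
+import { Program } from "@coral-xyz/anchor";
+import { Journal } from "../target/types/journal";
+
+describe("journal", () => {
+  const provider = anchor.AnchorProvider.env();
+  anchor.setProvider(provider);
+  const program = anchor.workspace.Journal as Program<Journal>;
+
+  // The PDA is derived from the title and the wallet, mirroring the on-chain
+  // seeds defined above.
+  const title = "my first entry";
+  const [journalEntryPda] = anchor.web3.PublicKey.findProgramAddressSync(
+    [Buffer.from(title), provider.wallet.publicKey.toBuffer()],
+    program.programId,
+  );
+
+  it("creates, updates, and deletes a journal entry", async () => {
+    await program.methods
+      .createJournalEntry(title, "gm")
+      .accounts({ journalEntry: journalEntryPda })
+      .rpc();
+
+    await program.methods
+      .updateJournalEntry(title, "gm, updated")
+      .accounts({ journalEntry: journalEntryPda })
+      .rpc();
+
+    const entry = await program.account.journalEntryState.fetch(
+      journalEntryPda,
+    );
+    console.log("stored message:", entry.message);
+
+    await program.methods
+      .deleteJournalEntry(title)
+      .accounts({ journalEntry: journalEntryPda })
+      .rpc();
+  });
+});
+```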
+ +### Build and deploy your Anchor program + +```shell +npm run anchor build +npm run anchor deploy +``` + +## Connecting a Solana program to a UI + +`create-solana-dapp` already sets up a UI with a wallet connector for you. All +we need to do is simply modify if to fit your newly created program. + +Since this journal program has the three instructions, we will need components +in the UI that will be able to call each of these instructions: + +- create entry +- update entry +- delete entry + +Within your project's repo, open the +`web/components/journal/journal-data-access.tsx` to add code to be able to call +each of our instructions. + +Update the `useJournalProgram` function to be able to create an entry: + +```typescript +const createEntry = useMutation({ + mutationKey: ["journalEntry", "create", { cluster }], + mutationFn: async ({ title, message, owner }) => { + const [journalEntryAddress] = await PublicKey.findProgramAddress( + [Buffer.from(title), owner.toBuffer()], + programId, + ); + + return program.methods + .createJournalEntry(title, message) + .accounts({ + journalEntry: journalEntryAddress, + }) + .rpc(); + }, + onSuccess: signature => { + transactionToast(signature); + accounts.refetch(); + }, + onError: error => { + toast.error(`Failed to create journal entry: ${error.message}`); + }, +}); +``` + +Then update the `useJournalProgramAccount` function to be able to update and +delete entries: + +```typescript +const updateEntry = useMutation({ + mutationKey: ["journalEntry", "update", { cluster }], + mutationFn: async ({ title, message, owner }) => { + const [journalEntryAddress] = await PublicKey.findProgramAddress( + [Buffer.from(title), owner.toBuffer()], + programId, + ); + + return program.methods + .updateJournalEntry(title, message) + .accounts({ + journalEntry: journalEntryAddress, + }) + .rpc(); + }, + onSuccess: signature => { + transactionToast(signature); + accounts.refetch(); + }, + onError: error => { + toast.error(`Failed to update journal entry: ${error.message}`); + }, +}); + +const deleteEntry = useMutation({ + mutationKey: ["journal", "deleteEntry", { cluster, account }], + mutationFn: (title: string) => + program.methods + .deleteJournalEntry(title) + .accounts({ journalEntry: account }) + .rpc(), + onSuccess: tx => { + transactionToast(tx); + return accounts.refetch(); + }, +}); +``` + +Next, update the UI in `web/components/journal/journal-ui.tsx` to take in user +input values for the `title` and `message` of when creating a journal entry: + +```tsx +export function JournalCreate() { + const { createEntry } = useJournalProgram(); + const { publicKey } = useWallet(); + const [title, setTitle] = useState(""); + const [message, setMessage] = useState(""); + + const isFormValid = title.trim() !== "" && message.trim() !== ""; + + const handleSubmit = () => { + if (publicKey && isFormValid) { + createEntry.mutateAsync({ title, message, owner: publicKey }); + } + }; + + if (!publicKey) { + return

<p>Connect your wallet.</p>;
+  }
+
+  return (
+    <div>
+      <input
+        type="text"
+        placeholder="Title"
+        value={title}
+        onChange={e => setTitle(e.target.value)}
+        className="input input-bordered w-full max-w-xs"
+      />