You're Invited: Meet the Socket Team at RSAC and BSidesSF 2026, March 23–26. RSVP
Socket
Book a Demo | Sign in
Socket

@blockrun/mcp

Package Overview
Dependencies
Maintainers
1
Versions
7
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@blockrun/mcp - npm Package Compare versions

Comparing version
0.1.0
to
0.2.0
+361
-283
dist/index.js
#!/usr/bin/env node
// src/index.ts
import { Server } from "@modelcontextprotocol/sdk/server/index.js";
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import {
CallToolRequestSchema,
ListToolsRequestSchema
} from "@modelcontextprotocol/sdk/types.js";
import { z } from "zod";
import { LLMClient } from "@blockrun/llm";

@@ -68,2 +65,19 @@ import { generatePrivateKey, privateKeyToAccount } from "viem/accounts";

}
function getWalletInfo() {
  // Resolve the active BlockRun client and derive its on-chain wallet address.
  const client = getClient();
  const walletAddress = client.getWalletAddress();
  // Quick funding hints surfaced to the user alongside the address.
  const fundingOptions = {
    coinbase: "Send USDC, select 'Base' network",
    bridge: "https://bridge.base.org",
    buy: "https://www.coinbase.com/onramp"
  };
  return {
    address: walletAddress,
    // Payments are fixed to USDC on Base (chain ID 8453).
    network: "Base",
    chainId: 8453,
    currency: "USDC",
    // Module-level flag — true when a fresh key was generated this session.
    isNew: walletWasCreated,
    basescanUrl: `https://basescan.org/address/${walletAddress}`,
    fundingOptions
  };
}
function getWalletSetupInstructions() {

@@ -120,5 +134,9 @@ if (!walletAddress) {

}
var tools = [
var server = new McpServer({
name: "blockrun-mcp",
version: "0.2.0"
});
server.registerTool(
"blockrun_chat",
{
name: "blockrun_chat",
description: `Chat with any AI model via BlockRun. Supports 30+ models including GPT-5, Claude Opus 4, Gemini 3, and more.

@@ -136,30 +154,30 @@ Pay-per-request with x402 micropayments - no API keys needed.

inputSchema: {
type: "object",
properties: {
model: {
type: "string",
description: "Model ID (e.g., 'anthropic/claude-sonnet-4', 'openai/gpt-4o'). Use blockrun_models to list all."
},
message: {
type: "string",
description: "Your message to the AI"
},
system: {
type: "string",
description: "Optional system prompt to set context/behavior"
},
max_tokens: {
type: "number",
description: "Maximum tokens in response (default: 1024)"
},
temperature: {
type: "number",
description: "Creativity level 0-2 (default: 1)"
}
},
required: ["model", "message"]
model: z.string().describe("Model ID (e.g., 'anthropic/claude-sonnet-4', 'openai/gpt-4o'). Use blockrun_models to list all."),
message: z.string().describe("Your message to the AI"),
system: z.string().optional().describe("Optional system prompt to set context/behavior"),
max_tokens: z.number().optional().default(1024).describe("Maximum tokens in response"),
temperature: z.number().optional().default(1).describe("Creativity level 0-2")
}
},
async ({ model, message, system, max_tokens, temperature }) => {
try {
const llm = getClient();
const response = await llm.chat(model, message, {
system,
maxTokens: max_tokens,
temperature
});
return { content: [{ type: "text", text: response }] };
} catch (error) {
const errorMessage = error instanceof Error ? error.message : String(error);
return {
content: [{ type: "text", text: formatError(errorMessage) }],
isError: true
};
}
}
);
server.registerTool(
"blockrun_smart",
{
name: "blockrun_smart",
description: `Smart model routing - automatically picks the best model based on your needs.

@@ -176,45 +194,108 @@

inputSchema: {
type: "object",
properties: {
mode: {
type: "string",
enum: ["fast", "balanced", "powerful", "cheap", "reasoning"],
description: "Routing mode: fast, balanced, powerful, cheap, or reasoning"
},
message: {
type: "string",
description: "Your message to the AI"
},
system: {
type: "string",
description: "Optional system prompt"
},
max_tokens: {
type: "number",
description: "Maximum tokens in response (default: 1024)"
}
},
required: ["mode", "message"]
mode: z.enum(["fast", "balanced", "powerful", "cheap", "reasoning"]).describe("Routing mode"),
message: z.string().describe("Your message to the AI"),
system: z.string().optional().describe("Optional system prompt"),
max_tokens: z.number().optional().default(1024).describe("Maximum tokens in response")
},
outputSchema: {
model_used: z.string().describe("The model that was used"),
response: z.string().describe("The AI response")
}
},
async ({ mode, message, system, max_tokens }) => {
const models = MODEL_TIERS[mode];
let lastError = null;
for (const model of models) {
try {
const llm = getClient();
const response = await llm.chat(model, message, {
system,
maxTokens: max_tokens
});
const result = { model_used: model, response };
return {
content: [{ type: "text", text: `[Used: ${model}]
${response}` }],
structuredContent: result
};
} catch (error) {
lastError = error;
continue;
}
}
const errorMessage = lastError?.message || "All models failed";
return {
content: [{ type: "text", text: formatError(errorMessage) }],
isError: true
};
}
);
server.registerTool(
"blockrun_models",
{
name: "blockrun_models",
description: "List all available AI models with pricing. Use this to discover models and compare costs.",
inputSchema: {
type: "object",
properties: {
category: {
type: "string",
enum: ["all", "chat", "reasoning", "image", "embedding"],
description: "Filter by category (default: all)"
},
provider: {
type: "string",
description: "Filter by provider (e.g., 'openai', 'anthropic', 'google')"
}
category: z.enum(["all", "chat", "reasoning", "image", "embedding"]).optional().default("all").describe("Filter by category"),
provider: z.string().optional().describe("Filter by provider (e.g., 'openai', 'anthropic', 'google')")
},
outputSchema: {
count: z.number().describe("Number of models returned"),
models: z.array(z.object({
id: z.string(),
name: z.string().optional(),
inputPrice: z.number().optional(),
outputPrice: z.number().optional()
})).describe("List of available models")
}
},
async ({ category, provider }) => {
const llm = getClient();
if (!cachedModels) {
cachedModels = await llm.listModels();
setTimeout(() => {
cachedModels = null;
}, 5 * 60 * 1e3);
}
let models = cachedModels;
if (provider) {
const p = provider.toLowerCase();
models = models.filter((m) => m.id.toLowerCase().startsWith(p + "/"));
}
if (category && category !== "all") {
if (category === "image") {
models = models.filter(
(m) => m.id.includes("dall-e") || m.id.includes("flux") || m.id.includes("banana")
);
} else if (category === "reasoning") {
models = models.filter(
(m) => m.id.includes("/o1") || m.id.includes("/o3") || m.id.includes("reasoner")
);
} else if (category === "embedding") {
models = models.filter((m) => m.id.includes("embed"));
}
}
},
const lines = models.map((m) => {
const input = m.inputPrice ? `$${m.inputPrice}/M in` : "";
const output = m.outputPrice ? `$${m.outputPrice}/M out` : "";
const pricing = [input, output].filter(Boolean).join(", ");
return `- ${m.id}: ${m.name || ""} ${pricing ? `(${pricing})` : ""}`;
});
const structuredModels = models.map((m) => ({
id: m.id,
name: m.name,
inputPrice: m.inputPrice,
outputPrice: m.outputPrice
}));
return {
content: [{ type: "text", text: `Available models (${models.length}):
${lines.join("\n")}` }],
structuredContent: { count: models.length, models: structuredModels }
};
}
);
server.registerTool(
"blockrun_image",
{
name: "blockrun_image",
description: `Generate images using AI models. Supports DALL-E 3, Flux, and Nano Banana.

@@ -227,177 +308,92 @@

inputSchema: {
type: "object",
properties: {
prompt: {
type: "string",
description: "Description of the image to generate"
},
model: {
type: "string",
description: "Image model (default: openai/dall-e-3)",
enum: ["openai/dall-e-3", "together/flux-schnell", "google/nano-banana"]
},
size: {
type: "string",
description: "Image size (default: 1024x1024)",
enum: ["1024x1024", "1792x1024", "1024x1792"]
},
quality: {
type: "string",
description: "Quality level for DALL-E 3 (default: standard)",
enum: ["standard", "hd"]
}
},
required: ["prompt"]
prompt: z.string().describe("Description of the image to generate"),
model: z.enum(["openai/dall-e-3", "together/flux-schnell", "google/nano-banana"]).optional().default("openai/dall-e-3").describe("Image model"),
size: z.enum(["1024x1024", "1792x1024", "1024x1792"]).optional().default("1024x1024").describe("Image size"),
quality: z.enum(["standard", "hd"]).optional().default("standard").describe("Quality level for DALL-E 3")
},
outputSchema: {
url: z.string().describe("URL of the generated image"),
prompt: z.string().describe("The prompt used"),
model: z.string().describe("The model used")
}
},
{
name: "blockrun_wallet",
description: "Get information about your BlockRun wallet address. Shows address, network, and quick funding options.",
inputSchema: {
type: "object",
properties: {}
}
},
{
name: "blockrun_setup",
description: `Get detailed wallet setup and funding instructions. Use this for first-time setup or if you need help adding funds to your wallet.
async ({ prompt, model, size, quality }) => {
const apiUrl = "https://blockrun.ai/api/v1/images/generations";
const body = {
model,
prompt,
size,
quality,
n: 1
};
const response = await fetch(apiUrl, {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify(body)
});
if (response.status === 402) {
return {
content: [{ type: "text", text: `Image generation requires payment. Please ensure your wallet has USDC on Base.
Returns:
- Your wallet address
- Step-by-step funding instructions (Coinbase, bridge, direct purchase)
- Pricing information
- Security details`,
inputSchema: {
type: "object",
properties: {}
To generate "${prompt}" with ${model}, the approximate cost is $0.04-0.08 per image.` }],
isError: true
};
}
}
];
/**
 * Forward a chat request to the selected model via the BlockRun client.
 * @param {{model: string, message: string, system?: string, max_tokens?: number, temperature?: number}} args
 * @returns {Promise<string>} the model's text response
 */
async function handleChat(args) {
  const { model, message, system, max_tokens, temperature } = args;
  const client = getClient();
  const reply = await client.chat(model, message, {
    system,
    maxTokens: max_tokens,
    temperature
  });
  return reply;
}
async function handleSmartRoute(args) {
const models = MODEL_TIERS[args.mode];
if (!models) {
throw new Error(`Invalid mode: ${args.mode}. Use: fast, balanced, powerful, cheap, or reasoning`);
}
let lastError = null;
for (const model of models) {
try {
const response = await handleChat({
model,
message: args.message,
system: args.system,
max_tokens: args.max_tokens
});
return `[Used: ${model}]
${response}`;
} catch (error) {
lastError = error;
continue;
if (!response.ok) {
return {
content: [{ type: "text", text: formatError(`Image generation failed: ${response.status}`) }],
isError: true
};
}
}
throw lastError || new Error("All models failed");
}
async function handleListModels(args) {
const llm = getClient();
if (!cachedModels) {
cachedModels = await llm.listModels();
setTimeout(() => {
cachedModels = null;
}, 5 * 60 * 1e3);
}
let models = cachedModels;
if (args.provider) {
const provider = args.provider.toLowerCase();
models = models.filter((m) => m.id.toLowerCase().startsWith(provider + "/"));
}
if (args.category && args.category !== "all") {
const category = args.category.toLowerCase();
if (category === "image") {
models = models.filter(
(m) => m.id.includes("dall-e") || m.id.includes("flux") || m.id.includes("banana")
);
} else if (category === "reasoning") {
models = models.filter(
(m) => m.id.includes("/o1") || m.id.includes("/o3") || m.id.includes("reasoner")
);
} else if (category === "embedding") {
models = models.filter((m) => m.id.includes("embed"));
const data = await response.json();
const imageUrl = data.data?.[0]?.url;
if (!imageUrl) {
return {
content: [{ type: "text", text: formatError("No image URL in response") }],
isError: true
};
}
}
const lines = models.map((m) => {
const input = m.inputPrice ? `$${m.inputPrice}/M in` : "";
const output = m.outputPrice ? `$${m.outputPrice}/M out` : "";
const pricing = [input, output].filter(Boolean).join(", ");
return `- ${m.id}: ${m.name || ""} ${pricing ? `(${pricing})` : ""}`;
});
return `Available models (${models.length}):
return {
content: [{ type: "text", text: `Image generated successfully!
${lines.join("\n")}`;
}
async function handleImageGeneration(args) {
const model = args.model || "openai/dall-e-3";
const llm = getClient();
const apiUrl = "https://blockrun.ai/api/v1/images/generations";
const body = {
model,
prompt: args.prompt,
size: args.size || "1024x1024",
quality: args.quality || "standard",
n: 1
};
const response = await fetch(apiUrl, {
method: "POST",
headers: {
"Content-Type": "application/json"
},
body: JSON.stringify(body)
});
if (response.status === 402) {
return `Image generation requires payment. Please ensure your wallet has USDC on Base.
URL: ${imageUrl}
To generate "${args.prompt}" with ${model}, the approximate cost is $0.04-0.08 per image.`;
Prompt: ${prompt}
Model: ${model}` }],
structuredContent: { url: imageUrl, prompt, model }
};
}
if (!response.ok) {
throw new Error(`Image generation failed: ${response.status}`);
}
const data = await response.json();
const imageUrl = data.data?.[0]?.url;
if (!imageUrl) {
throw new Error("No image URL in response");
}
return `Image generated successfully!
URL: ${imageUrl}
Prompt: ${args.prompt}
Model: ${model}`;
}
function handleWalletInfo() {
const llm = getClient();
const address = llm.getWalletAddress();
const isNewWallet = walletWasCreated;
let response = `BlockRun Wallet Information
);
server.registerTool(
"blockrun_wallet",
{
description: "Get information about your BlockRun wallet address. Shows address, network, and quick funding options.",
inputSchema: {},
outputSchema: {
address: z.string().describe("Wallet address"),
network: z.string().describe("Network name"),
chainId: z.number().describe("Chain ID"),
currency: z.string().describe("Currency"),
isNew: z.boolean().describe("Whether this is a newly created wallet"),
basescanUrl: z.string().describe("Link to view on Basescan")
}
},
async () => {
const info = getWalletInfo();
const isNewWallet = info.isNew;
let text = `BlockRun Wallet Information
============================
Address: ${address}
Network: Base (Chain ID: 8453)
Currency: USDC
Address: ${info.address}
Network: ${info.network} (Chain ID: ${info.chainId})
Currency: ${info.currency}
View on Basescan: https://basescan.org/address/${address}
View on Basescan: ${info.basescanUrl}
`;
if (isNewWallet) {
response += `
if (isNewWallet) {
text += `
STATUS: NEW WALLET - NEEDS FUNDING
${getWalletSetupInstructions()}`;
} else {
response += `
} else {
text += `
HOW TO ADD FUNDS:

@@ -408,65 +404,151 @@ -----------------

Quick options:
1. From Coinbase: Send USDC, select "Base" network
2. Bridge: https://bridge.base.org
3. Buy: https://www.coinbase.com/onramp
1. From Coinbase: ${info.fundingOptions.coinbase}
2. Bridge: ${info.fundingOptions.bridge}
3. Buy: ${info.fundingOptions.buy}
Full instructions: Run blockrun_setup tool
`;
}
return {
content: [{ type: "text", text }],
structuredContent: {
address: info.address,
network: info.network,
chainId: info.chainId,
currency: info.currency,
isNew: info.isNew,
basescanUrl: info.basescanUrl
}
};
}
return response;
}
// Return the wallet setup/funding guide as plain text.
function handleSetup() {
// Called for its side effect only: ensures a client (and thus a wallet) exists
// before the instructions are generated; the return value is discarded.
getClient();
return getWalletSetupInstructions();
}
var server = new Server(
);
server.registerTool(
"blockrun_setup",
{
name: "blockrun-mcp",
version: "0.1.0"
description: `Get detailed wallet setup and funding instructions. Use this for first-time setup or if you need help adding funds to your wallet.
Returns:
- Your wallet address
- Step-by-step funding instructions (Coinbase, bridge, direct purchase)
- Pricing information
- Security details`,
inputSchema: {}
},
async () => {
getClient();
return { content: [{ type: "text", text: getWalletSetupInstructions() }] };
}
);
server.registerResource(
"wallet",
"blockrun://wallet",
{
capabilities: {
tools: {}
description: "Your BlockRun wallet address and status",
mimeType: "application/json"
},
async () => {
const info = getWalletInfo();
return {
contents: [{
uri: "blockrun://wallet",
mimeType: "application/json",
text: JSON.stringify(info, null, 2)
}]
};
}
);
server.registerResource(
"models",
"blockrun://models",
{
description: "List of all available AI models with pricing",
mimeType: "application/json"
},
async () => {
const llm = getClient();
if (!cachedModels) {
cachedModels = await llm.listModels();
setTimeout(() => {
cachedModels = null;
}, 5 * 60 * 1e3);
}
return {
contents: [{
uri: "blockrun://models",
mimeType: "application/json",
text: JSON.stringify(cachedModels, null, 2)
}]
};
}
);
server.setRequestHandler(ListToolsRequestSchema, async () => ({
tools
}));
server.setRequestHandler(CallToolRequestSchema, async (request) => {
const { name, arguments: args } = request.params;
try {
let result;
switch (name) {
case "blockrun_chat":
result = await handleChat(args);
break;
case "blockrun_smart":
result = await handleSmartRoute(args);
break;
case "blockrun_models":
result = await handleListModels(args);
break;
case "blockrun_image":
result = await handleImageGeneration(args);
break;
case "blockrun_wallet":
result = handleWalletInfo();
break;
case "blockrun_setup":
result = handleSetup();
break;
default:
throw new Error(`Unknown tool: ${name}`);
server.registerPrompt(
"quick_chat",
{
description: "Start a quick chat with a recommended model",
argsSchema: {
message: z.string().describe("Your message"),
style: z.enum(["concise", "detailed", "creative"]).optional().default("concise").describe("Response style")
}
},
async ({ message, style }) => {
const systemPrompts = {
concise: "Be concise and direct. Give short, focused answers.",
detailed: "Provide thorough, comprehensive answers with examples.",
creative: "Be creative and imaginative in your responses."
};
return {
content: [{ type: "text", text: result }]
messages: [
{
role: "user",
content: {
type: "text",
text: `[System: ${systemPrompts[style || "concise"]}]
${message}`
}
}
]
};
} catch (error) {
const message = error instanceof Error ? error.message : String(error);
const isPaymentError = message.toLowerCase().includes("payment") || message.toLowerCase().includes("402") || message.toLowerCase().includes("balance") || message.toLowerCase().includes("insufficient");
let errorText = `Error: ${message}`;
if (isPaymentError) {
errorText += `
}
);
// MCP prompt "code_review": builds a single user message asking a model to
// review the supplied code, optionally scoped to a focus area.
server.registerPrompt(
"code_review",
{
description: "Get a code review from a powerful model",
argsSchema: {
code: z.string().describe("The code to review"),
language: z.string().optional().describe("Programming language"),
focus: z.enum(["bugs", "performance", "style", "all"]).optional().default("all").describe("What to focus on")
}
},
async ({ code, language, focus }) => {
// Maps each focus value to the instruction appended to the review request.
const focusInstructions = {
bugs: "Focus on potential bugs, errors, and edge cases.",
performance: "Focus on performance issues and optimization opportunities.",
style: "Focus on code style, readability, and best practices.",
all: "Review for bugs, performance, and style."
};
// NOTE(review): `focus || "all"` is defensive — the zod schema already
// defaults focus to "all"; confirm the fallback can be dropped.
return {
messages: [
{
role: "user",
content: {
type: "text",
text: `Please review this ${language || ""} code. ${focusInstructions[focus || "all"]}
\`\`\`${language || ""}
${code}
\`\`\``
}
}
]
};
}
);
function formatError(message) {
const isPaymentError = message.toLowerCase().includes("payment") || message.toLowerCase().includes("402") || message.toLowerCase().includes("balance") || message.toLowerCase().includes("insufficient");
let errorText = `Error: ${message}`;
if (isPaymentError) {
errorText += `
This error usually means your wallet needs funding.

@@ -476,13 +558,9 @@ Run the blockrun_setup tool to get your wallet address and funding instructions.

Quick fix: Send USDC to your wallet on Base network.`;
}
return {
content: [{ type: "text", text: errorText }],
isError: true
};
}
});
return errorText;
}
// Entry point: connect the MCP server over stdio. Logging goes to stderr
// because stdout carries the MCP protocol stream.
async function main() {
const transport = new StdioServerTransport();
await server.connect(transport);
// NOTE(review): two startup log lines appear here — this looks like diff
// residue (old vs. new message from the 0.1.0→0.2.0 compare view);
// confirm only one should remain in the real source.
console.error("BlockRun MCP Server started");
console.error("BlockRun MCP Server started (v0.1.0)");
}

@@ -489,0 +567,0 @@ main().catch((error) => {

{
"name": "@blockrun/mcp",
"version": "0.1.0",
"version": "0.2.0",
"description": "BlockRun MCP Server - Access 30+ AI models via x402 micropayments. No API keys needed.",

@@ -46,5 +46,6 @@ "type": "module",

"dependencies": {
"@blockrun/llm": "^0.1.1",
"@modelcontextprotocol/sdk": "^1.0.0",
"@blockrun/llm": "^0.1.1",
"viem": "^2.21.0"
"viem": "^2.21.0",
"zod": "^4.3.5"
},

@@ -51,0 +52,0 @@ "devDependencies": {

+71
-83
# @blockrun/mcp
**Access 30+ AI models in Claude Code with zero API keys.**
## The Problem
One wallet. Pay-per-request. All major AI models.
Want to use GPT-5, Gemini, or DeepSeek in Claude Code? Today you need to:
1. Create accounts with 5+ AI providers
2. Manage 5+ API keys and billing systems
3. Pay $20-100/month minimums per provider
4. Configure each provider separately
**That's too much friction.**
## The Solution
BlockRun MCP gives you access to 30+ AI models with:
- **Zero API keys** - No accounts needed with OpenAI, Google, etc.
- **One wallet** - Single USDC balance for all providers
- **Pay-per-use** - No minimums, $5 gets you started
- **One command** - Install and go
```bash

@@ -11,35 +27,22 @@ claude mcp add blockrun npx @blockrun/mcp

## Why BlockRun MCP?
> **Alternative:** Prefer Python? Try the [BlockRun Skill](https://github.com/BlockRunAI/claude-code-blockrun-agent) (`pip install blockrun-llm`) - same features, different integration style.
| Feature | Other Solutions | BlockRun MCP |
|---------|-----------------|--------------|
| **API Keys** | Need 5+ keys (OpenAI, Anthropic, Google...) | **None needed** |
| **Billing** | Manage 5+ subscriptions | **One wallet, unified balance** |
| **Setup** | Complex config per provider | **One command, auto-wallet** |
| **Models** | Usually 1 provider | **30+ models, 6 providers** |
| **Payment** | Monthly subscriptions | **Pay only what you use** |
| **Minimum** | $20-100/month per provider | **$0 minimum, start with $5** |
## Quick Start
### 1. Install (30 seconds)
### 1. Install
```bash
# Add to Claude Code
claude mcp add blockrun npx @blockrun/mcp
```
That's it! A wallet is automatically created for you.
A wallet is automatically created for you.
### 2. Get Your Wallet Address
In Claude Code, run:
```
Use blockrun_setup to get my wallet address
```
You: blockrun setup
Or:
Claude: Your wallet address is 0x...
Send USDC on Base network to fund it.
```
Use blockrun_wallet to show my wallet info
```

@@ -50,34 +53,32 @@ ### 3. Fund Your Wallet

**Funding Options:**
| Method | Steps |
|--------|-------|
| **From Coinbase** | Send → USDC → Select "Base" network → Paste your address |
| **Bridge** | Visit [bridge.base.org](https://bridge.base.org) → Bridge USDC to Base |
| **Buy Direct** | Visit [Coinbase Onramp](https://www.coinbase.com/onramp) → Buy USDC on Base |
| **Bridge** | [bridge.base.org](https://bridge.base.org) → Bridge USDC to Base |
| **Buy Direct** | [Coinbase Onramp](https://www.coinbase.com/onramp) → Buy USDC on Base |
### 4. Start Using
Just ask naturally:
```
You: Use blockrun_chat to ask claude-sonnet-4 what is quantum computing
You: blockrun ask GPT-5 to explain quantum computing
Claude: [calls blockrun_chat]
Quantum computing is a type of computation that harnesses...
You: blockrun chat with Claude Opus about this error
You: blockrun generate an image of a mountain sunset
```
## Available Tools
## Usage Examples
### `blockrun_chat`
Chat with any AI model.
### Chat with Any Model
```javascript
blockrun_chat({
model: "anthropic/claude-sonnet-4", // Required
message: "Explain quantum computing", // Required
system: "You are a physics professor", // Optional
max_tokens: 2000, // Optional (default: 1024)
temperature: 0.7 // Optional (default: 1)
})
```
blockrun ask GPT-5 what causes aurora borealis
blockrun chat with Claude Opus about optimizing this algorithm
blockrun ask Gemini Pro to review this code for security issues
```
**Popular Models:**

@@ -90,57 +91,44 @@ - `openai/gpt-5.2` - Most capable OpenAI model

### `blockrun_smart`
Auto-select the best model for your needs.
### Smart Model Selection
```javascript
blockrun_smart({
mode: "balanced", // Required: fast | balanced | powerful | cheap | reasoning
message: "Hello!" // Required
})
Let BlockRun pick the best model for your needs:
```
blockrun smart fast: what's 2+2
| Mode | Models Used | Best For | Cost |
|------|-------------|----------|------|
| `fast` | Gemini Flash, GPT-4o-mini | Quick responses | $ |
| `balanced` | GPT-4o, Claude Sonnet | Daily tasks | $$ |
| `powerful` | GPT-5.2, Claude Opus, o3 | Complex work | $$$$ |
| `cheap` | Gemini Flash, DeepSeek | Budget-conscious | $ |
| `reasoning` | o3, o1, DeepSeek Reasoner | Logic & math | $$$ |
blockrun smart powerful: analyze this complex codebase
### `blockrun_models`
List all available models with pricing.
```javascript
blockrun_models({
category: "chat", // Optional: all, chat, reasoning, image, embedding
provider: "openai" // Optional: filter by provider
})
blockrun smart cheap: summarize this text
```
### `blockrun_image`
Generate images with AI.
| Mode | Models Used | Best For |
|------|-------------|----------|
| `fast` | Gemini Flash, GPT-4o-mini | Quick responses |
| `balanced` | GPT-4o, Claude Sonnet | Daily tasks |
| `powerful` | GPT-5.2, Claude Opus, o3 | Complex work |
| `cheap` | Gemini Flash, DeepSeek | Budget-conscious |
| `reasoning` | o3, o1, DeepSeek Reasoner | Logic & math |
```javascript
blockrun_image({
prompt: "A sunset over mountains", // Required
model: "openai/dall-e-3", // Optional
size: "1024x1024", // Optional: 1024x1024, 1792x1024, 1024x1792
quality: "hd" // Optional: standard, hd
})
### Generate Images
```
blockrun generate an image of a cyberpunk cityscape
### `blockrun_wallet`
Check your wallet information.
blockrun create a watercolor painting of mountains
```
```javascript
blockrun_wallet({})
// Returns: address, network, balance link, funding options
### List Available Models
```
blockrun list models
### `blockrun_setup`
Get detailed setup and funding instructions.
blockrun show OpenAI models with pricing
```
```javascript
blockrun_setup({})
// Returns: complete setup guide with step-by-step funding instructions
### Wallet Management
```
blockrun setup # First-time setup instructions
blockrun wallet # Check your wallet address
```

@@ -268,3 +256,3 @@ ## Supported Models & Pricing

### "Payment was rejected"
Your wallet needs funding. Run `blockrun_setup` to get your address and funding instructions.
Your wallet needs funding. Say `blockrun setup` to get your address and funding instructions.

@@ -275,6 +263,6 @@ ### "Wallet key required"

### Model not responding
Some models have rate limits. Try `blockrun_smart` with mode `fast` or `cheap` to use alternative models.
Some models have rate limits. Try `blockrun smart cheap` or `blockrun smart fast` to use alternative models.
### Check wallet balance
Visit: `https://basescan.org/address/YOUR_ADDRESS`
Say `blockrun wallet` or visit: `https://basescan.org/address/YOUR_ADDRESS`

@@ -281,0 +269,0 @@ ## Configuration