Skip to content

Commit 0210ca6

Browse files
committed
feat: add AI-powered persona creator chat
- Add a persona-architect chat sheet for conversational persona/team creation
- Rust backend with goose-first LLM provider resolution and Anthropic/OpenAI fallback
- Streamlined system prompt that is decisive instead of over-asking follow-ups
- Auto-generate DiceBear avatar URLs from display names
- Fix preview-card keying (use the array index, not displayName)
- Fix silent index filtering in team validation (raise an error instead of dropping entries)
1 parent a3a4127 commit 0210ca6

11 files changed

Lines changed: 722 additions & 1 deletion

File tree

desktop/src-tauri/src/commands/mod.rs

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@ mod export_util;
99
mod identity;
1010
mod media;
1111
mod messages;
12+
mod persona_chat;
1213
mod personas;
1314
mod profile;
1415
mod teams;
@@ -25,6 +26,7 @@ pub use dms::*;
2526
pub use identity::*;
2627
pub use media::*;
2728
pub use messages::*;
29+
pub use persona_chat::*;
2830
pub use personas::*;
2931
pub use profile::*;
3032
pub use teams::*;
Lines changed: 136 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,136 @@
1+
use serde::{Deserialize, Serialize};
2+
3+
use crate::managed_agents::resolve_command;
4+
5+
/// One turn of the persona-creator conversation, as sent from the frontend.
///
/// `role` is a free-form string; "assistant" is special-cased downstream
/// (see `format_conversation_prompt`) and any other value is treated as a
/// user turn.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ChatMessage {
    // Speaker of this turn ("user" or "assistant" in practice).
    pub role: String,
    // Plain-text body of the message.
    pub content: String,
}
11+
12+
/// Response payload returned to the frontend: the assistant's reply text.
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct ChatResponse {
    // The assistant's plain-text reply, extracted from goose's JSON output.
    pub content: String,
}
17+
18+
/// Send messages to an LLM for the persona creator chat.
19+
///
20+
/// Uses goose - the app's primary agent runtime - which resolves
21+
/// provider, model, and credentials from its own config.
22+
#[tauri::command]
23+
pub async fn persona_creator_chat(
24+
system_prompt: String,
25+
messages: Vec<ChatMessage>,
26+
) -> Result<ChatResponse, String> {
27+
let goose_path = resolve_command("goose", None).ok_or_else(|| {
28+
"No LLM runtime found. Install goose to use the AI persona creator.".to_string()
29+
})?;
30+
31+
goose_chat(goose_path, system_prompt, messages).await
32+
}
33+
34+
/// Format the conversation history as a single text prompt for goose.
35+
///
36+
/// For single-turn (one user message), returns the message content directly.
37+
/// For multi-turn, includes prior exchanges as context so the LLM can continue
38+
/// the conversation coherently.
39+
fn format_conversation_prompt(messages: &[ChatMessage]) -> String {
40+
if messages.len() <= 1 {
41+
return messages
42+
.first()
43+
.map(|m| m.content.clone())
44+
.unwrap_or_default();
45+
}
46+
47+
let mut parts = Vec::with_capacity(messages.len());
48+
for (i, msg) in messages.iter().enumerate() {
49+
if i < messages.len() - 1 {
50+
let label = if msg.role == "assistant" {
51+
"Assistant"
52+
} else {
53+
"User"
54+
};
55+
parts.push(format!("{label}: {}", msg.content));
56+
}
57+
}
58+
59+
let history = parts.join("\n\n");
60+
let last = &messages[messages.len() - 1].content;
61+
62+
format!(
63+
"Here is our conversation so far:\n\n{history}\n\n---\n\nNow respond to this message:\n\n{last}"
64+
)
65+
}
66+
67+
/// Run a one-shot LLM completion through goose.
68+
async fn goose_chat(
69+
goose_path: std::path::PathBuf,
70+
system_prompt: String,
71+
messages: Vec<ChatMessage>,
72+
) -> Result<ChatResponse, String> {
73+
let prompt_text = format_conversation_prompt(&messages);
74+
75+
let output = tokio::task::spawn_blocking(move || {
76+
std::process::Command::new(&goose_path)
77+
.args([
78+
"run",
79+
"-t",
80+
&prompt_text,
81+
"--system",
82+
&system_prompt,
83+
"--no-session",
84+
"--no-profile",
85+
"--max-turns",
86+
"1",
87+
"-q",
88+
"--output-format",
89+
"json",
90+
])
91+
.stdin(std::process::Stdio::null())
92+
.stdout(std::process::Stdio::piped())
93+
.stderr(std::process::Stdio::piped())
94+
.output()
95+
.map_err(|e| format!("failed to spawn goose: {e}"))
96+
})
97+
.await
98+
.map_err(|e| format!("goose task failed: {e}"))?
99+
.map_err(|e: String| e)?;
100+
101+
if !output.status.success() {
102+
let stderr = String::from_utf8_lossy(&output.stderr);
103+
return Err(format!(
104+
"goose exited with {}: {}",
105+
output.status.code().unwrap_or(-1),
106+
stderr.chars().take(500).collect::<String>()
107+
));
108+
}
109+
110+
let response: serde_json::Value = serde_json::from_slice(&output.stdout)
111+
.map_err(|e| format!("failed to parse goose JSON: {e}"))?;
112+
113+
// Extract the last assistant message's text content.
114+
let content = response["messages"]
115+
.as_array()
116+
.and_then(|msgs| {
117+
msgs.iter()
118+
.rev()
119+
.find(|m| m["role"].as_str() == Some("assistant"))
120+
})
121+
.and_then(|msg| msg["content"].as_array())
122+
.and_then(|blocks| {
123+
blocks
124+
.iter()
125+
.find(|b| b["type"].as_str() == Some("text"))
126+
.and_then(|b| b["text"].as_str())
127+
})
128+
.unwrap_or("")
129+
.to_string();
130+
131+
if content.is_empty() {
132+
return Err("goose returned no assistant response".to_string());
133+
}
134+
135+
Ok(ChatResponse { content })
136+
}

desktop/src-tauri/src/lib.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -418,6 +418,7 @@ pub fn run() {
418418
parse_team_file,
419419
parse_persona_files,
420420
export_persona_to_json,
421+
persona_creator_chat,
421422
get_channel_workflows,
422423
get_workflow,
423424
create_workflow,
Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,22 @@
1+
import { invokeTauri } from "@/shared/api/tauri";
2+
3+
import { PERSONA_CREATOR_SYSTEM_PROMPT } from "./prompt";
4+
5+
type ChatResponse = {
6+
content: string;
7+
};
8+
9+
/**
10+
* Send conversation messages to the LLM for the persona creator.
11+
* Calls the `persona_creator_chat` Tauri command which handles
12+
* API key resolution and provider selection.
13+
*/
14+
export async function personaCreatorChat(
15+
messages: ReadonlyArray<{ role: string; content: string }>,
16+
): Promise<string> {
17+
const response = await invokeTauri<ChatResponse>("persona_creator_chat", {
18+
systemPrompt: PERSONA_CREATOR_SYSTEM_PROMPT,
19+
messages: messages.map((m) => ({ role: m.role, content: m.content })),
20+
});
21+
return response.content;
22+
}
Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
1+
// Barrel file for the persona-creator feature: re-exports the schema
// parsing helpers and types plus the system prompt so consumers import
// everything from a single path.
export {
  extractJsonBlock,
  parsePersonaCreatorOutput,
  personaCreatorJsonSchema,
  toCreateInputs,
  type PersonaCreatorOutput,
  type PersonaCreatorPersona,
  type PersonaCreatorTeam,
} from "./schema";
export { PERSONA_CREATOR_SYSTEM_PROMPT } from "./prompt";
Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,33 @@
1+
import { personaCreatorJsonSchema } from "./schema";
2+
3+
/**
 * System prompt for the persona-creator LLM chat.
 *
 * Instructs the model to act as a "Persona Architect": gather minimal context,
 * draft personas (and optionally a team), and — only on finalization — emit a
 * single fenced JSON block matching `personaCreatorJsonSchema` (interpolated
 * into the prompt below so the model sees the exact expected shape).
 *
 * NOTE: this is a runtime string sent verbatim to the model; edits here
 * change model behavior.
 */
export const PERSONA_CREATOR_SYSTEM_PROMPT = `You are a Persona Architect - a friendly expert who helps users design AI agent personas and teams for the Sprout desktop app.

## Your Role
Help users create one or more personas (and optionally a team to group them). Be decisive - gather what you need, then produce results. Don't over-ask.

## Conversation Flow
1. Ask what kind of agent(s) the user wants to create and what they'll be used for. One question is enough - don't pepper them with followups.
2. Once you have enough context, draft everything: display names, system prompts, and if multiple personas are involved, a team grouping. Show a preview.
3. If the user gives feedback, revise. Otherwise, output the final structured JSON immediately.

Be proactive: if the user describes multiple related personas, group them into a team automatically - don't ask permission. Make sensible default choices for names, tone, and structure. Only ask followups when genuinely ambiguous.

## Output Format
When finalizing, emit a single fenced JSON code block matching this schema:

\`\`\`
${JSON.stringify(personaCreatorJsonSchema, null, 2)}
\`\`\`

Important notes about the output:
- \`personaIndices\` in the team object are zero-based indices into the \`personas\` array.
- Only include the JSON block when the user has approved and you're ready to finalize.
- Do NOT include the JSON block in intermediate/draft messages - just show previews in plain text.

## Guidelines
- Be conversational and helpful, not robotic.
- Keep system prompts concise but effective - focus on behavior, tone, and capabilities.
- If the user is unsure, suggest reasonable defaults.
- One persona is fine - teams are optional.
- Name pools are optional fun - suggest them if appropriate (e.g. themed names).
`;

0 commit comments

Comments
 (0)