Added the langchain4j dependency and LLM routing functionality. Tested with a basic ping_server skill, which worked.

This commit is contained in:
2026-02-27 15:18:34 +11:00
parent 12c54e237b
commit 1a3d6bde7c
3 changed files with 132 additions and 32 deletions

View File

@@ -1,34 +1,68 @@
package net.mmanningau.alice
import dev.langchain4j.data.message.AiMessage
import dev.langchain4j.data.message.ChatMessage
import dev.langchain4j.data.message.SystemMessage
import dev.langchain4j.data.message.ToolExecutionResultMessage
import dev.langchain4j.data.message.UserMessage
import dev.langchain4j.model.chat.ChatLanguageModel
import dev.langchain4j.model.openai.OpenAiChatModel
import dev.langchain4j.service.AiServices
import java.time.Duration
object LlmManager {
    // IMPORTANT FOR THE EMULATOR:
    // 10.0.2.2 is a special IP that lets the Android Emulator talk to your Host PC's localhost.
    // If you are running Llama.cpp, LM Studio, or Ollama on your Mac/PC, use this.
    // (Later, when the LLM is running directly ON the phone itself, this will change to "http://localhost:8080/v1")
    private const val LOCAL_LLM_URL = "http://10.0.2.2:11434/v1"

    // 1. The "Model" connection: an OpenAI-compatible client pointed at the local server.
    // NOTE: each builder option appears exactly once — an earlier revision had duplicated
    // .apiKey/.modelName/.timeout/.maxRetries/.logRequests calls; only the last call wins
    // on a builder, so the effective values are kept here and the dead calls removed.
    private val chatModel: ChatLanguageModel = OpenAiChatModel.builder()
        .baseUrl(LOCAL_LLM_URL)
        .apiKey("dummy-key") // Local servers ignore this, but the builder requires a string
        .modelName("qwen3:8b") // Ensure this matches your Ollama model! (e.g. "mistral", "qwen2.5", "phi3")
        .timeout(Duration.ofMinutes(3)) // Local inference can take a bit
        .maxRetries(0) // Bypasses the broken Java 15 retry logger
        .logRequests(true) // Great for debugging in Logcat
        .logResponses(true)
        .build()

    // 2. The "Agent" combining our Model and our AliceAgent interface.
    // Kept for backward compatibility; the UI now calls chat() directly.
    val agent: AliceAgent = AiServices.builder(AliceAgent::class.java)
        .chatLanguageModel(chatModel)
        .build()

    // We manually maintain the conversation state now.
    // Seeded with the system prompt; every user/AI/tool message is appended in order.
    private val chatHistory = mutableListOf<ChatMessage>(
        SystemMessage("You are Alice, a highly capable local AI assistant. You provide concise, direct answers.")
    )

    /**
     * Sends [userText] to the local LLM with the full conversation history and the
     * dynamically loaded tool specifications, runs any requested tools via
     * [SkillManager.executeSkill], and loops until the model produces a plain-text answer.
     *
     * NOTE(review): the history grows unbounded and there is no loop cap — a model that
     * keeps requesting tools would loop forever. Consider a max-iteration guard.
     *
     * @param userText the raw user message to append and send.
     * @return the model's final conversational text.
     */
    fun chat(userText: String): String {
        // 1. Add user's new message to memory
        chatHistory.add(UserMessage(userText))

        // 2. Fetch all current tools dynamically from the folder
        val toolSpecs = SkillManager.loadSkills()

        // 3. Send the entire history and the tools to the LLM
        var response = chatModel.generate(chatHistory, toolSpecs)
        var aiMessage: AiMessage = response.content()
        chatHistory.add(aiMessage)

        // 4. THE EXECUTION LOOP
        // If the LLM decides it needs to run a tool, hasToolExecutionRequests() is true.
        while (aiMessage.hasToolExecutionRequests()) {
            for (request in aiMessage.toolExecutionRequests()) {
                val toolName = request.name()
                val arguments = request.arguments()

                // Send the request across the bridge to Python!
                val toolResult = SkillManager.executeSkill(toolName, arguments)

                // Package the result and add it to the memory
                val toolMessage = ToolExecutionResultMessage(request.id(), toolName, toolResult)
                chatHistory.add(toolMessage)
            }
            // Ping the LLM again with the new tool results so it can formulate a final answer
            response = chatModel.generate(chatHistory, toolSpecs)
            aiMessage = response.content()
            chatHistory.add(aiMessage)
        }

        // 5. Return the final conversational text to the UI
        return aiMessage.text()
    }
}

View File

@@ -154,7 +154,7 @@ fun MainChatScreen() {
scope.launch(Dispatchers.IO) {
try {
// Send it to the local LLM!
val response = LlmManager.agent.chat(userText)
val response = LlmManager.chat(userText)
// Compose state automatically handles switching back to the main thread for UI updates
messages = messages + ChatMessage(response, false)

View File

@@ -2,31 +2,97 @@ package net.mmanningau.alice
import android.content.Context
import android.util.Log
import com.chaquo.python.Python
import dev.langchain4j.agent.tool.JsonSchemaProperty
import dev.langchain4j.agent.tool.ToolSpecification
import org.json.JSONObject
import java.io.File
object SkillManager {
    // We hold the path here so the UI and LangChain4j can both access it instantly.
    var skillsDirectory: File? = null
        private set

    /**
     * Resolves and (if needed) creates the app-external "Skills" directory and caches it
     * in [skillsDirectory].
     *
     * getExternalFilesDir(null) points to:
     *   /storage/emulated/0/Android/data/net.mmanningau.alice/files
     * This is safe from Scoped Storage restrictions, but accessible via Android file managers.
     */
    fun initialize(context: Context) {
        val baseDir = context.getExternalFilesDir(null)
        val skillsDir = File(baseDir, "Skills")
        if (!skillsDir.exists()) {
            val created = skillsDir.mkdirs()
            if (created) {
                Log.i("AliceSkills", "Created Skills directory at: ${skillsDir.absolutePath}")
            } else {
                Log.e("AliceSkills", "Failed to create Skills directory at: ${skillsDir.absolutePath}")
            }
        } else {
            // Directory already exists — nothing to create.
            // (A redundant mkdirs() call here was removed; it was a no-op.)
            Log.i("AliceSkills", "Skills directory already exists at: ${skillsDir.absolutePath}")
        }
        skillsDirectory = skillsDir
    }

    /**
     * 1. THE PARSER: Reads each skill folder's manifest.json and builds the
     * LangChain4j [ToolSpecification] list the LLM uses for tool selection.
     *
     * Expected manifest shape: {"name", "description", "parameters": {"properties": {...}}}.
     * Folders without a manifest are skipped; malformed manifests are logged and skipped
     * so one bad skill cannot break the rest.
     *
     * @return specifications for every successfully parsed skill (empty if uninitialized).
     */
    fun loadSkills(): List<ToolSpecification> {
        val toolSpecs = mutableListOf<ToolSpecification>()
        skillsDirectory?.listFiles()?.forEach { skillDir ->
            if (skillDir.isDirectory) {
                val manifestFile = File(skillDir, "manifest.json")
                if (manifestFile.exists()) {
                    try {
                        val json = JSONObject(manifestFile.readText())
                        val name = json.getString("name")
                        val description = json.getString("description")
                        val builder = ToolSpecification.builder()
                            .name(name)
                            .description(description)

                        // Parse the expected parameters so the LLM knows what to extract
                        val parameters = json.optJSONObject("parameters")
                        val properties = parameters?.optJSONObject("properties")
                        properties?.keys()?.forEach { key ->
                            val prop = properties.getJSONObject(key)
                            val type = prop.getString("type") // e.g., "string"
                            val desc = prop.optString("description", "")
                            builder.addParameter(
                                key,
                                JsonSchemaProperty.type(type),
                                JsonSchemaProperty.description(desc)
                            )
                        }
                        toolSpecs.add(builder.build())
                        Log.i("AliceSkills", "Successfully loaded skill: $name")
                    } catch (e: Exception) {
                        // Deliberate best-effort: a broken manifest must not break other skills.
                        Log.e("AliceSkills", "Failed to parse manifest for ${skillDir.name}", e)
                    }
                }
            }
        }
        return toolSpecs
    }

    /**
     * 2. THE EXECUTOR: Runs `<skillsDirectory>/<toolName>/script.py` via Chaquopy and
     * returns its result as a string.
     *
     * The script text is exec'd into a fresh dict on every call, so editing the Python
     * files hot-reloads instantly. The script must define `def execute(args_json):`.
     * All failures are reported as an "Error ..." string rather than thrown, because the
     * result is fed back to the LLM as a tool message.
     *
     * @param toolName folder name of the skill to run.
     * @param argumentsJson raw JSON arguments string produced by the LLM.
     * @return the script's string result, or an error description.
     */
    fun executeSkill(toolName: String, argumentsJson: String): String {
        return try {
            val scriptFile = File(skillsDirectory, "$toolName/script.py")
            if (!scriptFile.exists()) return "Error: script.py not found for tool '$toolName'."

            val py = Python.getInstance()
            val builtins = py.builtins

            // We create an isolated dictionary for the script to run in.
            val globals = py.getModule("builtins").callAttr("dict")

            // Execute the raw script text
            builtins.callAttr("exec", scriptFile.readText(), globals)

            // Find the 'execute' function we mandated in our python script
            val executeFunc = globals.callAttr("get", "execute")
                ?: return "Error: Python script missing 'def execute(args_json):' function."

            // Call it and return the string!
            executeFunc.call(argumentsJson).toString()
        } catch (e: Exception) {
            Log.e("AliceSkills", "Execution failed for $toolName", e)
            "Error executing skill: ${e.message}"
        }
    }
}