Check-in before major engine overhaul to use the Google MediaPipe LLM framework.

This commit is contained in:
2026-03-02 10:05:08 +11:00
parent d4322740e2
commit e703df9ec1
2 changed files with 84 additions and 32 deletions

View File

@@ -4,10 +4,10 @@
<selectionStates>
<SelectionState runConfigName="app">
<option name="selectionMode" value="DROPDOWN" />
<DropdownSelection timestamp="2026-02-28T03:12:27.182833316Z">
<DropdownSelection timestamp="2026-02-28T04:08:30.769945596Z">
<Target type="DEFAULT_BOOT">
<handle>
<DeviceId pluginId="PhysicalDevice" identifier="serial=461ed66e" />
<DeviceId pluginId="LocalEmulator" identifier="path=/home/michael/.android/avd/Pixel_8_API_35.avd" />
</handle>
</Target>
</DropdownSelection>

View File

@@ -1,15 +1,18 @@
package net.mmanningau.alice
import com.llamatik.library.platform.LlamaBridge // The correct Llamatik import!
import com.llamatik.library.platform.LlamaBridge
import dev.langchain4j.agent.tool.ToolExecutionRequest
import dev.langchain4j.agent.tool.ToolSpecification
import dev.langchain4j.data.message.AiMessage
import dev.langchain4j.data.message.ChatMessage
import dev.langchain4j.data.message.SystemMessage
import dev.langchain4j.data.message.ToolExecutionResultMessage
import dev.langchain4j.data.message.UserMessage
import dev.langchain4j.model.chat.ChatLanguageModel
import dev.langchain4j.model.output.Response
import org.json.JSONObject
import java.io.File
import dev.langchain4j.agent.tool.ToolSpecification
import java.util.UUID
class LlamaCppAdapter(private val modelPath: String) : ChatLanguageModel {
@@ -21,43 +24,92 @@ class LlamaCppAdapter(private val modelPath: String) : ChatLanguageModel {
if (!modelFile.exists()) {
throw IllegalStateException("Model file not found at: $modelPath. Please download a model first.")
}
// Boot the native C++ backend via Llamatik's Kotlin bridge
LlamaBridge.initGenerateModel(modelPath)
isEngineLoaded = true
}
}
/**
 * Simple generator (no tools).
 *
 * Delegates to the tool-aware overload with an empty tool list so that there is a
 * single prompt-building / generation path for both entry points. With no tools
 * supplied, the overload skips the tool-prompt section and the tool-call parsing,
 * so this behaves as plain ChatML text generation.
 */
override fun generate(messages: List<ChatMessage>): Response<AiMessage> {
    return generate(messages, emptyList())
}
/**
 * Tool-aware generator.
 *
 * Builds a Qwen 2.5 (ChatML) prompt from the chat history, injects a plain-text
 * tool manifest into the system message when [toolSpecifications] is non-empty,
 * runs the native engine via [LlamaBridge], and then scans the raw output for a
 * JSON-style tool call (`"name": "...", "arguments": {...}`).
 *
 * @param messages chat history to render into the prompt.
 * @param toolSpecifications tools advertised to the model; empty list disables
 *        both the tool prompt and the tool-call parsing.
 * @return an [AiMessage] carrying either a [ToolExecutionRequest] (when a tool
 *         call was detected) or the plain generated text.
 */
override fun generate(
    messages: List<ChatMessage>,
    toolSpecifications: List<ToolSpecification>
): Response<AiMessage> {
    getOrInitEngine()

    // 1. Build the tool section appended to the system prompt (if any tools exist).
    //    Small local models follow a concrete example better than a schema, hence
    //    the explicit "Correct Example" JSON line.
    val toolsPrompt = StringBuilder()
    if (toolSpecifications.isNotEmpty()) {
        toolsPrompt.append("\n\n# AVAILABLE TOOLS\n")
        for (tool in toolSpecifications) {
            toolsPrompt.append("- ${tool.name()}: ${tool.description()}\n")
        }
        toolsPrompt.append("\nCRITICAL INSTRUCTION: To use a tool, you MUST reply with a JSON object. Do NOT use Python parentheses.\n")
        toolsPrompt.append("Correct Example:\n{\"name\": \"get_battery_level\", \"arguments\": {}}\n")
    }

    // 2. Render the chat history as ChatML.
    val promptBuilder = StringBuilder()
    for (message in messages) {
        when (message) {
            is SystemMessage -> {
                val content = message.text() + toolsPrompt.toString()
                promptBuilder.append("<|im_start|>system\n$content<|im_end|>\n")
            }
            is UserMessage -> promptBuilder.append("<|im_start|>user\n${message.text()}<|im_end|>\n")
            is ToolExecutionResultMessage -> {
                // Tool results are replayed to the model as a user turn so the
                // template stays within the roles Qwen was trained on.
                promptBuilder.append("<|im_start|>user\nTool [${message.toolName()}] returned: ${message.text()}<|im_end|>\n")
            }
            is AiMessage -> {
                if (message.hasToolExecutionRequests()) {
                    // Replay the first tool request in the same JSON shape we ask
                    // the model to emit, so the history stays self-consistent.
                    val request = message.toolExecutionRequests()[0]
                    promptBuilder.append("<|im_start|>assistant\n{\"name\": \"${request.name()}\", \"arguments\": ${request.arguments()}}<|im_end|>\n")
                } else {
                    // Strip our own synthetic "Calling tool: ..." placeholder text
                    // (added below) so it never leaks back into the prompt.
                    val cleanText = message.text()?.replace(Regex("Calling tool:.*?\\.\\.\\."), "")?.trim() ?: ""
                    if (cleanText.isNotBlank()) {
                        promptBuilder.append("<|im_start|>assistant\n$cleanText<|im_end|>\n")
                    }
                }
            }
        }
    }
    // Open the assistant turn so the engine starts generating the reply.
    promptBuilder.append("<|im_start|>assistant\n")

    // 3. Execute on the local engine and strip any trailing ChatML end tag.
    val responseText = LlamaBridge.generate(promptBuilder.toString()).replace("<|im_end|>", "").trim()

    // 4. Parse the output for a tool call (regex hunt, tolerant of loose JSON).
    if (toolSpecifications.isNotEmpty()) {
        // Sanitize single quotes once; both regexes run against the same text.
        val sanitized = responseText.replace("'", "\"")
        // Hunt for "name": "something" regardless of surrounding brackets.
        val nameRegex = Regex("\"name\"\\s*:\\s*\"([^\"]+)\"")
        val match = nameRegex.find(sanitized)
        if (match != null) {
            val toolName = match.groupValues[1]
            // Extract arguments if present; default to an empty JSON object.
            // NOTE(review): the non-greedy {.*?} stops at the first '}', so nested
            // argument objects will be truncated — acceptable while tools take
            // flat arguments only; revisit if nested schemas are added.
            var argumentsJson = "{}"
            val argRegex = Regex("\"arguments\"\\s*:\\s*(\\{.*?\\})")
            val argMatch = argRegex.find(sanitized)
            if (argMatch != null) {
                argumentsJson = argMatch.groupValues[1]
            }
            val request = ToolExecutionRequest.builder()
                .id(UUID.randomUUID().toString())
                .name(toolName)
                .arguments(argumentsJson)
                .build()
            return Response.from(AiMessage.from("Calling tool: $toolName...", listOf(request)))
        }
    }

    // 5. No tool call detected: return the plain text.
    return Response.from(AiMessage(responseText))
}
}