diff --git a/app/build.gradle.kts b/app/build.gradle.kts
index 788aa7b..e3eb113 100644
--- a/app/build.gradle.kts
+++ b/app/build.gradle.kts
@@ -89,6 +89,9 @@ dependencies {
// Llama.cpp Kotlin Multiplatform Wrapper
implementation("com.llamatik:library:0.8.1")
+ // Google AI Edge - MediaPipe LLM Inference API
+ implementation("com.google.mediapipe:tasks-genai:0.10.27")
+
// Extended Material Icons (for Download, CheckCircle, etc.)
implementation("androidx.compose.material:material-icons-extended")
}
\ No newline at end of file
diff --git a/app/src/main/AndroidManifest.xml b/app/src/main/AndroidManifest.xml
index 22b893b..22d0308 100644
--- a/app/src/main/AndroidManifest.xml
+++ b/app/src/main/AndroidManifest.xml
@@ -4,6 +4,8 @@
+        <!-- NOTE(review): original hunk content was stripped by a sanitizer; reconstructed
+             accessibility-service declaration below — verify against version control -->
+        <service
+            android:name=".AliceAccessibilityService"
+            android:permission="android.permission.BIND_ACCESSIBILITY_SERVICE"
+            android:exported="true">
+            <intent-filter>
+                <action android:name="android.accessibilityservice.AccessibilityService" />
+            </intent-filter>
+            <meta-data
+                android:name="android.accessibilityservice"
+                android:resource="@xml/accessibility_service_config" />
+        </service>
\ No newline at end of file
diff --git a/app/src/main/java/net/mmanningau/alice/AliceAccessibilityService.kt b/app/src/main/java/net/mmanningau/alice/AliceAccessibilityService.kt
new file mode 100644
index 0000000..ec0135c
--- /dev/null
+++ b/app/src/main/java/net/mmanningau/alice/AliceAccessibilityService.kt
@@ -0,0 +1,21 @@
+package net.mmanningau.alice
+
+import android.accessibilityservice.AccessibilityService
+import android.view.accessibility.AccessibilityEvent
+import android.util.Log
+
+class AliceAccessibilityService : AccessibilityService() {
+
+ override fun onServiceConnected() {
+ super.onServiceConnected()
+ Log.d("AliceAccessibility", "Service Connected and Ready!")
+ }
+
+ override fun onAccessibilityEvent(event: AccessibilityEvent?) {
+ // We will build the screen-reading logic here later!
+ }
+
+ override fun onInterrupt() {
+ Log.e("AliceAccessibility", "Service Interrupted")
+ }
+}
\ No newline at end of file
diff --git a/app/src/main/java/net/mmanningau/alice/LlmManager.kt b/app/src/main/java/net/mmanningau/alice/LlmManager.kt
index 866823f..d0a7f39 100644
--- a/app/src/main/java/net/mmanningau/alice/LlmManager.kt
+++ b/app/src/main/java/net/mmanningau/alice/LlmManager.kt
@@ -1,5 +1,7 @@
package net.mmanningau.alice
+import android.content.Context
+import android.util.Log
import dev.langchain4j.data.message.AiMessage
import dev.langchain4j.data.message.ChatMessage
import dev.langchain4j.data.message.SystemMessage
@@ -11,13 +13,17 @@ import java.time.Duration
import java.text.SimpleDateFormat
import java.util.Date
import java.util.Locale
-import android.content.Context
+import kotlinx.coroutines.flow.MutableStateFlow
+import kotlinx.coroutines.flow.StateFlow
object LlmManager {
private var chatModel: ChatLanguageModel? = null
var currentMode: String = "Remote"
private set
+ // Hardware telemetry for the UI
+ private val _hardwareBackend = MutableStateFlow("Standby")
+ val hardwareBackend: StateFlow<String> = _hardwareBackend
// Database tracking
private var chatDao: ChatDao? = null
@@ -29,7 +35,7 @@ object LlmManager {
// Initialization now makes the dao optional so the UI can safely call it!
fun initialize(
- context: Context,dao: ChatDao?, mode: String, url: String, modelName: String, apiKey: String, systemPrompt: String
+ context: Context, dao: ChatDao?, mode: String, url: String, modelName: String, apiKey: String, systemPrompt: String
) {
// Only update the DAO if one was passed in (like on app boot)
if (dao != null) {
@@ -52,9 +58,36 @@ object LlmManager {
.logResponses(true)
.build()
} else if (mode == "Local") {
- // NEW: Grab the absolute path from the registry and boot the middleman!
+ // Grab the absolute path from the registry
val fullPath = ModelRegistry.getModelPath(context, modelName)
- chatModel = LlamaCppAdapter(fullPath)
+
+ // NEW: The Switchboard! Route the path to the correct engine based on file type
+ when {
+ fullPath.endsWith(".task") || fullPath.endsWith(".litertlm") -> {
+ Log.d("AliceEngine", "Routing to MediaPipe Engine (Formula 1 Mode)")
+ // Reset to standby when a new model is selected
+ _hardwareBackend.value = "Standby"
+
+ chatModel = MediaPipeAdapter(context, fullPath) { systemEvent ->
+ // Intercept the hardware broadcast
+ if (systemEvent.startsWith("HARDWARE_STATE:")) {
+ _hardwareBackend.value = systemEvent.removePrefix("HARDWARE_STATE:").trim()
+ } else {
+ Log.w("AliceSystem", systemEvent)
+ }
+ }
+ }
+ fullPath.endsWith(".gguf") -> {
+ Log.d("AliceEngine", "Routing to Llama.cpp Engine (Flexible Mode)")
+ // Llama.cpp manages its own Vulkan backend, so we just label it
+ _hardwareBackend.value = "Vulkan"
+ chatModel = LlamaCppAdapter(fullPath)
+ }
+ else -> {
+ Log.e("AliceEngine", "Unsupported model file extension: $fullPath")
+ chatModel = null
+ }
+ }
}
// Database Startup Logic
@@ -105,7 +138,7 @@ object LlmManager {
}
fun chat(userText: String): String {
- if (currentMode == "Local" && chatModel == null) return "System: Llamatik Ollama On-Device engine is selected but not yet installed."
+ if (currentMode == "Local" && chatModel == null) return "System: Local engine is selected but not properly initialized or unsupported file format."
val currentModel = chatModel ?: return "Error: LLM engine not initialized."
// If the history size is 1, it means only the System prompt exists. This is the first message!
diff --git a/app/src/main/java/net/mmanningau/alice/MainActivity.kt b/app/src/main/java/net/mmanningau/alice/MainActivity.kt
index 4f2385f..1e6ff71 100644
--- a/app/src/main/java/net/mmanningau/alice/MainActivity.kt
+++ b/app/src/main/java/net/mmanningau/alice/MainActivity.kt
@@ -50,6 +50,8 @@ class MainActivity : ComponentActivity() {
fun MainChatScreen() {
val drawerState = rememberDrawerState(initialValue = DrawerValue.Closed)
val scope = rememberCoroutineScope()
+ // Observe the live hardware state
+ val hardwareBackend by LlmManager.hardwareBackend.collectAsState()
var currentScreen by remember { mutableStateOf("Chat") }
var inputText by remember { mutableStateOf("") }
@@ -135,7 +137,29 @@ fun MainChatScreen() {
Scaffold(
topBar = {
TopAppBar(
- title = { Text("Alice Agent") },
+ title = {
+ Row(verticalAlignment = Alignment.CenterVertically) {
+ Text("Alice Agent")
+
+ // The Live Hardware Telemetry Badge
+ if (hardwareBackend != "Standby") {
+ Spacer(modifier = Modifier.width(8.dp))
+ Surface(
+ color = if (hardwareBackend == "GPU") MaterialTheme.colorScheme.primary
+ else MaterialTheme.colorScheme.error,
+ shape = RoundedCornerShape(4.dp)
+ ) {
+ Text(
+ text = hardwareBackend,
+ style = MaterialTheme.typography.labelSmall,
+ color = if (hardwareBackend == "GPU") MaterialTheme.colorScheme.onPrimary
+ else MaterialTheme.colorScheme.onError,
+ modifier = Modifier.padding(horizontal = 6.dp, vertical = 2.dp)
+ )
+ }
+ }
+ }
+ },
navigationIcon = {
IconButton(onClick = { scope.launch { drawerState.open() } }) {
Icon(Icons.Default.Menu, contentDescription = "Menu")
@@ -519,7 +543,10 @@ fun ModelManagerScreen(onBackClicked: () -> Unit) {
// Launch the background download
scope.launch {
- ModelDownloader.downloadModel(context, model.downloadUrl, model.fileName)
+ // Grab the Hugging Face token from the API Key settings field
+ val hfToken = prefs.getString("apiKey", "") ?: ""
+
+ ModelDownloader.downloadModel(context, model.downloadUrl, model.fileName, hfToken)
.collect { progress ->
downloadProgress[model.id] = progress
}
diff --git a/app/src/main/java/net/mmanningau/alice/MediaPipeAdapter.kt b/app/src/main/java/net/mmanningau/alice/MediaPipeAdapter.kt
new file mode 100644
index 0000000..81695df
--- /dev/null
+++ b/app/src/main/java/net/mmanningau/alice/MediaPipeAdapter.kt
@@ -0,0 +1,175 @@
+package net.mmanningau.alice
+
+import android.content.Context
+import android.util.Log
+import com.google.mediapipe.tasks.genai.llminference.LlmInference
+import dev.langchain4j.agent.tool.ToolExecutionRequest
+import dev.langchain4j.agent.tool.ToolSpecification
+import dev.langchain4j.data.message.AiMessage
+import dev.langchain4j.data.message.ChatMessage
+import dev.langchain4j.data.message.SystemMessage
+import dev.langchain4j.data.message.ToolExecutionResultMessage
+import dev.langchain4j.data.message.UserMessage
+import dev.langchain4j.model.chat.ChatLanguageModel
+import dev.langchain4j.model.output.Response
+import org.json.JSONObject
+import java.io.File
+import java.util.UUID
+
+class MediaPipeAdapter(
+ private val context: Context,
+ private val modelPath: String,
+ private val onSystemEvent: (String) -> Unit // Flexible routing for UI notifications & Accessibility
+) : ChatLanguageModel {
+
+ private var engine: LlmInference? = null
+
+ private fun getOrInitEngine(): LlmInference {
+ if (engine == null) {
+ val modelFile = File(modelPath)
+ if (!modelFile.exists()) {
+ throw IllegalStateException("Task file not found: $modelPath. Please download it.")
+ }
+
+ try {
+ // THE PUSH: Aggressively demand the Adreno GPU
+ val gpuOptions = LlmInference.LlmInferenceOptions.builder()
+ .setModelPath(modelPath)
+ .setMaxTokens(4096)
+ .setPreferredBackend(LlmInference.Backend.GPU)
+ .build()
+ engine = LlmInference.createFromOptions(context, gpuOptions)
+ Log.d("AliceEngine", "Formula 1 Mode: GPU Initialized successfully.")
+ // Broadcast the successful hardware lock!
+ onSystemEvent("HARDWARE_STATE: GPU")
+
+ } catch (e: Exception) {
+ // THE FALLBACK: If GPU fails, notify the UI and drop to CPU
+ Log.e("AliceEngine", "GPU Initialization failed: ${e.message}")
+ onSystemEvent("Hardware Notice: GPU not supported for this model. Falling back to CPU. Generation will be slower and consume more battery.")
+
+ val cpuOptions = LlmInference.LlmInferenceOptions.builder()
+ .setModelPath(modelPath)
+ .setMaxTokens(4096)
+ .setPreferredBackend(LlmInference.Backend.CPU)
+ .build()
+ engine = LlmInference.createFromOptions(context, cpuOptions)
+ // Broadcast the fallback
+ onSystemEvent("HARDWARE_STATE: CPU")
+ }
+ }
+ return engine!!
+ }
+
+ override fun generate(messages: List<ChatMessage>): Response<AiMessage> {
+ return generate(messages, emptyList())
+ }
+
+ override fun generate(
+ messages: List<ChatMessage>,
+ toolSpecifications: List<ToolSpecification>
+ ): Response<AiMessage> {
+ val activeEngine = getOrInitEngine()
+ val promptBuilder = java.lang.StringBuilder()
+
+ // 1. The Strict Negative-Constraint Schema
+ val toolsPrompt = java.lang.StringBuilder()
+ if (toolSpecifications.isNotEmpty()) {
+ toolsPrompt.append("\n\n# AVAILABLE TOOLS\n")
+ for (tool in toolSpecifications) {
+ toolsPrompt.append("- ${tool.name()}: ${tool.description()} | Params: ${tool.parameters()?.toString() ?: "{}"}\n")
+ }
+ toolsPrompt.append("\nCRITICAL RULES:\n")
+ toolsPrompt.append("1. NEVER guess or fabricate data (like battery levels, IP addresses, or network latency). You MUST use a tool to fetch real data.\n")
+ toolsPrompt.append("2. Do NOT invent your own syntax. You must use ONLY the exact JSON format below.\n")
+ toolsPrompt.append("3. To execute a tool, reply with ONLY this JSON object and absolutely no other text:\n")
+ toolsPrompt.append("{\"name\": \"<tool_name>\", \"arguments\": {}}\n")
+ }
+
+ // 2. Format Chat History using GEMMA 3 TAGS (Merging System into User)
+ var isFirstUserMessage = true
+
+ for (message in messages) {
+ when (message) {
+ is SystemMessage -> {
+ // IGNORE: We do not append a 'system' tag because Gemma 3 doesn't support it.
+ // We already built the toolsPrompt string in Step 1, so we just hold it.
+ }
+ is UserMessage -> {
+ if (isFirstUserMessage) {
+ // Merge the draconian tools prompt and the user's first message into one block
+ promptBuilder.append("<start_of_turn>user\n${toolsPrompt.toString()}\n\n${message.text()}<end_of_turn>\n")
+ isFirstUserMessage = false
+ } else {
+ promptBuilder.append("<start_of_turn>user\n${message.text()}<end_of_turn>\n")
+ }
+ }
+ is ToolExecutionResultMessage -> {
+ promptBuilder.append("<start_of_turn>user\n[SYSTEM DATA VIA TOOL '${message.toolName()}']: ${message.text()}\nUse this real data to answer the previous question.<end_of_turn>\n")
+ }
+ is AiMessage -> {
+ if (message.hasToolExecutionRequests()) {
+ val request = message.toolExecutionRequests()[0]
+ promptBuilder.append("<start_of_turn>model\n{\"name\": \"${request.name()}\", \"arguments\": ${request.arguments()}}<end_of_turn>\n")
+ } else {
+ val cleanText = message.text()?.replace(Regex("Calling tool:.*?\\.\\.\\."), "")?.trim() ?: ""
+ if (cleanText.isNotBlank()) {
+ promptBuilder.append("<start_of_turn>model\n$cleanText<end_of_turn>\n")
+ }
+ }
+ }
+ }
+ }
+ promptBuilder.append("<start_of_turn>model\n")
+
+ // 3. Execution on MediaPipe
+ val rawResponse = activeEngine.generateResponse(promptBuilder.toString())
+ Log.d("AliceEngine", "RAW_RESPONSE_LENGTH: ${rawResponse.length}")
+ Log.d("AliceEngine", "RAW_RESPONSE: $rawResponse")
+ Log.d("AliceEngine", "Engine state after gen - messages count: ${messages.size}")
+ val responseText = rawResponse.replace("<end_of_turn>", "").trim()
+
+ // Strip the markdown code blocks if Gemma adds them
+ val cleanText = responseText.replace(Regex("```(?:json)?"), "").replace("```", "").trim()
+
+ // 4. The Bulletproof Regex JSON Parser
+ if (toolSpecifications.isNotEmpty()) {
+ // Hunt directly for the tool name, bypassing strict JSON validation
+ val nameRegex = Regex("\"name\"\\s*:\\s*\"([^\"]+)\"")
+ val match = nameRegex.find(cleanText)
+
+ if (match != null) {
+ val toolName = match.groupValues[1]
+
+ // Safely attempt to grab arguments. If they are hallucinated garbage, default to {}
+ var argumentsJson = "{}"
+ // Old regex - pre Claude ..... val argRegex = Regex("\"arguments\"\\s*:\\s*(\\{.*?\\})")
+ val argRegex = Regex("\"arguments\"\\s*:\\s*(\\{.*?\\})", RegexOption.DOT_MATCHES_ALL)
+
+ val argMatch = argRegex.find(cleanText)
+
+ if (argMatch != null) {
+ val foundArgs = argMatch.groupValues[1]
+ try {
+ // Test if the args are valid JSON
+ JSONObject(foundArgs)
+ argumentsJson = foundArgs
+ } catch (e: Exception) {
+ // It was garbage (like the infinite 7777s). Keep the "{}" default.
+ Log.w("AliceEngine", "Discarded malformed arguments: $foundArgs")
+ }
+ }
+
+ val request = ToolExecutionRequest.builder()
+ .id(UUID.randomUUID().toString())
+ .name(toolName)
+ .arguments(argumentsJson)
+ .build()
+
+ return Response.from(AiMessage.from("Calling tool: $toolName...", listOf(request)))
+ }
+ }
+
+ return Response.from(AiMessage(cleanText))
+ }
+}
\ No newline at end of file
diff --git a/app/src/main/java/net/mmanningau/alice/ModelDownloader.kt b/app/src/main/java/net/mmanningau/alice/ModelDownloader.kt
index 342bbe4..b1ce988 100644
--- a/app/src/main/java/net/mmanningau/alice/ModelDownloader.kt
+++ b/app/src/main/java/net/mmanningau/alice/ModelDownloader.kt
@@ -12,7 +12,7 @@ import java.io.File
object ModelDownloader {
- fun downloadModel(context: Context, url: String, fileName: String): Flow<Int> = flow {
+ fun downloadModel(context: Context, url: String, fileName: String, hfToken: String = ""): Flow<Int> = flow {
val downloadManager = context.getSystemService(Context.DOWNLOAD_SERVICE) as DownloadManager
// Ensure the directory exists
@@ -22,10 +22,14 @@ object ModelDownloader {
.setTitle(fileName)
.setDescription("Downloading AI Model for Alice...")
.setNotificationVisibility(DownloadManager.Request.VISIBILITY_VISIBLE_NOTIFY_COMPLETED)
- // Save it directly into our app's specific Models folder
.setDestinationUri(Uri.fromFile(File(modelsDir, fileName)))
.setAllowedOverMetered(true) // Allow cellular downloads
+ // THE FIX: Inject the Hugging Face Authorization header so we bypass the gate
+ if (hfToken.isNotBlank()) {
+ request.addRequestHeader("Authorization", "Bearer $hfToken")
+ }
+
val downloadId = downloadManager.enqueue(request)
var finishDownload = false
var progress = 0
diff --git a/app/src/main/java/net/mmanningau/alice/ModelRegistry.kt b/app/src/main/java/net/mmanningau/alice/ModelRegistry.kt
index 254937b..9aa7250 100644
--- a/app/src/main/java/net/mmanningau/alice/ModelRegistry.kt
+++ b/app/src/main/java/net/mmanningau/alice/ModelRegistry.kt
@@ -46,6 +46,30 @@ object ModelRegistry {
fileName = "qwen2.5-3b-instruct-q4_k_m.gguf",
downloadUrl = "https://huggingface.co/Qwen/Qwen2.5-3B-Instruct-GGUF/resolve/main/qwen2.5-3b-instruct-q4_k_m.gguf",
sizeMb = 2020
+ ),
+ LocalModel(
+ id = "gemma3-1b",
+ name = "Gemma 3 (1B)",
+ sizeMb = 555, // Update these sizes based on the exact HuggingFace .task file
+ description = "Google's highly optimized mobile intelligence. Best balance of speed and reasoning.",
+ fileName = "gemma-3-1b-it.task",
+ downloadUrl = "https://huggingface.co/litert-community/Gemma3-1B-IT/resolve/main/gemma3-1b-it-int4.task" // Update with exact raw URL
+ ),
+ LocalModel(
+ id = "gemma3n-e2b",
+ name = "Gemma 3n (E2B)",
+ sizeMb = 3600,
+ description = "Elastic architecture. Activates fewer parameters for battery efficiency while maintaining high logic.",
+ fileName = "gemma-3n-e2b-it.task",
+ downloadUrl = "https://huggingface.co/google/gemma-3n-E2B-it-litert-lm/resolve/main/gemma-3n-E2B-it-int4.litertlm"
+ ),
+ LocalModel(
+ id = "Gemma3-1B-IT_multi-prefill-seq_q8_ekv4096",
+ name = "Gemma 3 (1B) Prefill",
+ sizeMb = 3390,
+ description = "A highly optimised and fine tuned model for agentic tasks and function calling.",
+ fileName = "Gemma3-1B-IT_multi-prefill-seq_q8_ekv4096.task",
+ downloadUrl = "https://huggingface.co/litert-community/Gemma3-1B-IT/resolve/main/Gemma3-1B-IT_multi-prefill-seq_q8_ekv4096.task"
)
)
diff --git a/app/src/main/res/values/strings.xml b/app/src/main/res/values/strings.xml
index 154744c..610e393 100644
--- a/app/src/main/res/values/strings.xml
+++ b/app/src/main/res/values/strings.xml
@@ -1,3 +1,4 @@
<string name="app_name">Alice</string>
+ <string name="accessibility_service_description">Alice Screen Reader Service</string>
\ No newline at end of file
diff --git a/app/src/main/res/xml/accessibility_service_config.xml b/app/src/main/res/xml/accessibility_service_config.xml
new file mode 100644
index 0000000..a0c72ce
--- /dev/null
+++ b/app/src/main/res/xml/accessibility_service_config.xml
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="utf-8"?>
+<accessibility-service xmlns:android="http://schemas.android.com/apk/res/android"
+    android:accessibilityEventTypes="typeAllMask"
+    android:accessibilityFeedbackType="feedbackGeneric"
+    android:accessibilityFlags="flagDefault"
+    android:canRetrieveWindowContent="true"
+    android:description="@string/accessibility_service_description" />
\ No newline at end of file