Added LLM endpoint calling logic.

This commit is contained in:
SatyamSah5
2025-09-04 11:49:27 +05:30
parent 4ca4f2cc00
commit f1be2a07c6

View File

@@ -128,7 +128,7 @@ public class AISummaryActionExecuter extends ActionExecuterAbstractBase
ContentReader txtReader = tempWriter.getReader(); ContentReader txtReader = tempWriter.getReader();
try (InputStream is = txtReader.getContentInputStream()) { try (InputStream is = txtReader.getContentInputStream()) {
String textString = new String(is.readAllBytes()); String textString = new String(is.readAllBytes());
String aiResult = sendToAIEndpoint(is); String aiResult = sendToAIEndpoint(textString);
QName AI_SUMMARY_PROP = QName.createQName(CONTENT_MODEL_1_0_URI, "AiSummary"); QName AI_SUMMARY_PROP = QName.createQName(CONTENT_MODEL_1_0_URI, "AiSummary");
nodeService.setProperty(actionedUponNodeRef, AI_SUMMARY_PROP, aiResult); nodeService.setProperty(actionedUponNodeRef, AI_SUMMARY_PROP, aiResult);
// Optionally, store or log the result // Optionally, store or log the result
@@ -141,44 +141,37 @@ public class AISummaryActionExecuter extends ActionExecuterAbstractBase
 * Sends the extracted text content to the LLM endpoint and returns the generated summary.
 * Performs an HTTP POST with a JSON payload and parses the "response" field of the JSON reply.
 */
private String sendToAIEndpoint(InputStream txtContent) throws Exception { private String sendToAIEndpoint(String txtContent) throws Exception {
// Example: read content, send to AI, return result
// Implement actual HTTP client logic here
// Read input stream to string // Read input stream to string
// String inputText = new String(txtContent.readAllBytes(), StandardCharsets.UTF_8);
//
// // Prepare JSON payload // Build JSON payload
// String payload = "{ \"inputs\": " + escapeJson(inputText) + " }"; String payload = "{"
// + "\"context\": " + escapeJson(txtContent) + ","
// // Create connection + "\"prompt\": \"provide concise summary\""
// URL url = new URL("https://api-inference.huggingface.co/models/gpt2"); + "}";
// HttpURLConnection conn = (HttpURLConnection) url.openConnection();
// conn.setRequestMethod("POST"); // Create connection
// conn.setRequestProperty("Authorization", "Bearer " + HUGGING_FACE_TOKEN); URL url = new URL("http://alfresco-llm-ai:5000/api/respond");
// conn.setRequestProperty("Content-Type", "application/json"); HttpURLConnection conn = (HttpURLConnection) url.openConnection();
// conn.setDoOutput(true); conn.setRequestMethod("POST");
// conn.setRequestProperty("Content-Type", "application/json");
// // Send request conn.setDoOutput(true);
// try (OutputStream os = conn.getOutputStream()) {
// os.write(payload.getBytes(StandardCharsets.UTF_8)); // Send request
// } try (OutputStream os = conn.getOutputStream()) {
// os.write(payload.getBytes(StandardCharsets.UTF_8));
// // Read response }
// int status = conn.getResponseCode();
// InputStream responseStream = (status >= 200 && status < 300) ? conn.getInputStream() : conn.getErrorStream(); // Read response
// String response = new String(responseStream.readAllBytes(), StandardCharsets.UTF_8); int status = conn.getResponseCode();
// InputStream responseStream = (status >= 200 && status < 300) ? conn.getInputStream() : conn.getErrorStream();
// // Simple extraction of generated text (not robust, for demo) String jsonResponse = new String(responseStream.readAllBytes(), StandardCharsets.UTF_8);
// int start = response.indexOf("\"generated_text\":\"");
// if (start != -1) { // Parse JSON and extract the "response" field
// start += 18; org.json.JSONObject obj = new org.json.JSONObject(jsonResponse);
// int end = response.indexOf("\"", start); String summary = obj.optString("response", "");
// if (end != -1) { return summary.trim();
// return response.substring(start, end);
// }
// }
// return "AI summary unavailable";
return "AI summary or insights result";
} }
// Helper to escape JSON string