From 0e595ea428ffc753c6ebcaaad249c91d1b3d9969 Mon Sep 17 00:00:00 2001
From: KyMAN <0kyman0@gmail.com>
Date: Mon, 27 Apr 2026 07:04:14 +0200
Subject: [PATCH] #wip: Testing the base Ollama setup.

---
 .gitignore         |   2 +
 AIChat.py          |  53 ++++++++++++++++
 README.md          |  27 +++++++-
 docker-compose.yml |  25 ++++++++
 navecitas.html     | 150 +++++++++++++++++++++++++++++++++++++++++++++
 navecitas.v2.html  | 134 ++++++++++++++++++++++++++++++++++++++++
 version            |   1 +
 7 files changed, 391 insertions(+), 1 deletion(-)
 create mode 100644 .gitignore
 create mode 100644 AIChat.py
 create mode 100644 docker-compose.yml
 create mode 100644 navecitas.html
 create mode 100644 navecitas.v2.html
 create mode 100644 version

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..e3dd274
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+/ollama
+/open-webui
\ No newline at end of file
diff --git a/AIChat.py b/AIChat.py
new file mode 100644
index 0000000..13bf15e
--- /dev/null
+++ b/AIChat.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+from typing import Self, Any
+from threading import Thread
+from requests import post as Post, Response
+from json import loads as json_decode
+
+class AIChat:
+    # Default request body; "stream" makes Ollama return one JSON object per line.
+    PAYLOAD:dict[str, str|bool] = {
+        "model" : "gemma",
+        "stream" : True
+    }
+
+    def __init__(self:Self) -> None:
+        self.__working:bool = True
+        self.__thread:Thread = Thread(target=self.__listener)
+        self.__thread.start()
+
+    def send(self:Self, message:str) -> None:
+        try:
+
+            response:Response
+            # /api/generate takes a "prompt"; stream=True keeps requests from buffering the whole reply.
+            with Post('http://localhost:11434/api/generate', json = {**self.PAYLOAD, "prompt" : message}, stream = True) as response:
+
+                line:bytes
+
+                for line in response.iter_lines():
+                    if line:
+
+                        chunk:dict[str, Any|None] = json_decode(line)
+
+                        print(chunk.get("response", ""), end = "", flush = True)
+
+                        if chunk.get("done"):
+                            print()
+                            break
+        except Exception as exception:
+            print(f"An error occurred while sending the message: {exception}")
+
+    def __listener(self:Self) -> None:
+        while self.__working:
+
+            user_input:str = input('> ')
+
+            if user_input in ("close", "exit", "quit"):
+                self.__working = False
+            else:
+                self.send(user_input)
+
+ai_chat = AIChat()
\ No newline at end of file
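The script above streams tokens from Ollama's `/api/generate` endpoint. As a quick sanity check outside Python, the same endpoint can be hit with `curl`. A minimal sketch, assuming the `ollama` container from the compose file below is running and `gemma` has already been pulled:

```sh
# One-shot (non-streaming) request; the reply arrives as a single JSON object.
curl http://localhost:11434/api/generate \
  -d '{"model": "gemma", "prompt": "Hello", "stream": false}'
```

With `"stream": true` (the script's default), the endpoint instead returns one JSON object per line, each carrying a `response` fragment and a final `done` flag, which is exactly what `iter_lines()` in `AIChat.send` consumes.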
diff --git a/README.md b/README.md
index 567e69e..fd75c9f 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,28 @@
 # AIDockerCompose
 
-Ecosystem of AI interpreters and models via Docker Compose.
\ No newline at end of file
+Ecosystem of AI interpreters and models via Docker Compose.
+
+# Notes
+
+```sh
+#!/bin/bash
+
+# Pull the models.
+docker exec -it ollama ollama pull gemma
+
+# Pull and try it out from the terminal.
+docker exec -it ollama ollama run gemma
+
+```
+
+Models:
+
+- gemma - 5GB
+- gemma3 - 3.3GB
+
+# References
+
+- https://hub.docker.com/r/ollama/ollama
+- https://github.com/ollama/ollama
+- https://ollama.com/library
+- https://ollama.com/steamdj/llama3.1-cpu-only
\ No newline at end of file
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000..6b8f4cf
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,25 @@
+services:
+  ollama:
+    image: ollama/ollama:latest
+    container_name: ollama
+    volumes:
+      - ./ollama:/root/.ollama
+    ports:
+      - 11434:11434/tcp
+    pull_policy: always
+    tty: true
+    restart: unless-stopped
+
+  # open-webui:
+  #   image: ghcr.io/open-webui/open-webui:main
+  #   container_name: open-webui
+  #   volumes:
+  #     - ./open-webui:/app/backend/data
+  #   depends_on:
+  #     - ollama
+  #   ports:
+  #     - 3000:8080/tcp
+  #   environment:
+  #     - 'OLLAMA_BASE_URL=http://ollama:11434'
+  #     - 'WEBUI_SECRET_KEY=your_secret_key_here'
+  #   restart: unless-stopped
\ No newline at end of file
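Once the stack above is up, a couple of quick checks confirm that the service is reachable and which models are installed locally. A sketch, assuming the default port mapping from the compose file:

```sh
# Start the stack in the background.
docker compose up -d

# Ask the HTTP API which models are available (empty until a pull).
curl http://localhost:11434/api/tags

# Equivalent check from inside the container.
docker exec -it ollama ollama list
```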
diff --git a/navecitas.html b/navecitas.html
new file mode 100644
index 0000000..0ab5074
--- /dev/null
+++ b/navecitas.html
@@ -0,0 +1,150 @@
+
+
+