diff --git a/config.org b/config.org
index 519160e..f282cf8 100644
--- a/config.org
+++ b/config.org
@@ -64,6 +64,9 @@
   - [[#notifications-1][Notifications]]
 - [[#rainbow-mode][RAINBOW MODE]]
 - [[#ai][AI]]
+  - [[#ellama][Ellama]]
+  - [[#gptel][GPT.el]]
+  - [[#minuet][Minuet]]
 - [[#shells-and-terminals][SHELLS AND TERMINALS]]
   - [[#eshell][Eshell]]
   - [[#vterm][Vterm]]
@@ -1133,6 +1136,7 @@ Display the actual color as a background for any hex color value (ex. #ffffff).
     ((org-mode prog-mode) . rainbow-mode))
 #+end_src
 * AI
+** Ellama
 #+begin_src emacs-lisp
   (use-package ellama
     :ensure t
@@ -1144,9 +1148,72 @@ Display the actual color as a background for any hex color value (ex. #ffffff).
     (setopt ellama-language "Russian")
     :config
     ;; show ellama context in header line in all buffers
-    (ellama-context-header-line-global-mode +1)
+    ;;(ellama-context-header-line-global-mode +1)
     ;; show ellama session id in header line in all buffers
-    (ellama-session-header-line-global-mode +1))
+    ;;(ellama-session-header-line-global-mode +1)
+    )
+#+end_src
+** GPT.el
+#+begin_src emacs-lisp
+  (use-package gptel
+    :ensure t
+    :bind
+    (("C-c a c" . gptel)
+     ("C-c a r" . gptel-rewrite)
+     ("C-c a s" . gptel-send)
+     ("C-c a f" . gptel-add-file))
+    :config
+    (setq gptel-model 'llama3.1
+          gptel-backend (gptel-make-ollama "Ollama"
+                          :host "localhost:11434"
+                          :stream t
+                          :models '(llama3.1))
+          gptel-track-media t
+          gptel-default-mode 'org-mode)
+    (add-hook 'gptel-post-stream-hook 'gptel-auto-scroll)
+    (add-hook 'gptel-post-response-functions 'gptel-end-of-response))
+#+end_src
+** Minuet
+#+begin_src emacs-lisp
+  (use-package minuet
+    :ensure t
+    :bind
+    (("M-y" . #'minuet-complete-with-minibuffer) ;; use minibuffer for completion
+     ("M-i" . #'minuet-show-suggestion) ;; use overlay for completion
+     ("C-c m" . #'minuet-configure-provider)
+     :map minuet-active-mode-map
+     ;; These bindings are active only while a minuet suggestion is displayed in the current buffer
+     ("M-p" . #'minuet-previous-suggestion) ;; cycle to the previous completion
+     ("M-n" . #'minuet-next-suggestion) ;; cycle to the next completion
+     ("M-A" . #'minuet-accept-suggestion) ;; accept the whole completion
+     ;; Accept the first line of the completion, or N lines with a numeric prefix:
+     ;; e.g. C-u 2 M-a accepts 2 lines of the completion.
+     ("M-a" . #'minuet-accept-suggestion-line)
+     ("M-e" . #'minuet-dismiss-suggestion))
+
+    :init
+    ;; Enable auto suggestion in programming buffers.
+    ;; Completions can still be invoked manually without enabling minuet-auto-suggestion-mode.
+    (add-hook 'prog-mode-hook #'minuet-auto-suggestion-mode)
+
+    :config
+    ;; You can use M-x minuet-configure-provider to interactively configure the provider and model
+    (setq minuet-provider 'openai-fim-compatible)
+    (setq minuet-n-completions 1) ; recommended for local LLMs to save resources
+    ;; Begin with a small context window size and incrementally expand it,
+    ;; depending on your local computing power. A context window of 512 is a
+    ;; good starting point for estimating your computing power. Once you have
+    ;; a reliable estimate, adjust the context window to a larger value.
+    (setq minuet-context-window 512)
+    (plist-put minuet-openai-fim-compatible-options :end-point "http://localhost:11434/v1/completions")
+    ;; The API key only needs to name an arbitrary non-empty environment variable as a placeholder.
+    ;; For Windows users, TERM may not be present in environment variables;
+    ;; consider using APPDATA instead.
+    (plist-put minuet-openai-fim-compatible-options :name "Ollama")
+    (plist-put minuet-openai-fim-compatible-options :api-key "TERM")
+    (plist-put minuet-openai-fim-compatible-options :model "llama3.1")
+
+    (minuet-set-optional-options minuet-openai-fim-compatible-options :max_tokens 56))
 #+end_src
 * SHELLS AND TERMINALS
 ** Eshell