Compare commits

...

5 Commits

Author SHA1 Message Date
sealad886 126de8c0e9 Merge 1b1e6c9546 into 6e7ac0544e 2025-02-19 12:14:33 +00:00
sealad886 1b1e6c9546 trying to get flags to work. some movement, but not quite there yet. 2025-01-27 15:25:01 +00:00
sealad886 d94d2bf8d1 alias stuff removed 2025-01-27 13:08:12 +00:00
sealad886 7e4b330e98 Took out the manpage parts. 2025-01-27 09:49:43 +00:00
sealad886 78515593d5 feat(ollama): add plugin for managing and running language models with autocompletion and man page support 2025-01-27 09:42:07 +00:00
2 changed files with 266 additions and 0 deletions

95  plugins/ollama/README.md  Normal file

@@ -0,0 +1,95 @@
# Ollama Plugin for Oh-My-Zsh
This plugin enhances your Zsh shell environment by integrating powerful features for managing, running, and creating large language models locally using the [Ollama CLI](https://ollama.ai/). The plugin provides streamlined workflows, autocompletion, and man page support, making it easier than ever to interact with your local AI models.
## Features
- **Command Autocompletion**: Full support for Ollama CLI commands, options, and arguments.
- **Dynamic Model Suggestions**: Automatically suggests available models based on the output of `ollama list`, as shown in the example below.
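For example, pressing `TAB` after `ollama run ` completes from whatever `ollama list` reports on your machine (the names below are illustrative):
```sh
$ ollama run <TAB>
deepseek-r1:8b     deepseek-r1:14b-qwen-distill-q8_0
deepseek-r1:32b    deepseek-r1:70b
```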
## Installation
### Prerequisites
- A working installation of [Oh-My-Zsh](https://ohmyz.sh/).
- The Ollama CLI installed on your system. Refer to the [official Ollama documentation](https://github.com/ollama/ollama) for setup instructions.
### Steps
1. **Enable the Plugin**
Add `ollama` to the `plugins` array in your `.zshrc` file:
```sh
# in your ~/.zshrc file
plugins=(... ollama)
```
or
```sh
# from shell
omz plugin enable ollama
```
To get the most benefit from the completions, including helpful usage hints and grouped suggestions, set a description format style:
```sh
# ~/.zshrc
# add the following zstyle entry wherever you want
zstyle ':completion:*:*:*:*:descriptions' format '%F{green}%d%f'
```
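With this style set, suggestions are grouped under a highlighted heading, looking roughly like this (exact rendering depends on your theme and terminal):
```sh
$ ollama <TAB>
ollama commands
cp      -- Duplicate an existing model
list    -- Display all available models
run     -- Execute a model with a given prompt
...
```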
2. **Restart Your Shell**
Apply the changes by reloading Oh-My-Zsh:
```sh
omz reload
```
## Usage
### Commands
The plugin provides autocompletion and enhanced functionality for the following Ollama commands:
| Command | Description |
|-------------|------------------------------------------|
| `serve`, `start`| Start the Ollama server locally. |
| `create` | Create a model from a Modelfile. |
| `show` | Display information about a specific model. |
| `run` | Execute a model with a given prompt. |
| `stop` | Terminate a running model. |
| `pull` | Download a model from a registry. |
| `push` | Upload a model to a registry. |
| `list`, `ls` | List all available models. |
| `ps` | Show currently running models. |
| `cp` | Duplicate an existing model locally. |
| `rm` | Remove a model from the local system. |
| `help [command]` | Provide help information for a command. |
For example, listing your local models:
```sh
$ ollama ls
NAME ID SIZE MODIFIED
deepseek-r1:14b-qwen-distill-q8_0 022efe288297 15 GB 3 hours ago
deepseek-r1:32b 38056bbcbb2d 19 GB 3 days ago
deepseek-r1:8b 28f8fd6cdc67 4.9 GB 3 days ago
deepseek-r1:70b 0c1615a8ca32 42 GB 3 days ago
```
## Notes
- **Model Naming**: Models follow a `model:tag` format. If no tag is provided, Ollama defaults to `latest`. A model can be invoked with or without the explicit tag (e.g. `ollama run llama3.2` is equivalent to `ollama run llama3.2:latest`).
- **Multiline Input**: Use triple quotes (`"""`) for multiline prompts:
```zsh
> """What is the impact of AI on society?
... Include specific examples."""
```
## License
This project is licensed under the MIT License.
For more details, visit the [Ollama CLI GitHub repository](https://github.com/ollama/ollama).
Currently maintained by [sealad886](https://github.com/sealad886).

171  plugins/ollama/ollama.plugin.zsh  Normal file

@@ -0,0 +1,171 @@
# ------------------------------------------------------------------------------
# ollama.plugin.zsh
#
# Plugin providing Zsh completions for the `ollama` command.
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Function: _ollama_get_models
# Purpose: Retrieves the list of available models for completion.
# Uses `ollama list` with a short timeout and provides candidates.
# ------------------------------------------------------------------------------
_ollama_get_models() {
local models_output
local timeout=5 # Timeout duration in seconds
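  # NOTE: `timeout` is the GNU coreutils utility and is not installed by default
  # on macOS; if it is missing, the failure branch below fires even when the
  # `ollama` CLI itself works.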
# Attempt to fetch models via `ollama list`; if it fails, show a short message.
models_output="$(timeout $timeout ollama list 2>/dev/null)" || {
_message "Failed to fetch models"
return 1
}
# Accumulate parsed model names here
local -a models
local line
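  # Typical `ollama list` output parsed by this loop (the first column becomes
  # the completion candidate):
  #   NAME                ID              SIZE    MODIFIED
  #   deepseek-r1:8b      28f8fd6cdc67    4.9 GB  3 days ago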
while IFS= read -r line; do
# Skip blank lines and header lines (starting with NAME)
[[ -z "$line" || "$line" =~ ^NAME ]] && continue
# Extract the first column and escape any colons for safety
local suggestion="${line%% *}"
    suggestion="${suggestion//:/\\:}"
models+=("$suggestion")
done <<< "$models_output"
# Provide model suggestions using `_describe`
_describe -t models 'models' models
}
# ------------------------------------------------------------------------------
# Function: _ollama
# Purpose: The main completion function for the `ollama` CLI. Determines which
# subcommand is being completed, then sets up the corresponding flags
# and suggestions.
# ------------------------------------------------------------------------------
_ollama() {
# List of top-level commands and their descriptions
local -a commands=(
'serve:Start the Ollama server'
'create:Create a model from a Modelfile'
'show:Display information about a specific model'
'run:Execute a model with a given prompt'
'stop:Terminate a running model'
'pull:Download a model from the registry'
'push:Upload a model to the registry'
    'list:Display all available models'
    'ls:Display all available models (alias of list)'
'ps:Show currently running models'
'cp:Duplicate an existing model'
'rm:Delete a model from the local system'
'help:Provide help information for a command'
)
# Standard local variables used by _arguments
local curcontext="$curcontext" state line
local -A opt_args
# The main `_arguments` call for handling top-level options (e.g. -h, -v)
# and capturing the first positional argument -> subcommand, then the rest.
_arguments -C \
'(-h --help)'{-h,--help}'[Display help information]' \
'(-v --version)'{-v,--version}'[Show version information]' \
'1: :->command' \
'*:: :->args'
# If the user is trying to complete the first argument (the subcommand),
# then we present them the `commands` array above.
case $state in
command)
_describe -t commands 'ollama commands' commands
return
;;
esac
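  # Note: the `*:: :->args` spec above re-indexes the completion context for
  # subcommand arguments: from here on, $words[1] is the subcommand itself and
  # $CURRENT counts from it, so $CURRENT -eq 2 means the first argument after
  # the subcommand is being completed.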
# If the first argument is known, proceed with subcommand-specific completions
case $words[1] in
serve)
_arguments \
'(-p --port)'{-p,--port}'[Specify the port number]:port number:'
;;
create)
# If user typed only `ollama create ` (with no second arg),
# display a short message to remind them to name the new model
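      # e.g. a finished invocation this branch helps complete:
      #   ollama create my-model -f ./Modelfile -q q4_0
      # (the model name and file path above are illustrative)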
if [[ $CURRENT -eq 2 ]]; then
_message 'Specify the new model name'
else
# Otherwise, offer flags for `create`
_arguments \
'(-f --filename)'{-f,--filename}'[Path to the Modelfile]:Modelfile:_files' \
'(-q --quantize)'{-q,--quantize}'[Quantization method (e.g. q4_0)]' \
'--prefix[Set a prefix for the created model]' \
'(-h --help)--help[Show help for create]'
fi
;;
show)
      if [[ $CURRENT -eq 2 ]]; then
        _message 'Usage: ollama show MODEL [flags]'
        _ollama_get_models
else
_arguments \
          '--license[Show the license of the model]' \
          '--modelfile[Show the Modelfile of the model]' \
          '--parameters[Show model parameters]' \
          '--system[Show the system message of the model]' \
          '--template[Show the template of the model]' \
'(-h --help)--help[Show help for show]'
fi
;;
run)
# Display usage message only if there's no argument yet
if [[ $CURRENT -eq 2 ]]; then
_message "Usage: ollama run MODEL [PROMPT] [flags]"
_ollama_get_models
else
# Define flags for the `run` command
local -a _run_flags=(
          '--format=[Response format (e.g. json)]'
'--insecure[Use an insecure registry]'
'--keepalive=[Time to keep the model loaded (e.g. 5m)]'
'--nowordwrap[Disable word wrapping]'
'--verbose[Show response timings]'
'(-h --help)--help[Show help for run]'
)
# Use a mix of `_arguments` and manual handling for freeform input
if [[ $CURRENT -eq 3 ]]; then
# Suggest a freeform prompt (arbitrary input)
_message "Enter a prompt as a string"
else
# Provide flag completions
_arguments -S "${_run_flags[@]}"
fi
fi
;;
cp)
# The `cp` command expects `ollama cp SOURCE DEST`
if [[ $CURRENT -eq 2 ]]; then
_ollama_get_models
elif [[ $CURRENT -eq 3 ]]; then
_message 'Specify the destination model name'
fi
;;
    rm|stop|pull|push)
      # All of these commands accept one or more model names,
      # so offer model-name completions at any argument position
      _ollama_get_models
      ;;
    # If the subcommand doesn't match anything above, fall back to default
*)
_default
;;
esac
}
# Finally, register the completion function for the `ollama` command
compdef _ollama ollama