generated from actions/typescript-action
-
Notifications
You must be signed in to change notification settings - Fork 54
Expand file tree
/
Copy path action.yml
More file actions
91 lines (87 loc) · 2.9 KB
/
action.yml
File metadata and controls
91 lines (87 loc) · 2.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
name: 'AI Inference'
description: 'Generate an AI response based on a provided prompt'
author: 'GitHub'

# Branding shown on the GitHub Marketplace.
branding:
  icon: 'message-square'
  color: 'red'

# Action inputs. Empty-string defaults mean "not provided"; the action's
# entry point is responsible for interpreting them.
inputs:
  prompt:
    description: 'The prompt for the model'
    required: false
    default: ''
  prompt-file:
    description: 'Path to a file containing the prompt (supports .txt and .prompt.yml formats)'
    required: false
    default: ''
  input:
    description: 'Template variables in YAML format for .prompt.yml files'
    required: false
    default: ''
  file_input:
    description: >-
      Template variables in YAML format mapping variable names to file paths.
      The file contents will be used for templating.
    required: false
    default: ''
  model:
    description: 'The model to use'
    required: false
    default: 'openai/gpt-4o'
  endpoint:
    description: 'The endpoint to use'
    required: false
    default: 'https://models.github.ai/inference'
  system-prompt:
    description: 'The system prompt for the model'
    required: false
    default: 'You are a helpful assistant'
  system-prompt-file:
    description: 'Path to a file containing the system prompt'
    required: false
    default: ''
  max-tokens:
    # Deprecated: prefer max-completion-tokens.
    description: 'The maximum number of tokens to generate (deprecated)'
    required: false
    default: '200'
  max-completion-tokens:
    description: 'The maximum number of tokens to generate'
    required: false
    default: ''
  temperature:
    description: 'The sampling temperature to use (0-1)'
    required: false
    default: ''
  top-p:
    description: 'The nucleus sampling parameter to use (0-1)'
    required: false
    default: ''
  token:
    description: 'The token to use'
    required: false
    default: '${{ github.token }}'
  enable-github-mcp:
    description: 'Enable Model Context Protocol integration with GitHub tools'
    required: false
    default: 'false'
  github-mcp-token:
    description: >-
      The token to use for GitHub MCP server (defaults to the main token if
      not specified). This must be a PAT for MCP to work.
    required: false
    default: ''
  github-mcp-toolsets:
    description: >-
      Comma-separated list of toolsets to enable for GitHub MCP (e.g.,
      "repos,issues,pull_requests,actions"). Use "all" for all toolsets,
      "default" for default set. If not specified, uses default toolsets
      (context,repos,issues,pull_requests,users).
    required: false
    default: ''
  custom-headers:
    description: >-
      Custom HTTP headers to include in API requests. Supports both YAML
      format (header1: value1) and JSON format ({"header1": "value1"}).
      Useful for API Management platforms, rate limiting, and request
      tracking.
    required: false
    default: ''

# Action outputs.
outputs:
  response:
    description: 'The response from the model'
  response-file:
    description: 'The file path where the response is saved'

runs:
  using: node24
  main: dist/index.js