
Commit 8e9fecc

chore: update iac config
1 parent 7cbcf6e commit 8e9fecc

File tree

  azure.yaml
  infra/main.bicep
  infra/main.parameters.json
  infra/resources.bicep

4 files changed: +181 additions, −18 deletions

azure.yaml

+1 −1

@@ -2,7 +2,7 @@
 
 name: llama-index-javascript
 metadata:
-  template: azd-init@1.15.1
+  template: llama-index-javascript@1.15.1
 services:
   llama-index-javascript:
     project: .

infra/main.bicep

+47 −2

@@ -9,8 +9,6 @@ param environmentName string
 @description('Primary location for all resources')
 param location string
 
-param llamaIndexJavascriptExists bool
-
 @description('Id of the user or app to assign application roles')
 param principalId string
 
@@ -23,6 +21,33 @@ var tags = {
   'azd-env-name': environmentName
 }
 
+param llamaIndexJavascriptExists bool
+param isContinuousIntegration bool // Set in main.parameters.json
+
+var llamaIndexConfig = {
+  chat: {
+    model: 'gpt-4o-mini'
+    deployment: 'gpt-4o-mini'
+    version: '2024-07-18'
+    capacity: 10
+  }
+  embedding: {
+    model: 'text-embedding-3-large'
+    deployment: 'text-embedding-3-large'
+    version: '1'
+    dim: '1024'
+    capacity: 10
+  }
+  model_provider: 'openai'
+  openai_api_key: ''
+  llm_temperature: '0.7'
+  llm_max_tokens: '100'
+  openai_api_version: '2024-02-15-preview'
+  top_k: '3'
+  fileserver_url_prefix: 'http://localhost/api/files'
+  system_prompt: 'You are a helpful assistant who helps users with their questions.'
+}
+
 // Organize resources in a resource group
 resource rg 'Microsoft.Resources/resourceGroups@2021-04-01' = {
   name: 'rg-${environmentName}'
@@ -38,7 +63,27 @@ module resources 'resources.bicep' = {
     tags: tags
     principalId: principalId
     llamaIndexJavascriptExists: llamaIndexJavascriptExists
+    llamaIndexConfig: llamaIndexConfig
+    isContinuousIntegration: isContinuousIntegration
   }
 }
 output AZURE_CONTAINER_REGISTRY_ENDPOINT string = resources.outputs.AZURE_CONTAINER_REGISTRY_ENDPOINT
 output AZURE_RESOURCE_LLAMA_INDEX_JAVASCRIPT_ID string = resources.outputs.AZURE_RESOURCE_LLAMA_INDEX_JAVASCRIPT_ID
+
+output AZURE_OPENAI_ENDPOINT string = resources.outputs.AZURE_OPENAI_ENDPOINT
+output AZURE_DEPLOYMENT_NAME string = llamaIndexConfig.chat.deployment
+output AZURE_OPENAI_API_VERSION string = llamaIndexConfig.openai_api_version
+
+// LlamaIndex configuration
+output MODEL_PROVIDER string = llamaIndexConfig.model_provider
+output MODEL string = llamaIndexConfig.chat.model
+output EMBEDDING_MODEL string = llamaIndexConfig.embedding.model
+output EMBEDDING_DIM string = llamaIndexConfig.embedding.dim
+output OPENAI_API_KEY string = llamaIndexConfig.openai_api_key
+output LLM_TEMPERATURE string = llamaIndexConfig.llm_temperature
+output LLM_MAX_TOKENS string = llamaIndexConfig.llm_max_tokens
+output TOP_K string = llamaIndexConfig.top_k
+output FILESERVER_URL_PREFIX string = llamaIndexConfig.fileserver_url_prefix
+output SYSTEM_PROMPT string = llamaIndexConfig.system_prompt
+output OPENAI_API_TYPE string = 'AzureOpenAI'
+output STORAGE_CACHE_DIR string = './cache'

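After `azd provision`, these main.bicep outputs are saved into the azd environment, and infra/resources.bicep (further down in this commit) passes the same values to the container app as environment variables. A minimal TypeScript sketch of the consuming side — the module name, helper, and fallback values below are illustrative assumptions, not code from this repository:

// config.ts (hypothetical) — reads the settings the Bicep templates expose as env vars.
function env(name: string, fallback?: string): string {
  const value = process.env[name] ?? fallback;
  if (value === undefined) throw new Error(`Missing environment variable: ${name}`);
  return value;
}

// Fallbacks mirror the defaults in llamaIndexConfig above; adjust as needed.
export const llamaIndexRuntimeConfig = {
  modelProvider: env('MODEL_PROVIDER', 'openai'),
  model: env('MODEL', 'gpt-4o-mini'),
  embeddingModel: env('EMBEDDING_MODEL', 'text-embedding-3-large'),
  embeddingDim: Number(env('EMBEDDING_DIM', '1024')),
  azureOpenAiEndpoint: env('AZURE_OPENAI_ENDPOINT'),
  azureDeploymentName: env('AZURE_DEPLOYMENT_NAME', 'gpt-4o-mini'),
  azureOpenAiApiVersion: env('AZURE_OPENAI_API_VERSION', '2024-02-15-preview'),
  llmTemperature: Number(env('LLM_TEMPERATURE', '0.7')),
  llmMaxTokens: Number(env('LLM_MAX_TOKENS', '100')),
  topK: Number(env('TOP_K', '3')),
  systemPrompt: env('SYSTEM_PROMPT', 'You are a helpful assistant who helps users with their questions.'),
  storageCacheDir: env('STORAGE_CACHE_DIR', './cache'),
};
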
infra/main.parameters.json

+18 −15

@@ -1,18 +1,21 @@
 {
-    "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentParameters.json#",
-    "contentVersion": "1.0.0.0",
-    "parameters": {
-      "environmentName": {
-        "value": "${AZURE_ENV_NAME}"
-      },
-      "location": {
-        "value": "${AZURE_LOCATION}"
-      },
-      "llamaIndexJavascriptExists": {
-        "value": "${SERVICE_LLAMA_INDEX_JAVASCRIPT_RESOURCE_EXISTS=false}"
-      },
-      "principalId": {
-        "value": "${AZURE_PRINCIPAL_ID}"
-      }
+  "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentParameters.json#",
+  "contentVersion": "1.0.0.0",
+  "parameters": {
+    "environmentName": {
+      "value": "${AZURE_ENV_NAME}"
+    },
+    "location": {
+      "value": "${AZURE_LOCATION}"
+    },
+    "llamaIndexJavascriptExists": {
+      "value": "${SERVICE_LLAMA_INDEX_JAVASCRIPT_RESOURCE_EXISTS=false}"
+    },
+    "principalId": {
+      "value": "${AZURE_PRINCIPAL_ID}"
+    },
+    "isContinuousIntegration": {
+      "value": "${GITHUB_ACTIONS}"
     }
+  }
 }

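The new isContinuousIntegration parameter is fed from GITHUB_ACTIONS, which GitHub Actions sets to the string 'true' on its runners; outside CI the variable is normally absent, so the flag should resolve to false and resources.bicep assigns the OpenAI role to a 'User' principal rather than a 'ServicePrincipal'. The same check expressed as a short TypeScript sketch (illustrative only, not part of this commit):

// GitHub Actions sets GITHUB_ACTIONS='true' on its runners.
const isContinuousIntegration: boolean = process.env.GITHUB_ACTIONS === 'true';

// Mirrors the ternary added in resources.bicep: CI deployments authenticate as a
// service principal, while a local `azd up` runs as the signed-in user.
const principalType: 'ServicePrincipal' | 'User' =
  isContinuousIntegration ? 'ServicePrincipal' : 'User';

console.log(`Role assignment principal type: ${principalType}`);
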
infra/resources.bicep

+115 −0

@@ -4,7 +4,13 @@ param location string = resourceGroup().location
 @description('Tags that will be applied to all resources')
 param tags object = {}
 
+@description('The configuration for the LlamaIndex application')
+param llamaIndexConfig object = {}
 
+
+var principalType = isContinuousIntegration ? 'ServicePrincipal' : 'User'
+
+param isContinuousIntegration bool
 param llamaIndexJavascriptExists bool
 
 @description('Id of the user or app to assign application roles')
@@ -100,6 +106,62 @@ module llamaIndexJavascript 'br/public:avm/res/app/container-app:0.8.0' = {
               name: 'PORT'
               value: '3000'
             }
+            {
+              name: 'AZURE_OPENAI_ENDPOINT'
+              value: openAi.outputs.endpoint
+            }
+            {
+              name: 'AZURE_DEPLOYMENT_NAME'
+              value: llamaIndexConfig.chat.deployment
+            }
+            {
+              name: 'AZURE_OPENAI_API_VERSION'
+              value: llamaIndexConfig.openai_api_version
+            }
+            {
+              name: 'MODEL_PROVIDER'
+              value: llamaIndexConfig.model_provider
+            }
+            {
+              name: 'MODEL'
+              value: llamaIndexConfig.chat.model
+            }
+            {
+              name: 'EMBEDDING_MODEL'
+              value: llamaIndexConfig.embedding.model
+            }
+            {
+              name: 'EMBEDDING_DIM'
+              value: llamaIndexConfig.embedding.dim
+            }
+            {
+              name: 'LLM_TEMPERATURE'
+              value: llamaIndexConfig.llm_temperature
+            }
+            {
+              name: 'LLM_MAX_TOKENS'
+              value: llamaIndexConfig.llm_max_tokens
+            }
+            {
+              name: 'TOP_K'
+              value: llamaIndexConfig.top_k
+            }
+            {
+              name: 'FILESERVER_URL_PREFIX'
+              value: llamaIndexConfig.fileserver_url_prefix
+            }
+            {
+              name: 'SYSTEM_PROMPT'
+              value: llamaIndexConfig.system_prompt
+            }
+            {
+              name: 'OPENAI_API_TYPE'
+              value: 'AzureOpenAI'
+            }
+            {
+              name: 'STORAGE_CACHE_DIR'
+              value: './cache'
+            }
           ]
         }
       ]
@@ -118,5 +180,58 @@ module llamaIndexJavascript 'br/public:avm/res/app/container-app:0.8.0' = {
     tags: union(tags, { 'azd-service-name': 'llama-index-javascript' })
   }
 }
+
+module openAi 'br/public:avm/res/cognitive-services/account:0.10.2' = {
+  name: 'openai'
+  params: {
+    name: '${abbrs.cognitiveServicesAccounts}${resourceToken}'
+    tags: tags
+    location: location
+    kind: 'OpenAI'
+    disableLocalAuth: true
+    customSubDomainName: '${abbrs.cognitiveServicesAccounts}${resourceToken}'
+    publicNetworkAccess: 'Enabled'
+    deployments: [
+      {
+        name: llamaIndexConfig.chat.deployment
+        model: {
+          format: 'OpenAI'
+          name: llamaIndexConfig.chat.model
+          version: llamaIndexConfig.chat.version
+        }
+        sku: {
+          capacity: llamaIndexConfig.chat.capacity
+          name: 'GlobalStandard'
+        }
+      }
+      {
+        name: llamaIndexConfig.embedding.deployment
+        model: {
+          format: 'OpenAI'
+          name: llamaIndexConfig.embedding.model
+          version: llamaIndexConfig.embedding.version
+        }
+        sku: {
+          capacity: llamaIndexConfig.embedding.capacity
+          name: 'Standard'
+        }
+      }
+    ]
+    roleAssignments: [
+      {
+        principalId: principalId
+        principalType: principalType
+        roleDefinitionIdOrName: 'Cognitive Services OpenAI User'
+      }
+      {
+        principalId: llamaIndexJavascriptIdentity.outputs.principalId
+        principalType: 'ServicePrincipal'
+        roleDefinitionIdOrName: 'Cognitive Services OpenAI User'
+      }
+    ]
+  }
+}
+
 output AZURE_CONTAINER_REGISTRY_ENDPOINT string = containerRegistry.outputs.loginServer
 output AZURE_RESOURCE_LLAMA_INDEX_JAVASCRIPT_ID string = llamaIndexJavascript.outputs.resourceId
+output AZURE_OPENAI_ENDPOINT string = openAi.outputs.endpoint

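With disableLocalAuth: true on the Cognitive Services account, key-based access is turned off, which is why both the deploying principal and the container app's identity receive the 'Cognitive Services OpenAI User' role. A minimal sketch of how the Node.js service could call the deployment with Entra ID tokens instead of an API key, assuming it uses the openai and @azure/identity packages (illustrative, not code from this repository):

import { DefaultAzureCredential, getBearerTokenProvider } from '@azure/identity';
import { AzureOpenAI } from 'openai';

// DefaultAzureCredential resolves to the container app's managed identity in Azure
// and to the signed-in developer credential when running locally.
const credential = new DefaultAzureCredential();
const azureADTokenProvider = getBearerTokenProvider(
  credential,
  'https://cognitiveservices.azure.com/.default',
);

// Endpoint, API version, and deployment come from the env vars set in resources.bicep.
const client = new AzureOpenAI({
  endpoint: process.env.AZURE_OPENAI_ENDPOINT,
  apiVersion: process.env.AZURE_OPENAI_API_VERSION,
  deployment: process.env.AZURE_DEPLOYMENT_NAME,
  azureADTokenProvider,
});

async function main(): Promise<void> {
  const completion = await client.chat.completions.create({
    model: process.env.AZURE_DEPLOYMENT_NAME ?? 'gpt-4o-mini',
    messages: [{ role: 'user', content: 'Hello from the llama-index-javascript sample' }],
  });
  console.log(completion.choices[0]?.message?.content);
}

main().catch(console.error);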