Voice+™
Set up a voice assistant for your website or web app
The basic setup for Voice+ looks like this:
<html lang="en">
  <head>
    <title>Touchpoint Sample HTML</title>
    <meta name="viewport" content="width=device-width, initial-scale=1">
  </head>
  <body>
    <script type="module">
      import { create } from "https://unpkg.com/@nlxai/touchpoint-ui/lib/index.js?module";

      const touchpoint = await create({
        config: {
          applicationUrl: "https://apps.nlx.ai/c/eEPT4sP6HLboEiTHSDZ7x/M0aE_SZnl3HQG4bU96WtT",
          headers: {
            "nlx-api-key": "yuTwyfoq9r_nY75Wm-jVuCSLWU45wQQy"
          },
          languageCode: "en-US",
        },
        input: "voiceMini",
        bidirectional: {}
      });
    </script>
  </body>
</html>
Note the voiceMini input type: it keeps the Touchpoint user interface small and relatively unobtrusive, so the user can keep interacting with your site. The bidirectional flag opens a command channel through which the NLX voice application can affect your website.
Automatic context
By default, Voice+ operates in fully automatic mode:
it automatically gathers context about the form fields and hyperlinks available on the current page using accessibility APIs, and makes this context available to the LLM
it handles user requests for navigation and form filling automatically.
You may want to customise one or both of these processes: the automatic behavior often covers 90% of what you need, and a simple customisation can make the whole experience perfect.
Let's look at an example:
<html lang="en">
  <head>
    <title>Touchpoint Sample HTML</title>
    <meta name="viewport" content="width=device-width, initial-scale=1">
  </head>
  <body>
    <script type="module">
      import { create } from "https://unpkg.com/@nlxai/touchpoint-ui/lib/index.js?module";

      const touchpoint = await create({
        config: {
          applicationUrl: "https://bots.studio.nlx.ai/c/eEPT4sP6HLboEiTHSDZ7x/M0aE_SZnl3HQG4bU96WtT",
          headers: {
            "nlx-api-key": "yuTwyfoq9r_nY75Wm-jVuCSLWU45wQQy"
          },
          languageCode: "en-US",
        },
        input: "voiceMini",
        bidirectional: {
          customizeAutomaticContext({ context, state }) {
            const linksAlwaysAvailable = {
              "Product": "/index.asp?pageId=prod",
              "Terms & Conditions": "/index.asp?cat=legal&p0",
            };
            return {
              context: {
                ...context,
                // Spread the labels so each one becomes an individual destination
                destinations: [
                  ...context.destinations,
                  ...Object.keys(linksAlwaysAvailable),
                ],
              },
              state: {
                ...state,
                links: {
                  ...state.links,
                  ...linksAlwaysAvailable,
                },
              },
            };
          },
          navigation(action, destination, availableDestinations) {
            if (action === "page_custom" && destination != null) {
              const url = availableDestinations[destination];
              if (url != null) {
                // `router` is your app's client-side router instance
                router.navigate(url);
              }
            } else if (action === "page_next") {
              history.forward();
            } else if (action === "page_previous") {
              history.back();
            }
          },
        }
      });
    </script>
  </body>
</html>
In this example we augment the context sent to the LLM with a few hardcoded links. We also make sure the navigation hook plays nicely with our client-side routing library (in this example, TanStack Router).
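As a rough sketch of that wiring, the router instance referenced in the navigation hook could come from your app's TanStack Router setup. The route tree import below is the conventional generated file and an assumption about your project, not part of the Touchpoint API:

import { createRouter } from "@tanstack/react-router";
// Generated route tree; the path is an assumption about your project layout
import { routeTree } from "./routeTree.gen";

// The `router` instance used inside the navigation hook above
export const router = createRouter({ routeTree });

// Note that TanStack Router's navigate method takes an options object rather
// than a bare URL string, so inside the hook you would delegate like this:
// router.navigate({ to: url });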
We can also make custom actions available to the voice assistant. The set of available custom actions can be dynamically modified during the user session, so the following API can be called repeatedly.
import { create } from "@nlxai/touchpoint-ui";
import * as z from "zod/v4";
import { foodOrder } from "../myApi/foodOrder";

const touchpoint = await create({
  config: {
    applicationUrl: "https://bots.studio.nlx.ai/c/eEPT4sP6HLboEiTHSDZ7x/M0aE_SZnl3HQG4bU96WtT",
    headers: {
      "nlx-api-key": "yuTwyfoq9r_nY75Wm-jVuCSLWU45wQQy"
    },
    languageCode: "en-US",
  },
  input: "voiceMini",
  bidirectional: {}
});

const burgerSchema = z.object({
  type: z
    .enum(["cheeseburger", "veggieburger", "hamburger"])
    .describe("Which variety of burger to order"),
  count: z
    .number()
    .min(1)
    .max(200)
    .default(1)
    .describe("How many burgers to order"),
});

// potentially later
touchpoint.setCustomBidirectionalCommands([
  {
    action: "burger",
    description: "Order a hamburger",
    schema: z.toJSONSchema(burgerSchema),
    handler: ({ type, count }: z.infer<typeof burgerSchema>) => {
      foodOrder({
        product: type,
        quantity: count,
      });
    },
  },
]);
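Because the command set can change at any point during the session, a later call can swap out what the assistant is allowed to do. A minimal sketch, assuming each call to setCustomBidirectionalCommands replaces the previously registered set; the cancelOrder helper and the action name are hypothetical:

// Later in the same session, e.g. once a burger order has been placed.
// `cancelOrder` is a hypothetical helper from your own API layer.
import { cancelOrder } from "../myApi/foodOrder";

const cancelSchema = z.object({
  reason: z.string().optional().describe("Why the order is being cancelled"),
});

touchpoint.setCustomBidirectionalCommands([
  {
    action: "cancelBurgerOrder",
    description: "Cancel the burger order that is in progress",
    schema: z.toJSONSchema(cancelSchema),
    handler: ({ reason }: z.infer<typeof cancelSchema>) => {
      cancelOrder({ reason });
    },
  },
]);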

