[{"data":1,"prerenderedAt":464},["ShallowReactive",2],{"footer-primary":3,"footer-secondary":93,"footer-description":119,"tv-enter-the-workshop":121,"tv-enter-the-workshop-seasons":131,"tv-enter-the-workshop-episodes":141,"sales-reps":212},{"items":4},[5,29,49,69],{"id":6,"title":7,"url":8,"page":8,"children":9},"522e608a-77b0-4333-820d-d4f44be2ade1","Solutions",null,[10,15,20,25],{"id":11,"title":12,"url":8,"page":13},"fcafe85a-a798-4710-9e7a-776fe413aae5","Headless CMS",{"permalink":14},"/solutions/headless-cms",{"id":16,"title":17,"url":8,"page":18},"79972923-93cf-4777-9e32-5c9b0315fc10","Backend-as-a-Service",{"permalink":19},"/solutions/backend-as-a-service",{"id":21,"title":22,"url":8,"page":23},"0fa8d0c1-7b64-4f6f-939d-d7fdb99fc407","Product Information",{"permalink":24},"/solutions/product-information-management",{"id":26,"title":27,"url":28,"page":8},"63946d54-6052-4780-8ff4-91f5a9931dcc","100+ Things to Build","https://directus.io/blog/100-tools-apps-and-platforms-you-can-build-with-directus",{"id":30,"title":31,"url":8,"page":8,"children":32},"8ab4f9b1-f3e2-44d6-919b-011d91fe072f","Resources",[33,37,41,45],{"id":34,"title":35,"url":36,"page":8},"f951fb84-8777-4b84-9e91-996fe9d25483","Documentation","https://docs.directus.io",{"id":38,"title":39,"url":40,"page":8},"366febc7-a538-4c08-a326-e6204957f1e3","Guides","https://docs.directus.io/guides/",{"id":42,"title":43,"url":44,"page":8},"aeb9128e-1c5f-417f-863c-2449416433cd","Community","https://directus.chat",{"id":46,"title":47,"url":48,"page":8},"da1c2ed8-0a77-49b0-a903-49c56cb07de5","Release Notes","https://github.com/directus/directus/releases",{"id":50,"title":51,"url":8,"page":8,"children":52},"d61fae8c-7502-494a-822f-19ecff3d0256","Support",[53,57,61,65],{"id":54,"title":55,"url":56,"page":8},"8c43c781-7ebd-475f-a931-747e293c0a88","Issue Tracker","https://github.com/directus/directus/issues",{"id":58,"title":59,"url":60,"page":8},"d77bb78e-cf7b-4e01-932a-514414ba49d3","Feature 
Requests","https://github.com/directus/directus/discussions?discussions_q=is:open+sort:top",{"id":62,"title":63,"url":64,"page":8},"4346be2b-2c53-476e-b53b-becacec626a6","Community Chat","https://discord.com/channels/725371605378924594/741317677397704757",{"id":66,"title":67,"url":68,"page":8},"26c115d2-49f7-4edc-935e-d37d427fb89d","Cloud Dashboard","https://directus.cloud",{"id":70,"title":71,"url":8,"page":8,"children":72},"49141403-4f20-44ac-8453-25ace1265812","Organization",[73,78,84,88],{"id":74,"title":75,"url":76,"page":77},"1f36ea92-8a5e-47c8-914c-9822a8b9538a","About","/about",{"permalink":76},{"id":79,"title":80,"url":81,"page":82},"b84bf525-5471-4b14-a93c-225f6c386005","Careers","#",{"permalink":83},"/careers",{"id":85,"title":86,"url":87,"page":8},"86aabc3a-433d-434b-9efa-ad1d34be0a34","Brand Assets","https://drive.google.com/drive/folders/1lBOTba4RaA5ikqOn8Ewo4RYzD0XcymG9?usp=sharing",{"id":89,"title":90,"url":8,"page":91},"8d2fa1e3-198e-4405-81e1-2ceb858bc237","Contact",{"permalink":92},"/contact",{"items":94},[95,101,107,113],{"id":96,"title":97,"url":8,"page":98,"children":100},"8a1b7bfa-429d-4ffc-a650-2a5fdcf356da","Cloud Policies",{"permalink":99},"/cloud-policies",[],{"id":102,"title":103,"url":81,"page":104,"children":106},"bea848ef-828f-4306-8017-6b00ec5d4a0c","License",{"permalink":105},"/bsl",[],{"id":108,"title":109,"url":81,"page":110,"children":112},"4e914f47-4bee-42b7-b445-3119ee4196ef","Terms",{"permalink":111},"/terms",[],{"id":114,"title":115,"url":81,"page":116,"children":118},"ea69eda6-d317-4981-8421-fcabb1826bfd","Privacy",{"permalink":117},"/privacy",[],{"description":120},"\u003Cp>A composable backend to build your Headless CMS, BaaS, and more.&nbsp;\u003C/p>",{"id":122,"title":123,"logo":124,"cover":125,"tile":126,"announcement_text":8,"description":127,"slug":128,"one_liner":129,"card_text":8,"status":130,"sort":8},"9f10d27e-0df8-4574-a0c8-34bbd33f0943","Enter the 
Workshop","d9920cd0-d3ea-474b-97ec-9b951bf784e7","a58e5acf-a9f8-4fa0-acfe-39d3951f775c","e9e9a7a1-29f9-4bab-b486-d75e385a9d13","Join members of the core team and special guests for workshops on using Directus. Each workshop will focus on a different technology or use case and will take you step by step through building with Directus.","enter-the-workshop","Join our core team and special guests for workshops on using Directus.","published",[132],{"id":133,"number":134,"show":122,"year":135,"episodes":136},"181a77a7-65c5-46b3-9e4f-474acc00436a",1,"2024",[137,138,139,140],"bb508a73-7947-4be9-8493-a226861cfe7c","b0159a9d-73b5-432e-b2a4-b3f606f8ba96","65956c5e-17ae-467d-8ae8-c2dd8cfcc2ab","45133ec4-b8c7-4989-83a3-b0b46c20835c",[142,159,177,195],{"id":137,"slug":143,"vimeo_id":144,"description":145,"tile":146,"length":147,"resources":8,"people":8,"episode_number":134,"published":148,"title":149,"video_transcript_html":150,"video_transcript_text":151,"content":8,"seo":152,"status":130,"episode_people":153,"recommendations":156,"season":157},"twiliio-dialer-panel-extension","963063255","Join Kevin and Nathaniel Okenwa, Developer Evangelist at Twilio, as they utilize Twilio's Voice SDK to build a Directus extension that allows outbound phone calls directly from the browser.","7f9a2314-505c-4f60-a5eb-1da0b9c07992",109,"2024-06-20","Build a Dialer Panel Extension in Directus Insights with Twilio","\u003Cp>Speaker 0: Hello. Hello. Hello.\u003C/p>\u003Cp>Speaker 1: How are you doing?\u003C/p>\u003Cp>Speaker 0: It's good to hear you. Great, mate. How are you? Yeah. Yeah.\u003C/p>\u003Cp>Yeah. I'm doing alright.\u003C/p>\u003Cp>Speaker 1: I'm doing alright.\u003C/p>\u003Cp>Speaker 0: It's wild to me that we have been friends in the same kind of job family for so long, and I don't think we've ever run a workshop before.\u003C/p>\u003Cp>Speaker 1: I know. I mean, I think it's been a missed opportunity for all the wonderful people that we haven't been able to hang out. 
Genuinely, it's crazy because we went to university together, for those of you who maybe have never heard. So we we've been known each other for a long time, and our careers have, like, had these moments where they almost look like they're about to cross. And I feel like this might be one of the first professional cross that you've said.\u003C/p>\u003Cp>I'm really excited.\u003C/p>\u003Cp>Speaker 0: Yeah. So I thought we'd open by just introducing ourselves, introducing what this event is, introducing what we're gonna be doing, how it's gonna work, and then we'll launch straight in and make a mess of it for 2 hours. How's that sound?\u003C/p>\u003Cp>Speaker 1: Sounds good. Sounds good. I could start by introducing\u003C/p>\u003Cp>Speaker 0: you first.\u003C/p>\u003Cp>Speaker 1: Hi, folks. My name is Nathaniel Okenwa. As Kevin would know, I talk a lot and I write code, so my friends call me Chatterbox Coder. That's where you can find me on all the socials. And I work for a company called Twilio, which is kind of what we're gonna be talking about today.\u003C/p>\u003Cp>Twilio, for those of you who don't know, it does many things. But here's the TLDR. We are telecommunications APIs that help you build amazing communications into your applications. That sounds like a lot of words, but you've definitely used Twilio at some point. If you've ever received a text message from a company, a phone call from a company, maybe a 2 f a text, lots of emails when it comes to Black Friday, even all sorts of communications, chances are they may have been using Twilio under the hood.\u003C/p>\u003Cp>Some of your biggest some of your biggest brands and favorite brands that people use use Twilio under the hood. But we also do so much more. So if you do wanna find out about some of the more advanced use cases, feel free to chat to me because the well of Twilio can be quite bottomless.\u003C/p>\u003Cp>Speaker 0: Yeah. Definitely one way to put it. Yeah. 
If you're here, you might be coming from the Twilio world and not have heard of Directus before. So I will also tell you a bit about Directus.\u003C/p>\u003Cp>Directus is a back end, basically, that you can use to build wicked applications. You connect it to a new or existing database, and any number of asset storage, storages, storage. I don't know if it's like the word sheep where it's the same in plural. And you immediately get developer tooling, including APIs, a real time interface, authentication, and a user management system, and this really lovely web application with which to interact with that database, which you can easily hand to people who aren't developers. So you don't need to build APIs.\u003C/p>\u003Cp>You don't need to build kind of these admin panels, admin panel back ends. Really cool tool. And today, we get to converge the 2, which I'm really, really excited about. So, the project today, is oh, actually, no. A little bit more preamble.\u003C/p>\u003Cp>This event's happening as part of Leap Week 3. This is our week of announcements. On Monday, we did a keynote where we announced directors 11, which is coming out this week as a release candidate where we announce new shows for Directus TV. This platform you're watching this video in right now has, now 35 shows worth of content. You can go and potter around and find some cool awesome content too.\u003C/p>\u003Cp>Speaker 1: Like, I must say.\u003C/p>\u003Cp>Speaker 0: Thank thank you very much. Thank you very much for that. I'll pay you under the table later. And also, and and a bunch of other things, as well. This is one of the workshops that's happening this week.\u003C/p>\u003Cp>So this one right here is this Twilio workshop. After this, like, an hour and a quarter after the end of this event is a 100 apps and a 100 hours live where Bryant and some of our colleagues are gonna build an app in 60 minutes. I don't know what he's gonna build yet. 
I'm not sure he knows yet, so that would be chaotic as it was last time. Tomorrow, we're doing a workshop with Deepgram, which is a voice AI company.\u003C/p>\u003Cp>We'll be building a cool project there and then a community social networking thing on Friday using a platform that doesn't suck. So you can come and have a chat with other people who use or know about or are interested in direct us, in hopefully not too of a not too much of an awkward format. But we're getting ahead of ourselves. We're here for this Twilio workshop. And what we will be building today is a panel extension for Directus Insights.\u003C/p>\u003Cp>So Directus Insights is this dashboard builder tool here that we have. And in a dashboard, you have any number of these panels which can interact with the data in your database. But these panels aren't just for reading data. You can actually put components within them that are interactive. So you can add, like, forms and buttons, and you could just run arbitrary codes in them if you want.\u003C/p>\u003Cp>And that's what we're gonna be exploiting today.\u003C/p>\u003Cp>Speaker 1: So so what I'm hearing, right, because I have not played around with insights. So, insights essentially just give you the ability to create components that are powered by your data. So there are some obviously components that are really well built. And then see, it doesn't actually have to just be reading data because the thing that I'm really excited about is the fact that the stuff we're gonna do is going to read data, but it's gonna interact with it and cause other things to happen as a result. And that's something which I think is very, very powerful, especially when it starts to come to, like, workflows and building maybe internal tooling as well for people.\u003C/p>\u003Cp>This could be really, really useful.\u003C/p>\u003Cp>Speaker 0: Ding ding ding. So we've not spoken much about director, so you're kind of coming in it pretty fresh as well. 
So please keep asking questions. But, yes, you said a word you said a pair of words there, which I think is really interesting, which is internal apps. And I actually think that is the kind of hidden power of director's insights.\u003C/p>\u003Cp>While all of the panels that ship out the box are very much about, you know, building graphs and charts and reading data and analyzing it, it doesn't it's not just that. And you can also obviously build panel extensions and distribute them through the marketplace. So today, we're gonna build a panel that will have a drop down and show you the users inside of your directors project. With other phone number to those users. And so there'll be a phone number attached to them.\u003C/p>\u003Cp>And then you press a button, and we'll use the please forgive me if I get it around the Twilio voice SDK. Yes. That's what it's called. Fantastic. And we're gonna call that that person from the browser.\u003C/p>\u003Cp>So the browser will connect to a phone number and do a two way call.\u003C/p>\u003Cp>Speaker 1: Cool. Yeah. I'm really excited because this is something which I think lots of people oftentimes, we end up having tools in different places. Right? So for example, especially when it comes to telecommunications and telephony, we always, like, separate so that you have your email account or a phone, like, application or something separate to all the places you have you there.\u003C/p>\u003Cp>And, yeah, I'm not saying that's not bad, but when you start wanting to be efficient or maybe have some more smart and cool interactions, bringing those 2 together gets really powerful. And I think, like, what I could see here is and we could talk about, like, potential use cases. Look, we're not trying to sell you a thing, but kind of just tell you practically some of the ways in which you could be using these these tools. 
And I think like this, it could be a good way for you to, for example, like, quickly integrate communications without necessarily exposing PII. Like, you can have it so the person using the tool never sees a phone number, but it's still able to phone them.\u003C/p>\u003Cp>Some of your favorite ride sharing apps kind of use a similar technology behind this if your driver ever calls you to be like, hey. I can't find you. But, anyway, I'll stop talking. Let's get on.\u003C/p>\u003Cp>Speaker 0: No. No. Please do. A couple of other just bits of context. So we'll refer to the director's docs, obviously, a bunch today.\u003C/p>\u003Cp>We'll take a look at some of the extension docs. As part of that, inside of these docs, there are a couple of existing Twilio guides, and one additional non Twilio guide. And I wanna talk about them before we crack on so you kinda understand the approach which we are going to use today. First of all, there is this guide here called use custom endpoints to create an authenticated API proxy, which is a lot of words. So to break that down, endpoints, custom endpoints are one of Directus' extension types, and they allow you to just create kind of what they sound like, arbitrary endpoints that you can hit.\u003C/p>\u003Cp>We expose an express router, so it works just like that. You know, you set up route handlers, and then you can hit them. But within the context of directives, you can start doing stuff like checking if the user's authenticated in in directors or checking their permission sets and so on and so forth. So, this is an example where we actually just expose the full Twilio. Let's find it here, where we expose let me find it.\u003C/p>\u003Cp>Twilio host. Yeah. Where we expose just a root URL. And then what we do is we check whether you are authenticated, and that's the key part right here, which means you can't just hit this endpoint from anywhere. 
Right?\u003C/p>\u003Cp>You have to do it either with your Directus API token or cookie, or from within Directus itself. So that's 1. The second one is, another Twilio integration. This uses our automation tool, direct us automate. They're called flows each kind of, workflow.\u003C/p>\u003Cp>And this one here allows you to send SMS notifications. That's kinda interesting.\u003C/p>\u003Cp>Speaker 1: Can you do for these? Because, like, obviously, with SMS, when I'm talking to devs who are building into their applications, they're often either a couple of things. Either they wanna send a message, like, when a specific action occurs, Maybe they wanna do a batch, like, at the end of the day, or there's, like, a campaign going out, especially when you start getting to marketing and stuff like that. And the same with email, because we we also you we have email API. So, like, how can people trigger those, those things?\u003C/p>\u003Cp>Those flows?\u003C/p>\u003Cp>Speaker 0: Great question. Five ways. Event hooks. So something happens in your director's project often in one of your collections. A collection is a database with, like, additional director's metadata.\u003C/p>\u003Cp>So when I say collection, you can think table, but it is not just a table. Right? When something happens in your database, you can immediately fire off an automation. So this can be, a new user is created, a new file is uploaded, or any of your tables are have CRUD operations, executed against them. They can run as blocking, as blocking triggers, which means the whole flow has to see its way to the end, and then the database transaction will execute so it, like, intercepts it.\u003C/p>\u003Cp>And as a result, you can actually fail out. So, we have an example somewhere in our docs using a verification API where if you fail the verification, we actually just block you from signing up entirely. 
It just never commits to the database.\u003C/p>\u003Cp>Speaker 1: Ah, it's\u003C/p>\u003Cp>Speaker 0: really cool. On the other hand, it is cool because you can also manipulate data in the middle. Maybe you enrich it or stuff like that. That's really, really good. And then and then you have actions, which happen after data after data has, been committed or after a transaction has been committed, then it will run.\u003C/p>\u003Cp>So that's event hooks. Then we have webhooks, so just inbound HTTP requests. We have a schedule. So, you know, you set up an interval using, the 6 point Chrome job syntax, and then we'll run the flow, which is how you could then batch. Or, 2 more, actually.\u003C/p>\u003Cp>There's another flow so you can compartmentalize functionality and trigger other flows from core from, like, your your controller flow and path data in and backup and stuff like that. And then finally, manual. What manual does is in your data views where you've got your list of all your items in a collection or you're in an individual item in the editor, there's a button you can press that will pass in the IDs of the items that you've either checked in the collection or the page, and then send that into the payload as well. Manual flow triggers also have confirmation dialogues so you can pop up and collect a bit more information. There's the button there on the right.\u003C/p>\u003Cp>And it can you can pop up a box, ask for arbitrary information, and then trigger the flow, which I think is a very, like I'm looking at thinking comms. So they're very communication type, you know, option here. You can go write a message and hit go, for example. So they're the triggers. There's a bunch of inbuilt operations.\u003C/p>\u003Cp>You can also build custom operations, but that allows you to interact with your database, make it external web requests, just write some arbitrary JavaScript that can manipulate data in a little more of a clever way. 
But, yeah, that's something that's really cool\u003C/p>\u003Cp>Speaker 1: about this tool. Yeah. No. Absolutely. And a thing which I'm kind of noticing is the number of different ways, the flexibility that is given to you to try and to to trigger these things in different ways allows you to put into put the automation into different parts of your application.\u003C/p>\u003Cp>Especially, I'm loving the filtering functionality. I can already spin off a couple of great reasons why I'd use that, especially when you start to want to reduce computational load or con reduce things from blocking our operations from going through or vice versa when you actually do wanna filter stuff. So that's useful. I'm gonna play around with that next time I get a chance.\u003C/p>\u003Cp>Speaker 0: So I wanna jump in. There's one more guide I wanna show because this guide is going to basically be the north star for our approach today. And it\u003C/p>\u003Cp>Speaker 1: is this, which is filled\u003C/p>\u003Cp>Speaker 0: a little bit with, but it's using external weather data in a custom panel extension. And you might be thinking, I don't really see the translation between this and using Twilio in a in a panel. So before I explain this, maybe we take a moment to explain the Twilio flow in this all the way from generating I think it'll come in quite nicely. I can share your screen or you can just talk. It's completely up to you.\u003C/p>\u003Cp>Speaker 1: Well, I can start by just talking, and then we can, we can get even further. So we start with the Twibio voice API. So Twilio has a bunch of different APIs, and we have one that we focus on programmable voice. I would say one is more of a collection because even with invoice, there's a lot of diversity depending on what you want to do. 
In today's example, kind of like the external weather data, we essentially want to have a component in a web page, and this component is going to be able to make phone calls out to the users that are stored in the director's database.\u003C/p>\u003Cp>Right. So in the director's back end. So what we're going to need to do is we need a couple of things. We need a browser component or a component that that lives in a browser. So essentially a browser that is able to make phone calls.\u003C/p>\u003Cp>Your browser, unfortunately, doesn't have telephone APIs. So what your browser does is your browser can connect to Twilio, Twilio's servers. And then Twilio then handles and creates telephony using, old school telephony that, like, is really, really boring slash complex or really, really interesting depending on how what floats your boat, and then does all of that in the background. But what we do is we expose an SDK which you can build into your front end applications. Now there is one extra step which maybe adds a little bit of complexity, but I just wanna talk a little bit about that, which is just a bit of authentication and security.\u003C/p>\u003Cp>Because we can't just hand anyone the keys to make phone calls from any number in the world. Right? So when you have a Twilio account, you have a phone number. And when your browser connects it with you, what happens first is there is an exchange of a token. A token must be generated, it's what we call it, and that browser must use that token to connect to Twilio.\u003C/p>\u003Cp>Now that token gives it a couple things. It first says, hi. I am Nathaniel's computer, and I am connecting to Twilio, and I'd like to be able to make phone calls. Also, it says I have a phone number identity. So, like, a a a phone number which is attached to my identity.\u003C/p>\u003Cp>So when I do make phone calls, I will come across as if this phone call is coming from said telephone number. And these things happen in the background. 
Now the SDK has a lot of, flexibility, so we start off with a few basic methods which you can use. And then if you want to build a really, really custom interface, there are a lot more events that we expose. But if you are looking to get started with Twilio voice, I would recommend heading over to the Twilio docs, which I am showing on my screen.\u003C/p>\u003Cp>They often take you through starting your first phone call. This is from a server. You may want to start your first phone phone call, but then we also have these client side SDKs, and we talk you through how you use the client side SDKs. We have JavaScript SDKs, iOS, Android, and we even have React Native SDKs. I often like to say it's good to start with a quick start and then work backwards because the quick start builds all of that functionality, and then you can customize it as you are.\u003C/p>\u003Cp>But then there is also the reference if you want to dig deeper into all of the methods and the things going on behind the scenes. Is there anything I've left out? I know we've talked about this before.\u003C/p>\u003Cp>Speaker 0: No. You did mention the thing I hoped you would mention so I can explain why whether the data and Twilio are related. Before I jump in, actually, a reminder for those watching you, we've got a chat here. I'm watching it. So by all means, ask questions, thoughts, concerns, grievances even, and we'll address them as we go.\u003C/p>\u003Cp>But let's come back to my screen. Right. Why does this matter? Extensions and directives live in 1 of 2 places. They live in the data studio, which is the web app, or they live in the data engine, which is the back end, an API side extension, you could call it.\u003C/p>\u003Cp>The browser, because of just security in the browser and the way that we lock that down, can't always confidently make an external web request. Right? It can't go off to Twilio and say, hey. Go generate an access token for me. 
In this example, the same way it can't go out to a third party weather API confident.\u003C/p>\u003Cp>Like, if you control both sides and you configure security on both sides, you'll be fine. But you often don't control the the vendor. You don't control Twilio. You don't control the weather API. So what do you do with this?\u003C/p>\u003Cp>Well, you use an endpoint. Use an endpoint, which is an API extension. Use an endpoint first to actually make those external requests do what it needs. And then your front end extension, the panel in this case, and in this case in the tutorial, then calls an internal endpoint because it's that's what it is now. It's now gonna be like / Twilio token, that my direct test project slash Twilio token.\u003C/p>\u003Cp>So it's now an internal API. So you you act you treat it like a proxy, which is actually not dissimilar to the post I showed earlier. To bring these together and make sure that you always have both of them and you don't have to deploy them separately, we have a concept called a bundle. So first, you create a bundle extension, which is an empty shell, then you add an endpoint, then you add a panel. And when you install the bundle, you get both of them together as as is required.\u003C/p>\u003Cp>So that's gonna be more\u003C/p>\u003Cp>Speaker 1: hearing you right. Just as a mental model, what I'm hearing is in this bundle, if this bundle was the application that fits into the panel, you kind of have the component, which would be the front end logic and then the endpoints can stand in for like a like almost like your server side. I know this is a weird way to think about it, but ways that you can query data and interact with other applications.\u003C/p>\u003Cp>Speaker 0: Yes and no. The only thing I'll change there is query data because panel extensions, for example, can query collections in directors without needing a a server. 
So if it's within the bounds of your director's project and the services we provide, in fact, we have them the the the they are called\u003C/p>\u003Cp>Speaker 1: Query external data. So data from outside. Correct.\u003C/p>\u003Cp>Speaker 0: Correct. Because, for example, your front end, your front end, your app extensions is what we call them, your front end extensions, have immediate access to all of your data because you're in this authenticated, you know, box that you can work within. But, yes, exactly. So the reason I wanted to show this, this weather panel thing is because that's basically we're gonna follow a very similar approach here. We're gonna create a bundle.\u003C/p>\u003Cp>We're gonna create an endpoint which will allow us to generate a token, then our panel will call that to grab the token and then use the voice SDK. So we've been talking for probably about minutes. I'd love to jump in. Now the format this is going to take, we decided just because, of of of the fiddliness of certain parts of this, is we are gonna go nice and slow, but we're going to present. We're gonna build it together.\u003C/p>\u003Cp>You can watch, and this will be available on demand if you wanna play along. This will also get turned into a blog post, and hello there is on demand. This will also turn into a blog post you can follow if reading, you know, technical material is more your thing. So if you're watching along, I I would say don't try and play along necessarily, but, take advantage of the fact that chat is here because we're we're here to chat to you. Right?\u003C/p>\u003Cp>Otherwise, this would just be a just a video, but this is a live event for a reason. Before we jump in, is there anything else you wanna add?\u003C/p>\u003Cp>Speaker 1: Nothing comes to mind. Let's get going.\u003C/p>\u003Cp>Speaker 0: Let's get going. So Why\u003C/p>\u003Cp>Speaker 1: do we do this? 
Why don't we have you start with, like, spinning up the environment that we're gonna use, getting us set up, so that we are ready to start building the telephony parts? And then I can step in. I can focus on building the Twilio parts. I know we we've we will switch between them and then connect those together and then do a little test\u003C/p>\u003Cp>Speaker 0: test as well. That sounds good. I actually had a little bit of a rundown that structure which looks slightly different. We'll follow yours, but as a result, this might end up being slightly more chaotic, but I like that. I like that format a little bit more.\u003C/p>\u003Cp>The the gremlins have just returned home, so we'll see what happens in the next 10 minutes. So can you hear that? You can hear that. Yeah.\u003C/p>\u003Cp>Speaker 1: Oh, now I can. Now I can.\u003C/p>\u003Cp>Speaker 0: Yeah. Now you can. So here I have a brand new completely empty code editor. I am running Docker in the background. We'll be running Directus locally with Docker.\u003C/p>\u003Cp>So if you're following along, you'll need that. And what we're gonna do first is we're going to create a Docker composed dot yml file. And I do just happen to have here sequel I do happen to just have a light one prewritten because, you don't wanna watch me write this. Go grab the latest version of Directus, expose it on port 8255, map some of the internal volumes that Directus uses to local directories. So when we first run this, it will create a database in the uploads and an extensions folder.\u003C/p>\u003Cp>We need a key and secret, which you should replace with a random value. To me, replace with random value is random enough for today. The initial admin email and password, you, of course, can change that. We're gonna just use SQLite because I don't wanna we don't need anything more heavy, and so that will, pop the file inside of this, data file here. 
WebSockets are enabled, not that I think we're gonna use them today, but this exposes a WebSocket and GraphQL subscription interface for, subscribing to updates and directors.\u003C/p>\u003Cp>This one line here is not in our quick start, in our docs, and it just will improve the developer experience of building extensions. Whenever we'll we'll set it running so whenever we save it or rebuild, whenever it rebuilds, it'll auto upload indirectus. So with that done, we're gonna go ahead and, just taking a quick look at the chat. Hello, Scott. And that's c collection that logs different tokens for Twilio or other APIs to variable use token by another key.\u003C/p>\u003Cp>Then you have Internet access to modify tokens for phone numbers or something. This is to always be missing in Directus, and you end up doing this outside Directus UI. Hold that thought for some short period of time. We know. But today, we'll use environment variables here in, here in the Docker Compose file.\u003C/p>\u003Cp>But we're not gonna that's not gonna be the approach today in the interest of time. And then Docker Compose. That should just do its thing, he says. There we go. Running all the first time setup, and then it will run-in just a moment at 8055.\u003C/p>\u003Cp>So local host 8055. There is our brand new director's project.\u003C/p>\u003Cp>Speaker 1: That was so quick.\u003C/p>\u003Cp>Speaker 0: Yeah. I like it. It's pretty sick. And this is also full fact directives. It's not like a shitter version you run locally.\u003C/p>\u003Cp>Like, it is the full thing when you use direct to this is what you use. The only caveat here is SQLite does not have geospatial plugins included out the box. So if you wanna use the mapping features, you'll wanna use just a different docker compose that's in our docs. It just takes a little longer to bootstrap, and it it wasn't necessary for today. 
Okay.\u003C/p>\u003Cp>Right now, there are a bunch of directors collections that have been created out of the box. Directors doesn't alter your tables in order to run. Instead, we create this, I don't know, like 20 ish tables or prepended with directus underscore. That means if you wanna get rid of directus later, you can just delete the application, delete these tables, and it's like we were never there. But this handles all of the UI, all of the settings, all of the configuration, they all live here.\u003C/p>\u003Cp>And the one thing I'm gonna do, because you can extend these, you can't delete these default fields, but you can add new ones, is for all the things that are here, avatar, email, title, you know, all of these, there isn't one for a phone number. So I am just gonna add phone because I think that's gonna be useful for later. Cool. I, out the box, have this one user here, but I think it would\u003C/p>\u003Cp>Speaker 1: be So quick question. So when you added phone, you you did it as a string. Just question, can you do any sort of validation from there\u003C/p>\u003Cp>Speaker 0: Yes.\u003C/p>\u003Cp>Speaker 1: I can. On that input?\u003C/p>\u003Cp>Speaker 0: Yeah. You can indeed. Over here, edit field validation.\u003C/p>\u003Cp>Speaker 1: Nice. Nice. And and, validation, what kind of validation do you use? What other rules? What is it?\u003C/p>\u003Cp>Speaker 0: Fantastic question. I think you could just throw in a red x to be honest. Yeah. It matches. You could\u003C/p>\u003Cp>Speaker 1: But also, it's got a bunch of other ones. Nice. Nice. That's awesome.\u003C/p>\u003Cp>Speaker 0: Yeah. Yeah. Yeah. It's pretty cool. And you can do, like, you know, logic you can do grouping.\u003C/p>\u003Cp>Right? And or and stuff like that. We'll leave it, and we'll just trust ourselves to put phone numbers in. Right? Don't do that in the real.\u003C/p>\u003Cp>I worked at another telephony company that offers APIs in the past. 
And you and I both know, no one ever knows how to put a phone number in correctly, and it's a huge pain in the ass.\u003C/p>\u003Cp>Speaker 1: So enforced validation. Because many different places, actually regionally write their phone numbers differently. So the way Americans write their phone numbers, especially compared to Europeans, Even, like, recently, we I had been I had built an app. Go to Singapore. We're testing the app.\u003C/p>\u003Cp>The app fails because Singaporean numbers just have way more digits than we had accounted for.\u003C/p>\u003Cp>Speaker 0: So Interesting.\u003C/p>\u003Cp>Speaker 1: Make sure you, like, keep it flexible for all of these inputs. But anyway, phone number.\u003C/p>\u003Cp>Speaker 0: Give me a phone number in whatever the format Twilio likes me. I think Twilio likes the plus.\u003C/p>\u003Cp>Speaker 1: Plus? 44 Yeah. 79 4757 41 48.\u003C/p>\u003Cp>Speaker 0: Why are you giving those me two numbers at a time? Anyway, whatever. Here. There. When we put you in.\u003C/p>\u003Cp>This is a user now in our in our project. If I provided the email and the password, you would be able to, you know, log and enable the app access. You would be able to log in here. The password is hashed when you save it, but you could you could just pop it in here. Yeah.\u003C/p>\u003Cp>Yeah. See? I see it in the chat here. I have to add a one in brackets. Like, it's a huge, honestly.\u003C/p>\u003Cp>Also, Americans in particular, that one in the brackets and sometimes you put other numbers in the brackets too, Okay. Absolutely. Archaic. Anyway,\u003C/p>\u003Cp>Speaker 1: it's it's like as it's as bad as them in the month before the day and the date.\u003C/p>\u003Cp>Speaker 0: Let's I'm not even gonna\u003C/p>\u003Cp>Speaker 1: Alright. But but seriously, though, there is international standards, ISO. I forget what the rest of the number is. But there is a Is it\u003C/p>\u003Cp>Speaker 0: the E164 thing?\u003C/p>\u003Cp>Speaker 1: I believe so. Yes. 
There is an international standard for phone yes. E164. That's what I meant.\u003C/p>\u003Cp>So you know, ISO. I don't know why it where that came from in my brain. But, you should check this out. And what you can often do, there are lots of tools and one that is actually sponsored by Twilio where they can, change inputs to fit the format. So it's always international plus international code, then the digits with no spaces.\u003C/p>\u003Cp>Speaker 0: But, interestingly, other telephony API company matches e164, but doesn't put the plus in front of it. So I don't think it's technically to standard, but it whatever. Twilio needs the plus. We've put the plus\u003C/p>\u003Cp>Speaker 1: in.\u003C/p>\u003Cp>Speaker 0: We we\u003C/p>\u003Cp>Speaker 1: Twilio doesn't need the plus. Like, we will pop in\u003C/p>\u003Cp>Speaker 0: the plus. Interesting. But\u003C/p>\u003Cp>Speaker 1: but the the important thing that I recommend to people is that you use that format when you have it stored because it's then uniform across every single phone. Absolutely. I wanna do is have some people have that ability and some people don't. So whichever way you do it, store it in your in your, in your, database, in your back end as, like, just one format, and then have logic that separates it out and changes what it looks like to users based on regionalization.\u003C/p>\u003Cp>Speaker 0: And not to derail us too much here, but Twilio does provide an API which formats phone numbers. I think it's called insights or lookup or something. Lookup. Lookup like the basic tier or whatever whatever will format the number for you. So you could introduce that as either a filter or an action event based trigger, and you could format it before it ever gets to your database.\u003C/p>\u003Cp>You rely on, you know, the user to get it somewhat correct in order to know. You know, you drop the country code entirely, for example, and sometimes it won't know which country is it. 
But in any case, let's I feel I I feel our time slipping away. So we have set up a project. We've added this extra field.\u003C/p>\u003Cp>I've added you in here. You'll be the guinea pig who gets the calls. Next in my little list was to add a bunch of environment variables and handle, like, all all of the Twilio configurations so we can focus on extension building. But I'm happy to get to the point where we first need a Twilio environment variable to do that. So it's up to you.\u003C/p>\u003Cp>What do you wanna do?\u003C/p>\u003Cp>Speaker 1: Why don't we jump to my screen and start from a console? I'm gonna start from the dashboard. Like,\u003C/p>\u003Cp>Speaker 0: 5 times zoomed in, like 5 zoom in points, please.\u003C/p>\u003Cp>Speaker 1: Would you like me to zoom in more?\u003C/p>\u003Cp>Speaker 0: No. That's good. That's solid. Cool.\u003C/p>\u003Cp>Speaker 1: Alright. Let's go. So the first thing we're gonna need, because we're going to be making phone calls, is we're going to need a phone number. So we're gonna come over here in our dashboard, and we're gonna find our active numbers. Now I actually went in and bought a phone number earlier.\u003C/p>\u003Cp>And the reason I did this is because, phone regulations and some some of you maybe who've used Twilio in the past, Buying a phone number can be very, very quick and simple. Simple. However, there are regulations that come in from countries and, governments, and it's really good because it helps protect us from spam, which means you often have to upload, either ID or maybe address an address and other sorts of identification to say that you are who you say you are. So I had to do this. It literally I uploaded my ID with this number, and within half an hour, it was done.\u003C/p>\u003Cp>I will say I have a at Twilio dot com email so that expedites it. But within 48 hours, we usually get 1 get it done for you. 
And you can still start building and sending messages to the phone number that was verified with your account. So, for example, when you create an account, you have a 2 f a number. You can start messaging that 2 f a number, but then start messaging all users.\u003C/p>\u003Cp>You then need to have that extra level of, ID and regulation com regulatory compliance. Okay. So I'm based in the UK, so I think we should just use a UK phone number.\u003C/p>\u003Cp>Speaker 0: Works for me.\u003C/p>\u003Cp>Speaker 1: So now I have a UK phone number, and this is the first, I number that I'm going to hold on and keep into a safe place. So now I own a phone number. Now the thing is, for us\u003C/p>\u003Cp>Speaker 0: Could I put could I pause you? Yes. Throw me the phone number in our little joint chat. I wanna show what I'm gonna do with it because then I don't need to show again what we're gonna do with them. If it's okay with you, give me 2 ticks.\u003C/p>\u003Cp>Steal it stealing it for a moment. Over here in our Docker Compose file, we have these environment variables. I've added one called Twilio phone number. I'm gonna take the one that Nathaniel just sent me here, and there will be a series of other environment variables as Nathaniel goes around the UI. I will populate here.\u003C/p>\u003Cp>A couple of them are sensitive, so I'm no longer going to show the Docker Compose. I'll show you what happens once we get them all populated, though. Okay. As you\u003C/p>\u003Cp>Speaker 1: were. Awesome. So now that we have this, the next thing we're going to need, and it's just underneath, is a TwiML application. TwiML applications are pretty important. They are ways that we can essentially give Twilio some instructions of what what to do when phone calls happen.\u003C/p>\u003Cp>We are going to create a new to more application, and we're going to call this directus for the workshop. Now, what you often have is you have a voice configuration. 
This says what we should what should happen when a phone call is made to or from this TwiML app. I'm going to come and fill this in in just one moment. But I'm gonna hit create.\u003C/p>\u003Cp>So now we've got this director's Twilio workshop. We're gonna open it into a new tab. And I'm gonna come back and grab this SID. I need this SID for what we're going to do next. So this is the identifier of the application that we are going to be using.\u003C/p>\u003Cp>Alright. One more thing that I need to do. I'm going to be using something called functions to do this. You don't actually need to use functions. You can write your own endpoints, and your own ways to do this.\u003C/p>\u003Cp>I am going to be using this. I'm going to create a new function. I'm going to be creating a function that uses the Twilio client quick start, and I'm going to give it that AppSid that we just collected. I'm also going to give it a phone number. This phone number is the caller ID, and we're going to create it.\u003C/p>\u003Cp>Drum roll. It's creating in the background Any second now. I know why it's failed to create\u003C/p>\u003Cp>Speaker 0: Twilio cogs go brr.\u003C/p>\u003Cp>Speaker 1: The reason why it's failed to create it is because, I did a practice run a couple of minutes ago, and they have the same name. So I'm just gonna quickly delete these and then start again. Create this, pop in a caller ID and pop in that ID. This time, touching wood, it should be fine. Uh-oh.\u003C/p>\u003Cp>Okay. I might have to come back to this. Come on, folks.\u003C/p>\u003Cp>Speaker 0: I can always do this step as well. Like, it's all good. We'll we'll we'll we'll get this.\u003C/p>\u003Cp>Speaker 1: We'll get there.\u003C/p>\u003Cp>Speaker 0: You got this.\u003C/p>\u003Cp>Speaker 1: Okay. Once I've done this one, we'll we'll come back to it in a second. I think it's just, like, deploying, and it's actually going and taking down some assets, and it maybe needs to be taken a second. 
But we'll come back to this. Okay.\u003C/p>\u003Cp>So we'll come back and do this later, but for now, we have our Twilio phone number. We have our Twilio API appsid, and then we also need one more thing. We need to get API keys and tokens. That's\u003C/p>\u003Cp>Speaker 0: How many do you account set?\u003C/p>\u003Cp>Speaker 1: Yep. As well. Yeah. Keys and tokens and then accounts set. And this is essentially how we can, authenticate with Twilio.\u003C/p>\u003Cp>Give me a second. Just need to log in because I've been in for a certain amount of time, and I'm trying to access a secure page. And I need to turn over and hit my\u003C/p>\u003Cp>Speaker 0: So when all is said and done, to generate tokens using a an endpoint in Directus and to do all the stuff we're gonna need to do later, there are 5 pieces of information we will need from Twilio. We need a phone number. We need the the TwiML app SID, which you've generated. I'll grab it off you in a moment. We need your account SID.\u003C/p>\u003Cp>We need an API key and secret, which we're in the middle of generating now. We'll use the API key and secret to generate tokens, and we'll do that in the endpoint.\u003C/p>\u003Cp>Speaker 1: Cool. Sending over the SIT and sending Lovely.\u003C/p>\u003Cp>Speaker 0: This is the TwiML the TwiML app, Sid?\u003C/p>\u003Cp>Speaker 1: These are the API keys and secrets, and the TwiML Sid begins with an a p. So if it starts with a s k, it is always a secret key. If it starts with a a p\u003C/p>\u003Cp>Speaker 0: Man, you just sent you just sent me 3 long ass numbers, and I don't know it right. The first one is what?\u003C/p>\u003Cp>Speaker 1: So if it starts with a s k, it's a secret key.\u003C/p>\u003Cp>Speaker 0: Yeah. Right. Right. Right. Got you.\u003C/p>\u003Cp>Got you. Got you. Got you. Got you.\u003C/p>\u003Cp>Speaker 1: The one under the secret key is the, secret no. So the see. Yeah. Secret keys partner. Secretly keys is the secret secret?\u003C/p>\u003Cp>Yeah. 
And then the last one that starts with an a p is the TwiML app.\u003C/p>\u003Cp>Speaker 0: Yeah. Right. I'm gonna just say this again. The one that begins AP is the TwiML app.\u003C/p>\u003Cp>Speaker 1: Yes.\u003C/p>\u003Cp>Speaker 0: The one that begins 2 k is the API key.\u003C/p>\u003Cp>Speaker 1: Is the API secret?\u003C/p>\u003Cp>Speaker 0: Alright. And the one that begins SK is the\u003C/p>\u003Cp>Speaker 1: API key.\u003C/p>\u003Cp>Speaker 0: Right. Okay. Cool. That's why we double check. Can you imagine if we got much further and we're just like, why the hell isn't this working?\u003C/p>\u003Cp>It's because I missed copy and pasted a key. Freaking hell, man. I need your account, Sid, too. And then we've got everything we need from the Twilio side.\u003C/p>\u003Cp>Speaker 1: Account Sid\u003C/p>\u003Cp>Speaker 0: asked. Thank you. And we need to just make sure that function can be deployed correctly. And that's linked to the TwiML app.\u003C/p>\u003Cp>Speaker 1: Let me quickly go there and try this. I'm just gonna quickly hit this function. Alright.\u003C/p>\u003Cp>Speaker 0: Good. We're literally still in Sab. I'm having a blast doing this with you, by the way. We should do it more often.\u003C/p>\u003Cp>Speaker 1: Yeah. Alright. Now it works perfectly fine because there's nothing else that has the same name. And let me just quickly tell people what this generates. This generates a function with just a little bit of code that just says, that if this number is coming from from a Twilio client, it should be forwarded to, whoever it's going to be calling and vice versa.\u003C/p>\u003Cp>Right? I'm\u003C/p>\u003Cp>Speaker 0: gonna copy this It's kinda nice that you could just create it from the boilerplate and forget about it to a degree. I still need your account, Sid. And then we are all groovy. I mean, get onto building building. Yeah.\u003C/p>\u003Cp>You didn't send it to me. Sid.\u003C/p>\u003Cp>Speaker 1: It's the last one. A c. 
Oh, cool.\u003C/p>\u003Cp>Speaker 0: Alright.\u003C/p>\u003Cp>Speaker 1: And, last thing, I'm just gonna quickly grab this URL, head over to the TwilioTwiML app that we created, and pop it into our voice. And voila, we are done.\u003C/p>\u003Cp>Speaker 0: Lovely. And what I will just say is the recording will be available on demand. We'll also write this up. So there was there was quite a few little moving parts there. They do all have purposes that, you know, that are important in this whole journey.\u003C/p>\u003Cp>We'll detail them there as well so you can in case it hasn't quite sunk in, it can. Of course, we also have the chat if you wanna ask further questions now. But with that, can I take my screen share back? Yep. Wonderful.\u003C/p>\u003Cp>So I popped all of those environment variables inside of the Docker Compose file, and now I just need to restart. So control c, up and enter. And now we restart the Docker container, and it will grab those variables and put them in the environment. So there we are. So now we're ready to actually go ahead and just start creating an extension.\u003C/p>\u003Cp>So what we're gonna do is I'm gonna create a new a new, terminal over here. And, in here, this is this folder, I wanna get into this extensions directory. So cd extensions, and I'm gonna run the npx create directors extension. Extension. Extension.\u003C/p>\u003Cp>That doesn't look right. Extension. Extend. That's correct. Latest.\u003C/p>\u003Cp>Just to make sure I definitely have the latest version of that. So, I get to pick the extension type that it will boilerplate, and I actually want a bundle, which I just happen to know is the last in the list. So I went up. I can call it whatever I want. I suppose I'll call it, Twilio, Twilio.\u003C/p>\u003Cp>I'll call it Twilio, whatever. Auto install dependencies. So just a reminder that a bundle isn't really an extension type in its own right. It's a shell where we can put multiple extensions. 
Now when these were originally created, they were actually to share dependencies and, you know, and reduce the overall code bundle size, But they also have this purpose of making sure you can distribute multiple extensions together.\u003C/p>\u003Cp>So it's just going and scaffolding that now. And the moment that's done, we're gonna go ahead and add an extension straight into that bundle. So we'll just give that a moment there. There we go. We will go into this new Twilio directory, and we will go, we'll run npm run ads.\u003C/p>\u003Cp>I believe that's what it is. And once again, we get to pick an extension type. Now if you remember, we're gonna build 2. We're gonna build a panel and an end point. The end point will generate the token.\u003C/p>\u003Cp>So we're gonna do that first, make sure we can generate a token, then we'll move on to building the panel. So we want an endpoint, and we'll call this one Twilio token. We'll just do it in JavaScript, and it will go and add that to this, package here.\u003C/p>\u003Cp>Speaker 1: And for those of you who are wondering why are we doing all of these Twilio tokens? Twilio tokens are because you're not going to actually put any of the credentials that we've just generated. Most of them none of them actually going to live in the browser, because that's unsafe. Someone could find them in the client. But what they do is they can be used by this function to create a temporary token, which then gets sent to the browser.\u003C/p>\u003Cp>Does that make sense? So we use these tokens from Twilio or no. These credentials from Twilio, I'm gonna call them, to create tokens. That token, it uses the account SID API key and secret to say, I am a Twilio approved. But I am the person on this account making this request.\u003C/p>\u003Cp>It then uses the TwiML appsid to say, this is the application I want to interact with and I would like to have permission to interact with. 
And then that generates a token which gets sent over to the browser, which can now interact with Twilio.\u003C/p>\u003Cp>Speaker 0: Hang on a minute. Oh, did it did it update in the thing here? It does say extensions reloaded. That was just while it was in the middle of boiler plating. Just to I'm I'm just gonna restart the Docker container.\u003C/p>\u003Cp>I don't think I actually need to do this. I just saw an error and I was like, let's work that out. So I think it should just be Twilio token. Twilio token. Oh, there it is.\u003C/p>\u003Cp>There we go. So that so that is this wrap. Ahoy ahoy world. Ahoy, world.\u003C/p>\u003Cp>Speaker 1: And for those of you who are wondering why we said ahoy, world. Ahoy was the greeting that was created for it wasn't created for, but it was used for phones when phones were first created.\u003C/p>\u003Cp>Speaker 0: I thought it was the first word ever said down a phone line.\u003C/p>\u003Cp>Speaker 1: Yeah. Yeah.\u003C/p>\u003Cp>Speaker 0: Okay. Oh, holy world. Great. So we check that that works. Now in here, we wanna go ahead and actually, and generate a token.\u003C/p>\u003Cp>So I'll just create a new endpoint, a new route handler here. So router dot post, and we'll call this one generate rec res. Again, if you've ever done any kind of Node. Js web development, this will feel very, very familiar to you with good reason. This is just the express route to the I was talking about\u003C/p>\u003Cp>Speaker 1: it feels like express.\u003C/p>\u003Cp>Speaker 0: Yep. Yep. Yep. That's exactly what this is. Now inside of here, we wanna go ahead and use the Twilio, helper library SDK.\u003C/p>\u003Cp>What do you call it?\u003C/p>\u003Cp>Speaker 1: From here, it will be the Twilio helper library.\u003C/p>\u003Cp>Speaker 0: The Twilio helper library. And so we actually wanna go ahead and install that. So npm install Twilio. Fantastic. Now we wanna go ahead and, and use it.\u003C/p>\u003Cp>So there is a page in the docs. Let's find it. 
I actually saved the link earlier. It's just shortcut here. This is the access tokens page, and it shows us how we can generate access tokens.\u003C/p>\u003Cp>The access token oh my god. What are these kids doing? I swear they're getting, like, pamphlets, and they're just going, can you hear them?\u003C/p>\u003Cp>Speaker 1: I can a little bit.\u003C/p>\u003Cp>Speaker 0: Yeah. Like this. Anyway, so an access token is just this really, really long string here, that contains this information.\u003C/p>\u003Cp>Speaker 1: Web token, just in case some people know. So you can break it apart into its pieces.\u003C/p>\u003Cp>Speaker 0: Now we just wanna go ahead and generate them, and the docs, for Twilio provide these, these these different, snippets here. We are gonna be using the voice SDK in the browser. So we're actually gonna use create an access token, create an access token. So what we're gonna do here is is go ahead and copy and paste this, into this. Now there's a few things we're gonna do, we're gonna do ahead of time.\u003C/p>\u003Cp>So we are going to one moment. We are going to, obviously, pull these to the top.\u003C/p>\u003Cp>Speaker 1: Those we\u003C/p>\u003Cp>Speaker 0: definitely need.\u003C/p>\u003Cp>Speaker 1: We need to bring in our environment variables.\u003C/p>\u003Cp>Speaker 0: We do need to bring in a a couple of our environment variables, or we could just use them use them directly from process to end. But I was gonna say this is an ESM environment, so we are just gonna very quickly change, change the way that we import Twilio here. So import Twilio from Twilio, and then the access token will be twilio.jwtdotaccess. That was just a small small, semantic change there, but it is important. Then we'll take the rest of this That should bring you in the environment.\u003C/p>\u003Cp>You're dead right. You're dead right. And I believe that is exactly what I called them in the Docker Compose file, so I don't need to do anything with those. 
Then and this is now where we're moving into the, into the actual route handler itself. We're gonna go ahead and generate a ticket.\u003C/p>\u003Cp>Speaker 1: Through that as well so that people know what's going on. So the outgoing application SID is the application SID we created earlier. So if you remember, I generated it, and he's going to pop that in. And then we also have an identity. Now remember, because this is a browser panel, this browser panel could be anyone.\u003C/p>\u003Cp>Usually, you would decide a string to identify who is this browser. Quick question. Do we have access to who the logged in you Directus user is right here?\u003C/p>\u003Cp>Speaker 0: We absolutely do, and we're gonna come back to this once we have the panel because the panel is gonna send an authenticated request to this endpoint. And within that authentication will come the ID of the user along with all the roles and permissions that that user holds. So for now, we'll just hard code it as a user, but we are gonna swing back around to this later and, as you said, actually provide the user ID.\u003C/p>\u003Cp>Speaker 1: Cool. Alright.\u003C/p>\u003Cp>Speaker 0: Now I don't think if we take a look, you know, alright. We're not doing incoming calls, but I think we'll just leave it as is. We generate a new access token with with all of these values we've set up. The only thing left to actually do is, is return this. Yeah.\u003C/p>\u003Cp>Res.send, and we'll just send this value instead of\u003C/p>\u003Cp>Speaker 1: the document. People are wondering, they're like, what is that voice grant stuff I am seeing? That's because you can you might want to give multiple permissions to different products with one token. So you can give a voice grant, a, a messaging grant, a conversations grant, a video grant. 
You could do all of that and send it in one token just to give that token lots of permissions rather than doing this if you're using multiple products.\u003C/p>\u003Cp>Speaker 0: Absolutely. So with that, it's built, which is cool. This should generate a token for us if we go to slash willio token slash generate as a post request. Once again, it has the voice grounds. It has our account SID, API key, and secret.\u003C/p>\u003Cp>It has our identity, which is, for now, just a fixed string, but we will update that in a moment. And then it will go ahead and generate a token and, and provide it as a JSON web token, which is what we're gonna need in our panel. So let's just quickly test this.\u003C/p>\u003Cp>Speaker 1: Expect to see a string of epic proportions and randomness show up in a second, if this works.\u003C/p>\u003Cp>Speaker 0: Yep. So Twilio token slash generate.\u003C/p>\u003Cp>Speaker 1: And we do. Perfect.\u003C/p>\u003Cp>Speaker 0: Lovely. So that's fantastic. That's actually really at its absolute core, all this endpoint needs to do. There are 2 things it doesn't do, which we're gonna do later. The first is actually identify the user, and the second is be authenticated at all.\u003C/p>\u003Cp>Right now, any user on the web, if this was a hosted application, could hit this endpoint and generate tokens. Not good. So, later on, we're going to make sure that this is more locked down. But, for now, I think this is good.\u003C/p>\u003Cp>Speaker 1: Yeah. And I think that and you can keep going while I say this. I think that people must remember on the web is just because a, URL is not publicly advertised does not mean that people will not be able to find it. And oftentimes, people use, like, just URLs are public to create really important private stuff like this and end up creating exploit, like, holes that the application can be exploited through. So make sure you protect this, outside of Demoland as well.\u003C/p>\u003Cp>Okay. 
So we're creating now our actual panel.\u003C/p>\u003Cp>Speaker 0: Yeah. Is that how you spell dialer?\u003C/p>\u003Cp>Speaker 1: With yeah. That was perfect. You corrected yourself.\u003C/p>\u003Cp>Speaker 0: Nice. We'll just do it with JavaScript again. So we're adding this to the bundle now. The nice thing about this is we only need to be running that npm run dev at a bundle level, and it will rebuild when the things under are updated as well.\u003C/p>\u003Cp>Speaker 1: Cool. So\u003C/p>\u003Cp>Speaker 0: I think you see the extension. Yeah. I let's, let's npm run dev this, and, let's take let's take a little look firstly around the code and then then what it does. So there is. That's like, where is it?\u003C/p>\u003Cp>Oh, there we are, the dialer. The dialer is made up of 2 files, the index dot JS and the panel dot Vue. The front end of directives is built in Vue. JS. And therefore, when you're building these app extensions, they are also built in Vue.\u003C/p>\u003Cp>Js. So we have an ID. This has to be unique across the whole system. So, you know, you can't have 2 conflicting extensions. It's generally best practice to prepend this with your author name, to, you know, namespace it somewhat.\u003C/p>\u003Cp>But for now, I'll just be like Twilio dialer, whatever. We provide a name. This will show in the UI. Don't know why I keep writing it like that. We are going to put in an icon.\u003C/p>\u003Cp>You can use any Google material icon. Descriptions, make own calls. Now all I want you to do for a moment is take in the fact that there is this option called text. And I'm gonna show you what that does in the actual directus UI in just a moment. Now the panel, is a view component.\u003C/p>\u003Cp>It takes in the props from from the index JS. So here we have the text. Text is there. That's how data gets passed between this kind of configuration pane and the panel itself, and then it's just a view component. 
By default, out of the box, it comes with the options API.\u003C/p>\u003Cp>You are, of course, completely, able to use the composition API, which is, I think, what we're gonna do today. I think most of you developers now kind of lean towards expecting to see that. So what I'm actually gonna do is delete no. I'm not gonna delete anything yet. I'm gonna show you what this panel does.\u003C/p>\u003Cp>So let's create a new insights dashboard. We'll call this Twilio workshop, and we will add we will restart container. I didn't need to do that. I just needed to refresh the browser. That was it.\u003C/p>\u003Cp>I couldn't remember what step I needed to take, so I'm like, let's do them all. There we go.\u003C/p>\u003Cp>Speaker 1: Gotta be sure. Better safe.\u003C/p>\u003Cp>Speaker 0: You gotta be sure. There's our Twilio guy. There's the icon of phone.\u003C/p>\u003Cp>Speaker 1: I was really\u003C/p>\u003Cp>Speaker 0: also pass in yeah. It was easy. Thanks. I mean You can also pass in these SVGs. Drop it.\u003C/p>\u003Cp>Speaker 1: I'm still impressed.\u003C/p>\u003Cp>Speaker 0: Cool. You can pop in SVGs too. And as you can see, the SVGs are all purple. That is the theme color of the director's projects. You can also use these CSS variables in the SVG, which is kind of nifty.\u003C/p>\u003Cp>So they all feel, you know, like they belong. There's the text that this is the show header show header right here. You can add extra configuration if you want, and there it is. Show header, you can put some text in, Call people. There it is.\u003C/p>\u003Cp>There's the text that came from the from this configuration options. I think it's called options pane. We don't want text, but, just wanna show you that's that's kinda how how it works. So now we're going to start now we're going to start ripping it apart. First thing I think we're going to do is we are going to let's just have a think here.\u003C/p>\u003Cp>I think we're going to remove all the options. 
I don't think we need options in this.\u003C/p>\u003Cp>Speaker 1: No.\u003C/p>\u003Cp>Speaker 0: It has you know, it's it's going to just show the users.\u003C/p>\u003Cp>Speaker 1: Like an example is, for example, a lot of companies are international. So they end up having, like, a German number, a UK number, a different number. So I'm saying I'm probably seeing you could have, like, as an option, a drop down to be, like, I wanna phone this person, but from this number because they're in Germany, and they wanna use the German number. So they it feels familiar to them, something like that.\u003C/p>\u003Cp>Speaker 0: Absolutely. Absolutely. I think what we'll do here is we'll just empty that out to a div and onto the let's just let's just rock on. Let's just you know? Yeah.\u003C/p>\u003Cp>I love some boiler plating, but we're just gonna we we get we're gonna go from scratch. So but and I'm gonna use setup here, so we'll use the composition API. I'm not gonna bother reloading the browser because nothing's gonna show up. It's gonna be an empty box. So now we're at the point where with this blank slate, we can really talk about every line of code we're gonna write, help you understand what it does, and build this extension that will call this this endpoint we've created, which is now an internal API endpoint.\u003C/p>\u003Cp>We'll instal and configure the voice SDK, and we will eventually just make a phone call. I'm gonna pause for just a just a moment. We're about halfway through our time, and this is comfortable. This is a good spot to be in. Does anyone have any questions in the chat?\u003C/p>\u003Cp>And, Nathaniel, too, you're kinda seeing this set up, I think, for, you know, one of the first few times. Do you have any questions so far?\u003C/p>\u003Cp>Speaker 1: No questions. I'm curious. I'm really excited about some of the the options as well. So you know how it had text. 
What are the types of things we can put into the options?\u003C/p>\u003Cp>So, like, I I just said, like, a drop down list, but are there other kinds of, interactions or ways we can select?\u003C/p>\u003Cp>Speaker 0: Yeah. Yeah. Yeah. All of the built in they're called interfaces in Director, so form input, you know, you could we call an interface. You can use any of the built in interfaces.\u003C/p>\u003Cp>So you have the the WYSIWYG. You have a codes input box that does some syntax highlighting. What the hell is going on? You have a text box that, you know, can be integer or float type. Like, you can enforce that.\u003C/p>\u003Cp>You have sliders. You have relationships. You can actually pull data from collections and select an item within the collection. All of the built in interfaces and directives are exposed to you through through this.\u003C/p>\u003Cp>Speaker 1: Interesting. Good to know. Good to know.\u003C/p>\u003Cp>Speaker 0: Yep. Yep. Yep. And you can also in fact, if I just if I just undo this slightly, there's other, like, meta information you can provide. You can, hide values, make them like a password It says width full.\u003C/p>\u003Cp>You can also do width half and put 2 things side by side. So we give you some flexibility around how that looks and\u003C/p>\u003Cp>Speaker 1: feels. And last but not least, like, because I'm thinking, like, we're making this panel and it's kind of just a demo one. But people can people, share panels maybe for other users to to maybe try out?\u003C/p>\u003Cp>Speaker 0: Yeah. So, on the director's marketplace, you can publish them on npm. If you boilerplate it with, the CLI, which I boiler plated mine with, we everything's set up. You can just push it straight to NPM, and in a few hours, you'll see it appear in the marketplace.\u003C/p>\u003Cp>Speaker 1: Awesome.\u003C/p>\u003Cp>Speaker 0: Yeah. Cool. Right. So let's move into the panel dot view. 
Let's move into the panel dot view.\u003C/p>\u003Cp>So we're starting from scratch. What we're gonna do here? First thing we're gonna do is in is import all of the composables that come with the view, with the directors extensions SDK that we're gonna use today. So we're gonna import use API. We'll talk about what that does in just a moment.\u003C/p>\u003Cp>And use items. Once again, we'll talk about what that does in just a moment.\u003C/p>\u003Cp>Speaker 1: Is related to how well, I know we're gonna talk about in just a second. No. Yeah. These are things that are coming from Directus that we can use Yeah. In our component.\u003C/p>\u003Cp>Speaker 0: Correct. And, actually, I might just take this moment to pause and show you inside of the composables right here. These components make working with direct this easier. Use API effectively wraps your API request with all of the authentication that comes in your that is in your Directus client. So you could, of course, just use fetch.\u003C/p>\u003Cp>But then how does your your how does your endpoint know it's you? But if you instead use the use API, which, I think, yes, actually, it's under the hood. It also sends all of your client details. And that's how we're gonna know which users there, whether they're authenticated and so on and so forth.\u003C/p>\u003Cp>Speaker 1: And this is really because I'm guessing it means you don't need to write, like, all of that metadata into\u003C/p>\u003Cp>Speaker 0: your\u003C/p>\u003Cp>Speaker 1: access to your endpoint URL.\u003C/p>\u003Cp>Speaker 0: We're just gonna use the use API composable instead. UseStores allows you to go in and actually access information, use information within your directors project itself. So you can access things like all of the permissions users hold, data about the collections, the metadata about collections. And the other one I imported was the specific use items composable. 
And this will allow you to query data in your director's project directly from your panel.\u003C/p>\u003Cp>So we're not going off to a back end to do this. We're doing this within the client itself.\u003C/p>\u003Cp>Speaker 1: And so In\u003C/p>\u003Cp>Speaker 0: turn, it knows what you have access to and will honor the access control of your logged in user.\u003C/p>\u003Cp>Speaker 1: So yeah. So this is like takes away the need to query a database because you are building an application over inside slash beside your database.\u003C/p>\u003Cp>Speaker 0: Absolutely. Absolutely. You know, I think a lot of people see directors as a CMS or see directors as a back end for an external application. But through a Directus insights and through, like, these panels and through modules, which is another extension type, which is these. So they add they basically give you a blank slate, which you can build.\u003C/p>\u003Cp>Yeah. It's the most low level extension type, I suppose. It's a good base to actually just build your application within the Data Studio itself. So, yeah, pretty pretty nifty. So we're gonna use use API to make that API call to get our token.\u003C/p>\u003Cp>We're gonna use use items to populate the drop down list of all of the users that we can call. We're also just because, just because we're in a, a view app here, we're just gonna import graph. There's not nothing. We don't need to explain that terribly. That's just a kind of quite boilerplatey stuff.\u003C/p>\u003Cp>Now we're going to create an instance of use API. We call it API. Use API. And we're also going to now create our instance of use items as well. So we are going to pull out the items from use items.\u003C/p>\u003Cp>Now the signature here is a little bit wild, so bear with me. The first thing we need to pass in as shown here, I don't know why it's using this. Maybe I've called it a font that doesn't exist, but why the hell does it look like this? 
Anyway, the first thing we need to pass in is a ref that contains a string of the collection name we're querying. So we don't just pass in the string, we pass in a ref.\u003C/p>\u003Cp>Cool. We can we can rock on there. We can literally just put in ref. Not a problem, and we had to import it to do this. And we're gonna we're gonna query the directus views as collection.\u003C/p>\u003Cp>Speaker 1: So use item The second now gives you access to Yeah. All of the Directus users. You can just pull them in.\u003C/p>\u003Cp>Speaker 0: Yeah. Because that is a collection in your database like any other. Now we do have a distinction between system collections and user collections. User collections you create, System collections are these kind of 20 I showed you earlier out of the box. In fact, we haven't got any user created collections in this project.\u003C/p>\u003Cp>We're not gonna use them. But, yes, you can query these just like you can any other any other collection. So that's completely correct. And the second thing we we provide is a query. Now directors has quite a robust I'll show you here quite a robust query language.\u003C/p>\u003Cp>Here it is. Not query parameters. That's what I was looking for. So you can specify what fields are returned. You don't need everything.\u003C/p>\u003Cp>Right? You might only just need the ID and the title of a blog post, you know, for example. You can apply filters. You can do searches. You can do, like, patch basic pagination, you know, by limiting how many per page and what page you're on and what the offset is and so on and so forth.\u003C/p>\u003Cp>You can apply sorting and just more. Now we're gonna do that just to specify what fields we want returned because I don't want that huge object for every user. I just want a little bit a little bit less. So we're going to pass in fields. And we we want yeah.\u003C/p>\u003Cp>Speaker 1: Yeah. And and Go ahead. 
You can keep typing, but I'm guessing this is something, like, that people should always be doing because it makes your your moving smaller chunks of data around, so only getting the things you need rather than Got it. Like, I feel like it's a very early in career developer move to just, like, pull down the entire database to get one field from one user.\u003C/p>\u003Cp>Speaker 0: And what's really, really nice about this is we put we expose a GraphQL, you know, API, but we also expose a REST API. And now what I think is one of the core value propositions of GraphQL, which is you you construct these complex queries that only bring back what you need, you can do regardless. You can really pick what's right for you. And this works with, this works with relational data as well. Here in the items, so in the use items composable, I could, for example, let's say there's a relation, right, called, I don't know, posts.\u003C/p>\u003Cp>I could be like post stock title, and I can start getting the relational data too. So it's really powerful.\u003C/p>\u003Cp>Speaker 1: So in in that case, you almost end up querying 2 tables. Right? Because you're querying a table and then querying Yeah. That's critical.\u003C/p>\u003Cp>Speaker 0: And that's where being very selective about what you're bringing back is really critical because another valid fields query is this. Give me everything on this level. Give me everything one level down. And you know what? You know, you can do that.\u003C/p>\u003Cp>This is incredibly computationally expensive. But you could do this. I mean, you could. You could. But at any level, you could do this.\u003C/p>\u003Cp>You could be like, you know, give me everything in the you give me everything in the posts collection, for example. So, you know, we give you that flexibility. You know, you can blow your own shit off.\u003C/p>\u003Cp>Speaker 1: There's great power. Great responsibility.\u003C/p>\u003Cp>Speaker 0: You you absolutely got it. 
Now, what's gonna come back from here is in fact, this might be a point where if I just console log items, we can just get a little look in on what's actually happening here.\u003C/p>\u003Cp>Speaker 1: Oh, I'm guessing we're gonna see user 1, Nathaniel Okeno.\u003C/p>\u003Cp>Speaker 0: Yeah. And there's 2 users. There's the admin too. Yes. My user.\u003C/p>\u003Cp>I just got phone. Account. Right? You know what? We could apply a filter here that says, just give me users who have phone numbers.\u003C/p>\u003Cp>I'm not going to do that. But if I refresh here, there's the array. The array has 2 users, the admin, no phone, Nathaniel, as we expect. Awesome.\u003C/p>\u003Cp>Speaker 1: Cool.\u003C/p>\u003Cp>Speaker 0: What else what else is important here? I might just rename this users because items is a little bit of a weird word to use when they are users. So that's just a convenience thing, I think. And then what we're gonna do now is populate a a select, a drop down. And then when you select something, we're just gonna bind it to a variable in here.\u003C/p>\u003Cp>So we'll just create that variable now. All we want is their phone number. Right? We don't care about the rest of the objects. We just care about the phone number, and that can start off with a value of null.\u003C/p>\u003Cp>Now that we've pulled in all these users, let's actually display up here. And now I have the kind of pleasure of showing you the component library. Here's something else. This is the component playground. These are components we use within the data studio that we expose to extension authors.\u003C/p>\u003Cp>So we are going to use the v select, and it has all of the kind of styling applied. Really, really nice to kinda out the box. This is what it looks like. We need to v we need to bind it with a with a variable, which is why I just created phone number. Phone number will go in there, and you pass in items.\u003C/p>\u003Cp>Items have text and value. 
Now our items do not have text and value. They have first name, last name, phone number, and so on and so forth. You can you can change which is which field is used for the text. So we'll we'll do, like, first name.\u003C/p>\u003Cp>In reality, you would probably do, like, a computed, you know, a computed array that would add the first and last name together and display that. I am too lazy for that. We'll just display the first name. But that's what you would do in the real. Right?\u003C/p>\u003Cp>So, that's I mean, that I I may as well copy it, to be honest. There we are. Yeah. Let's put it inside this div here.\u003C/p>\u003Cp>Speaker 1: K. So we don't That is the value is going to be so I'm guessing the value is going to\u003C/p>\u003Cp>Speaker 0: be the phone number\u003C/p>\u003Cp>Speaker 1: that we Yep.\u003C/p>\u003Cp>Speaker 0: Created earlier. Phone number.\u003C/p>\u003Cp>Speaker 1: Yep.\u003C/p>\u003Cp>Speaker 0: And the items, is going to be users.\u003C/p>\u003Cp>Speaker 1: Okay. So it's gonna grab the list of users that we got.\u003C/p>\u003Cp>Speaker 0: And then grab the list of users. As noted, we do not, we don't carry out value. As noted, it's interesting that it did this because you're meant to do this in Vue, when it's an attribute. So we have the item text, which is first name, and there was item value was the one under the item.\u003C/p>\u003Cp>Speaker 1: So you can choose what is visible to click, but then what that value actually trick the way it adds to whatever. Okay. That makes sense.\u003C/p>\u003Cp>Speaker 0: I mean, let let's make this easy. Let's just let's just, you know, print the value. So now if we refresh, there's our drop down admin and Nathaniel. When you pick Nathaniel, there's the\u003C/p>\u003Cp>Speaker 1: top up. Blank when it was admin because admin doesn't have a number.\u003C/p>\u003Cp>Speaker 0: Yeah. Because, actually, initially, it was, on, I think I need to put in, like, a select. It's unset to begin with. It's null. 
So whatever.\u003C/p>\u003Cp>We would need to put in, like, a select user default setting.\u003C/p>\u003Cp>Speaker 1: Doesn't have a number. So\u003C/p>\u003Cp>Speaker 0: The admin doesn't have one, though. But, yeah, originally, this isn't because admin has nothing. It's because it's null. But whatever, like yeah. Small small nuance, small small edge case kind of thing there.\u003C/p>\u003Cp>So now we have that, then we are\u003C/p>\u003Cp>Speaker 1: I think we need\u003C/p>\u003Cp>Speaker 0: I mean, that's probably we need a we need a button. We do need a button. Can actually I think yeah. Yeah. We do.\u003C/p>\u003Cp>Let's use another component for that. Let's look at here. We have the button, shock horror. I mean, it's just the v button. We actually need to do nothing else.\u003C/p>\u003Cp>I'm not even gonna bother copying that, to be honest. V he says and then makes a typo. V button, call user. And then we will when it's clicked, we will start call. And, of course, we need a function, Let's get this const stop call.\u003C/p>\u003Cp>I think it needs to be an async function for later. I just happened I mean, we could change it later, but we'll do it out there. So there you go. That will start, and then we'll we'll grab the value of phone number at that point. When we start a call, we will then go and grab the token.\u003C/p>\u003Cp>Tokens have a short lifespan, so you really wanna grab them at the point you're gonna use them all very soon before. So we'll we'll go and handle that.\u003C/p>\u003Cp>Speaker 1: Spend the token time time to time to live. It is not recommended for you to just, like, create one that lasts the whole year. You want ones that are gonna last a short amount of time.\u003C/p>\u003Cp>Speaker 0: Yeah. Because they can they I mean, if they're intercepted, they're usable, basically. So, yeah, you do only want them to live They wanna run a short while. 
So we obviously haven't done anything with the use API yet, but I think you broadly understand what it's about to do. Do you have any questions?\u003C/p>\u003Cp>I'm pretty happy with how it's shaping up.\u003C/p>\u003Cp>Speaker 1: I like how quickly you can build the especially with the component library, how quickly you can build the dysfunctionality, with all of and I'm guessing the great thing is, like, with stuff like that, you're standing on the shoulders of giants, of heroes where, like, they have done all the sorts of things to, like, think like, I saw you change in the width of the button, like, really easy just dragging across and stuff. Yeah. That is really, really useful.\u003C/p>\u003Cp>Speaker 0: This one's my favorite personally. It's called fancy select, and this is just a select 2 items, a divider and a third item. And it's just a select, but look how pretty that is.\u003C/p>\u003Cp>Speaker 1: I love how the pass.\u003C/p>\u003Cp>Speaker 0: And that's used and that's used inside of the UI as well. We actually used it when we, when we or did we use it actually? Nope. Lied to you. I used it earlier today.\u003C/p>\u003Cp>No. So there you go. Right. Okay. Save\u003C/p>\u003Cp>Speaker 1: and refresh.\u003C/p>\u003Cp>Speaker 0: There's the button. The button just runs this function. Now with the point where we can go and get the Twilio voice SDK. So let's let's go ahead and do that. And, yeah, that's a really good point, Alex, in the chat.\u003C/p>\u003Cp>This makes customizations feel native to the by using the component library, it all feels native. Don't get me wrong. Obviously, it lacks polish. It needs some padding. But, actually, to be fair, just by adding padding\u003C/p>\u003Cp>Speaker 1: It would be a really good job.\u003C/p>\u003Cp>Speaker 0: Yeah. Yeah. Yeah. Yeah. Right?\u003C/p>\u003Cp>We're we're lazy, so this is what you get today, but you could Speak\u003C/p>\u003Cp>Speaker 1: for yourself. 
I'm joking.\u003C/p>\u003Cp>Speaker 0: Speak for you. Yeah. I'm I'm the one with the keyboard, so I'm speaking for both of us. Hey. Right.\u003C/p>\u003Cp>Let's go and install npm install, hang on a minute.\u003C/p>\u003Cp>Speaker 1: Twilio and I think voice SDK.\u003C/p>\u003Cp>Speaker 0: It is. It is. Npm install at Twilio slash voice SDK.\u003C/p>\u003Cp>Speaker 1: Yeah.\u003C/p>\u003Cp>Speaker 0: Not my first rodeo, mister Academy.\u003C/p>\u003Cp>Speaker 1: I have made this mistake multiple times.\u003C/p>\u003Cp>Speaker 0: Now Cool. Before we continue, there is one more piece of configuration. And it does involve going back to that, to the, Docker composed file and adding one more environment variable. Let me let me shove this up. We're still here underneath the eyes.\u003C/p>\u003Cp>Hello. But I just wanna get it to the point where I'm not about to leak all the information. So I will get rid of that now. Okay. Back it back here.\u003C/p>\u003Cp>We're back here. Just above the cursor, a couple of lines are all those environment variables I don't wanna show you, but we do have to add one more. You might be thinking, pretty damn sure I've added enough of these now. Like, I I don't don't wanna do this. But what we do need to do is, let's find it.\u003C/p>\u003Cp>I did write because it's long. I did write it earlier. Needs to add this environment variable. Why? What the hell is this?\u003C/p>\u003Cp>As mentioned earlier, the director's data studio, doesn't allow request to external necessarily without perfect configuration allow requests to third party services, like to 3rd party servers or whatever. And that's a security precaution. That's great. But we are going to be connecting directly from our browser to a phone number. 
I think, strictly speaking, we could probably build this as an endpoint too and use the endpoint as like a middleman proxy, but we don't we don't have time for that.\u003C/p>\u003Cp>The easiest way to do it is this environment variable where we're basically changing the content security policy, to allow the connection to this WebSocket URL, which is what the voice SDK is gonna do in a bit.\u003C/p>\u003Cp>Speaker 1: I was wondering. I was like, have we ever seen this before? Like, but now that makes sense. So you are giving It\u003C/p>\u003Cp>Speaker 0: took login. It\u003C/p>\u003Cp>Speaker 1: took Yeah. Yeah. So you said\u003C/p>\u003Cp>Speaker 0: to work that out.\u003C/p>\u003Cp>Speaker 1: We are making sure that the, the director's browser I'm gonna call it the director's browser, which has these extra security walls around it. We're just like, yo, let, WebSocket connections to this Twilio u URL happen. Awesome.\u003C/p>\u003Cp>Speaker 0: Yes. And otherwise, that will error. And while we were putting this together, that error, it's like I had to add that environment variable for it to work. It's pretty descriptive. The error tells you it's a CSP, a content security policy problem, and it's on this connect source value.\u003C/p>\u003Cp>And that it was while trying to access this this, URL. So, you know, it it was pretty easy to work out, but let's save ourselves the the pain and do it now. So so we've installed the SDK. Yeah.\u003C/p>\u003Cp>Speaker 1: Talk about. So we've installed the SDK, and as I'm talking, you can maybe, like, type. So with the voice SDK, what we start, we bring in the SDK, and we bring in specifically a device. The idea of a device is a device is anything that can connect to Twilio, because the device right now, it's gonna be a browser, but it could be a phone if you're using the, like, react native or the iOS or Android SDKs. 
But we are bringing in a device, and that device needs to have permissions, and we give those permissions from a token.\u003C/p>\u003Cp>But we don't have a token now. So how are we gonna get this token from our endpoint? Great.\u003C/p>\u003Cp>Speaker 0: We've kind of spoken about it already. We're gonna use this use API composable. Call our now internal endpoint slash, slash Twilio dash token slash generate as a post request, we'll then get the we'll then get the value here. So, the way we're gonna do that, we've already created this, is we are going to, pull out the data value that comes back. I'm already gonna just call it token.\u003C/p>\u003Cp>Await API dot post because it is just an Axios instance. Ultimately, I'm gonna go to / Twilio token slash generate. That will return a string, which is the token, which we can then remove these question marks. We'll pop this directly in here. We are That's how you pass this device.\u003C/p>\u003Cp>Yes. We're ready to register the device. There is one more thing I would just wanna take a moment to do. And, it's not I've I've not taken note of it, but I just wanna derail this for a moment. I did oh, shall I do it?\u003C/p>\u003Cp>Shall I do it after? It's the whole sending over the sending over the correct user and authentic. I think we'll make the request first, and then we'll we'll see what happens. So we have a device, and then let's just console log device. Realistically, we're either gonna see a device or we're gonna see an error because that token's invalid.\u003C/p>\u003Cp>There is no there is no other outcome. It's one of those two things. Extensions reloaded. I hit save. So we're gonna refresh this.\u003C/p>\u003Cp>Open the console. Go to you. Hit call user, and that's the device. Rock on. What else is important here?\u003C/p>\u003Cp>Oh, no. No. No. No. We're not done.\u003C/p>\u003Cp>We need to also do the device. We need to register the device. That's my bad. That extension's reloaded. 
I might just rename I think it's alphabetical because, again, lazy.\u003C/p>\u003Cp>Oh, being being too hopeful there, I think. Okay. Whatever. Cool user. Oh, problem.\u003C/p>\u003Cp>Why? WebSocket received error undefined. Oh, I think oh, I didn't restart the I just needed to because I changed the environment error.\u003C/p>\u003Cp>Speaker 1: That was the error he told us we were going to see, and he walked right\u003C/p>\u003Cp>Speaker 0: into it. But there it is, by the way. Refuse to connect to this URL because it violates the following content security policy directive, connect source self HTTPS. So they were there, and then I added on to the end of it the WSS, you know, Twilio voicing. The Twilio token generate is, I feel like you probably came into this, into this session a little bit later on, John.\u003C/p>\u003Cp>But here here it is. We built it first. So, yes, what we've built is a bundle. In the bundle, it's an endpoint and the panel. The panel calls the endpoint.\u003C/p>\u003Cp>The endpoint talks to Twilio, comes back, returns it to the panel. So, yes, it is now a direct test custom endpoint, by virtue of this file here. Okay. Let's do that again. When you see an error in a workshop and you're like, damn.\u003C/p>\u003Cp>I hope this isn't gonna derail everything, but no. There there we go. No errors. So we're good. No errors.\u003C/p>\u003Cp>And we did see there it did a post to the Twilio token generate endpoint. It returned in 25 milliseconds with the\u003C/p>\u003Cp>Speaker 1: a a device, register device with Twilio, and say, hey, device. I'm authenticated. I am now ready to start making calls and receiving calls if you set up to do that.\u003C/p>\u003Cp>Speaker 0: Let's do this, and then we'll go back in the in up the the endpoint because that is there's no problem now for its hard coding user in the endpoint. But in the real, you should not be doing that. So we'll cover that at the end as like a let's it's not a next step. 
It is critical, but we will treat it as if it's a next step because I wanna I wanna get a call. I wanna get a call going.\u003C/p>\u003Cp>I'm getting impatient now, as you know. That is the person I am. Okay. So, let's now yeah. And once the so there's one other thing here, which is this device doesn't register like that.\u003C/p>\u003Cp>Instead, it emits an event when the device has been successfully registered. Sometimes that takes a moment. I think that it also pops up and, like, asks you for, you know Yes. So My hat says\u003C/p>\u003Cp>Speaker 1: why register can take a couple of seconds is because sometimes it needs to ask permissions to use your microphone, which could take you one second, 2 seconds. In Kevin's case, where he's already done like a we we've done this before\u003C/p>\u003Cp>Speaker 0: It doesn't show up. So it's\u003C/p>\u003Cp>Speaker 1: given it permission. So it's not going to do that. So while it may seem lightning quick for him and you might be like, why do we have to do this and then wait for it to finish? It's not always gonna be like that for all your users. So then what we use are these events where we say when the device has been registered, then we want to do something.\u003C/p>\u003Cp>Now what a lot of people do, especially if they're creating dial up panels, is you might have you might register a device when someone opens the dialer. Right? So that it's quicker that when they click a call, they're already registered. And then you can already, like, say that something's gone wrong. You are not authenticated to use this dialer straight away once they open it.\u003C/p>\u003Cp>Different\u003C/p>\u003Cp>Speaker 0: Can I ask you a question? Yes. The the tokens don't last very long, and we we use the token in the code when we register the device. And I don't think we use the token again. 
But what happens if the token expires between registering the event and actually trying to make the call?\u003C/p>\u003Cp>At what point have the handshake happened?\u003C/p>\u003Cp>Speaker 1: The handshake happens when you register the event when you register the device. Right?\u003C/p>\u003Cp>Speaker 0: Not when you make the call.\u003C/p>\u003Cp>Speaker 1: Not when you make the\u003C/p>\u003Cp>Speaker 0: call. Oh, interesting.\u003C/p>\u003Cp>Speaker 1: Is you also get a event when your token's about to expire and when your token expires. What we recommend people do is when your token's about to expire, generate a new token. So hit that API call then start a new token get a new token.\u003C/p>\u003Cp>Speaker 0: Yeah. Exactly.\u003C/p>\u003Cp>Speaker 1: Again, this limits your surface area for exploitation. So it's it may seem like a little bit of a faff, but trust me, you do not wanna be hit with a crazy Twilio bill because someone's hijacked your phone call and used it to call premium numbers that paid themselves.\u003C/p>\u003Cp>Speaker 0: That. So we're gonna make a call here. The call is going to be an object. I'm going to save it up here because we want to start the call later. We're gonna wanna handle hanging up.\u003C/p>\u003Cp>So we'll need access to that up to that object in the in the global scope. So we're going to go const, call, I think we'll call it, and we'll just again initialize that with a value of null. Now it's time to actually make the call. So we'll set the value of this ref here, into device dot and yeah. Thank you.\u003C/p>\u003Cp>But my uncertainty was creeping in there and it has a params object and the value is going to be phone number, this one here, dot value. Because it's a And it's that didn't get the raw value.\u003C/p>\u003Cp>Speaker 1: You might be like, why connect and why is it that params too? Because there are a couple of things. 
You can clone phone numbers, but, like, with Vince with you, you can actually phone video call rooms. So let's say people having a video room, you can literally dive in to it from here. You might want to dial another client, which isn't gonna have a phone number.\u003C/p>\u003Cp>It might have, like, a a name attached to it, like the user that we have. So, there could be a couple of things. We're using phone numbers here, but that could be a few other things, which which is why there is a bit more flexibility in I don't wanna say ambiguity, but a bit more flexibility of what could go into that params. Yeah.\u003C/p>\u003Cp>Speaker 0: Alright. This is everything we need to actually make a call. We're not done. We're not gonna get any UI that we've made a call. There's gonna be no ability to hang up.\u003C/p>\u003Cp>There's none of that, but I I think this is a moment we can actually try this out.\u003C/p>\u003Cp>Speaker 1: Alright.\u003C/p>\u003Cp>Speaker 0: That's it. You know, we've got 33 lines of code with a few lines of white space, and I think this might be all we need. So your phone number's in there.\u003C/p>\u003Cp>Speaker 1: Yeah.\u003C/p>\u003Cp>Speaker 0: Let's ready for the error? I am. I am\u003C/p>\u003Cp>Speaker 1: ready for an error.\u003C/p>\u003Cp>Speaker 0: There it is.\u003C/p>\u003Cp>Speaker 1: Allow. You've hit call. I am getting a phone call. I'm gonna mute my mic and just join the phone call.\u003C/p>\u003Cp>Speaker 0: Hi. Hey. Yeah. You sound you sound suitably terrible as phone calls do. I can't I can't I can't be bothered.\u003C/p>\u003Cp>I literally can't be bothered to figure out sharing your audio. So anyway, the thing is like, you should share my audio. I'm like, I can't be bothered.\u003C/p>\u003Cp>Speaker 1: I was speaking on the phone. He could hear me. Just because we're not sharing audio via the the stream, you couldn't hear me. So next time, I won't mute my microphone. 
But his browser is you couldn't hear me.\u003C/p>\u003Cp>So next time, I won't mute my microphone. But his browser called my phone. Hey. There was no indication we did it. There was no indication it ended.\u003C/p>\u003Cp>Speaker 0: So there there's some things we need to work out now. But we're basically sound. Yeah.\u003C/p>\u003Cp>Speaker 1: There were\u003C/p>\u003Cp>Speaker 0: some sound things.\u003C/p>\u003Cp>Speaker 1: So, Twilio just built in we have, like, these audio, like, files that, it's, like, plays a sound when you connect. It plays a sound when you disconnect. But, visually, there are no cues. Now you can customize the sounds that you that are in there to have something that's like your own UI. And, like, what we're gonna do next is you probably should change your UI so people can see visually what state the call is in.\u003C/p>\u003Cp>Speaker 0: We're at the point now where we are polishing this. When we get back to disconnecting the call, I think we'll, I'll slow back down. But for the next couple of minutes, I'm gonna speed run this because this is just little view UI, you know, things. So we do this. We currently have this div here that shows start call pick phone number.\u003C/p>\u003Cp>So I'm gonna create a new ref called show call. Sure. Let's make that false because at the beginning, there is no call being made. The call, I believe, also emits events. So call dot value is this, then we'll go ahead and say call dot value dot on.\u003C/p>\u003Cp>And there is a ring ring.\u003C/p>\u003Cp>Speaker 1: There is a Yeah. I don't know if it's answered or connected off the top of my head. I will check it out later, but it's subject to tell you it's connected, and then one to tell you when it's I think\u003C/p>\u003Cp>Speaker 0: we're gonna be lazy and we'll just do ringing and disconnected because disconnected is by either hand, I think. It's just disconnected. It has been dis\u003C/p>\u003Cp>Speaker 1: phone ended. 
Whether you hung up or they hung up. There is another way to, like, distinctly choose between which person hung up, but we're not gonna be using that today.\u003C/p>\u003Cp>Speaker 0: Exactly. So show call dot value becomes true. And then on disconnect disconnect, we'll set it back to force. Then in here, we'll say v if not show call, do all of this. And then, oh, just some just some chat right here.\u003C/p>\u003Cp>Loving it, but the, the potential tying this ability with other business use cases like customer engagement, etcetera. Yeah. I mean, I see, like, customer, like, outbound call centers as being, like, rich for this. We'll talk about that more more a little bit later because I have some thoughts on where this could go in the future with more time. Also, the possibility to include the verification style systems, generate a confirmation code in direct us.\u003C/p>\u003Cp>You read it out. You know, you could, send it via Twilio and then confirm it on the phone, stuff like that. Yep. Excellent. Yeah.\u003C/p>\u003Cp>I love the part with the browser called the phone. So did we. I'm I'm glad it worked.\u003C/p>\u003Cp>Speaker 1: The first time. Right?\u003C/p>\u003Cp>Speaker 0: The confidence in it. Yeah. We also need, I suppose, just a v l's here, and we'll just say call in let's do it more like this. Call ongoing. Not that.\u003C/p>\u003Cp>E. I just wanna No. I'm just\u003C/p>\u003Cp>Speaker 1: gonna say, what is that p else?\u003C/p>\u003Cp>Speaker 0: No bloody clue. The or the first result having a mad one. And then I think we'll just want another button. But this one is end call. And exactly.\u003C/p>\u003Cp>And we'll do an end call function, which I will just create, and we'll deal with it in a moment. Const end call. I don't doesn't hurt anyone.\u003C/p>\u003Cp>Speaker 1: Yeah. I was it doesn't have to be, but right\u003C/p>\u003Cp>Speaker 0: in there. Yeah. So that's the UI. It'll call ongoing end call, and then there's an end call button. 
It will do nothing.\u003C/p>\u003Cp>We we do actually need to tie this up to ever get back to that first state by pressing the button. And I think called dot value dot disconnect, the thing where we have a connect, there is also a disconnect. And then call, show call dot value force. I think it's handled by this, but, whatever. I wanted to work first time, so we're just gonna we're just gonna keep throwing some redundancy in there.\u003C/p>\u003Cp>I think that's groovy. So extensions reloaded. Let's try this out. Nathaniel, call. Nice.\u003C/p>\u003Cp>Speaker 1: Good. Call's coming in.\u003C/p>\u003Cp>Speaker 0: Answer.\u003C/p>\u003Cp>Speaker 1: Hello? Testing. Testing. 123.\u003C/p>\u003Cp>Speaker 0: Yeah. I hear you twice through through the browser and through our streaming system, and, it's it's twice too much, to be honest. Bye. Oh, forget that. It did hang up the call.\u003C/p>\u003Cp>Stereo. Yeah. It did it did hang up the call.\u003C/p>\u003Cp>Speaker 1: Dun dun dun. Oh.\u003C/p>\u003Cp>Speaker 0: I don't know what this is. I did hang up the call. Just before we get to this, because I think this might be in the voice SDK. But I'm not convinced. Get on proxy.\u003C/p>\u003Cp>Log is read only and not configurable. Now that means logs being used somewhere inside of the call dot disconnect. Alright. Call dot underscore disconnect. Alright.\u003C/p>\u003Cp>We are yeah. It's here. It's somewhere in here. We are going into the depths of the of the SDK, the voice SDK, and I have a better idea. I have a better idea on how to handle this.\u003C/p>\u003Cp>Call.value.onerror. Yeah. Damn. Does that even work?\u003C/p>\u003Cp>Speaker 1: I I do not know. I I I am desperately looking up the voice SDK to see\u003C/p>\u003Cp>Speaker 0: what Honestly honestly, my guy, it's always the end of our it's always the end of our Workday. Yeah. It's a frantically look up anything. I'm swallowing the error. 
It's not an error if you if you swallow it and don't display it to the user.\u003C/p>\u003Cp>Speaker 1: Is it. You're you're absolutely right. There is an error. But like, sorry. There is, on error event.\u003C/p>\u003Cp>Speaker 0: Oh, I just I'd made a guess there. But I'm gonna call you again.\u003C/p>\u003Cp>Speaker 1: Hello? How are you? Yeah.\u003C/p>\u003Cp>Speaker 0: Yeah. I I don't care how you are, mate. Sorry. Bye. Okay.\u003C/p>\u003Cp>Interesting. We'll pause on that. I'm gonna I'm gonna refresh and call you one more time, but I'm gonna hang up because we actually haven't tested that side. Sorry. You're gonna hang up the other way.\u003C/p>\u003Cp>I've I've been the one to hit end call.\u003C/p>\u003Cp>Speaker 1: Okay.\u003C/p>\u003Cp>Speaker 0: Interesting. Look. Works great. Call ended. Question.\u003C/p>\u003Cp>It's when Question. I disconnect.\u003C/p>\u003Cp>Speaker 1: Yeah. Question. You, when you disconnect\u003C/p>\u003Cp>Speaker 0: works. Yeah. Here.\u003C/p>\u003Cp>Speaker 1: So you hit disconnect. Do wait. Hold on. I wanna quickly check something because, you've got on disc. That's on disconnect.\u003C/p>\u003Cp>Speaker 0: I'm\u003C/p>\u003Cp>Speaker 1: not sure, actually. Mhmm. I'm not sure.\u003C/p>\u003Cp>Speaker 0: Neither am I. Neither am I. Refresh, reboot, pick up your changes. The changes are happening. Look.\u003C/p>\u003Cp>When I hit save, just watch it. Extensions reloaded and other UI changes were displaying. So I'm confident that happy to call this a a success, to be honest. Now there are a few other things we need to do. We need to like, I I think we must tighten this up a little bit because it's currently a little too open for everyone.\u003C/p>\u003Cp>Because right now do here\u003C/p>\u003Cp>Speaker 1: is anyone who gets access to the URL gets a token that allows them to phone anybody they want and charge it to you.\u003C/p>\u003Cp>Speaker 0: Agree. So, Wreck, I want to say it's accountability. 
But just in case it isn't, I'm gonna just oh, do you wanna know the way I'm gonna do this is I'm gonna go, object dot keys rec, and we can work from there. Oh, was that too hasty? Oh, was that too hasty?\u003C/p>\u003Cp>Is it building? Oh, but it's still building. It's it's packaging the whole Twilio SDK t k and and that's why it's just taking a hot minute there. So we're gonna do this. What's the best way of doing this?\u003C/p>\u003Cp>I think what I'll do is in the panel is I'll just pause on I'll just comment all of this out briefly. I don't actually wanna be making calls. Interesting there. Refresh and call. And over in this terminal here, great.\u003C/p>\u003Cp>We have\u003C/p>\u003Cp>Speaker 1: You have accountability.\u003C/p>\u003Cp>Speaker 0: Paul, last in the chat, obviously, you can lock this down in direct, but imagine inside of Twilio, there are also options to lock down your numbers. Zip rank as well. No?\u003C/p>\u003Cp>Speaker 1: Lockdown ask that question. I am locked down the\u003C/p>\u003Cp>Speaker 0: Obviously, you can lock this down in direct test. We might need to expand on what that means. But I imagine inside of Twilio, there are options to lock down your numbers or SIP trunk as well.\u003C/p>\u003Cp>Speaker 1: You're not I mean, these on your numbers.\u003C/p>\u003Cp>Speaker 0: Tokens are the way to do that. Yeah.\u003C/p>\u003Cp>Speaker 1: These tokens are the lock. Yeah.\u003C/p>\u003Cp>Speaker 0: So you wanna make sure you're securing the generation of them. So And, accountability.\u003C/p>\u003Cp>Speaker 1: Yeah. The generation of these tokens is essentially how they are locked, and only someone who essentially only someone with your credentials can create tokens, which means only someone\u003C/p>\u003Cp>Speaker 0: with your\u003C/p>\u003Cp>Speaker 1: credentials can create calls.\u003C/p>\u003Cp>Speaker 0: I'm gonna pause you for clarification. Lockdown calling premium numbers or long distance\u003C/p>\u003Cp>Speaker 1: Yes. Oh, yes. 
Yes. Yes. Yes.\u003C/p>\u003Cp>Yes. Yes. Yes. You can. I can actually show you that in the browser.\u003C/p>\u003Cp>So, there is\u003C/p>\u003Cp>Speaker 0: I'll I'll pop over to you while I just do this. There you go. See yours now.\u003C/p>\u003Cp>Speaker 1: No worries. So you have voice geographic permissions. So eventually, let's go back to that. Voice geographic permissions. And give it a second.\u003C/p>\u003Cp>It will load. And so you've got, programmable voice and SIP trunking as well. And you've got, they give you also an average cost of how much things will be. You've got low risk and high risk. This is a connection.\u003C/p>\u003Cp>This is not just countries, so it's not entire countries that are, high risk. Although it is could be a large proportion of countries, large percentage of that country. It is specifically network carriers in specific countries that have been known to have a higher risk of toll fraud, so you can block them. So I can, like, block the whole of North America. I can block specific country and so on.\u003C/p>\u003Cp>And then you can also pop in a number to check if it's got permissions at all. So that's another way to just, like, lock it down, and this is great because it's on an account level. So your whole account can be locked down.\u003C/p>\u003Cp>Speaker 0: All good? Yep. I did I did the changes. So at the top of the at the top of the, route handler, we inside of this, request, this rack, you get this object called accountability. Accountability contains the following properties, user role, whether or not they're an admin, whether or not they have app access, IP, user agent origin, and permissions.\u003C/p>\u003Cp>So you can further go on and expand, but all we care about are you logged in, and authenticated with director? So so that's all this does. It says, hey. If you don't have a user ID, go away. 
Now I think about it, there might be more you wanna do here because you could just jam an accountability object and user.\u003C/p>\u003Cp>So there's probably some other levels of, of restriction you wanna do here. For example, checking the user's role, which then will return they don't have a role because they're not a valid user. But I think this is I think this is okay. And then here in identity being\u003C/p>\u003Cp>Speaker 1: Yeah. User, it can be\u003C/p>\u003Cp>Speaker 0: There you go. Rec.accountability.com. So which is the ID.\u003C/p>\u003Cp>Speaker 1: Name of the user that we're logged in on in the system?\u003C/p>\u003Cp>Speaker 0: No. It's the UUID.\u003C/p>\u003Cp>Speaker 1: Okay. UUID. Cool.\u003C/p>\u003Cp>Speaker 0: It's the UUID of the user. I mean, if if you want to see, we can just console log.\u003C/p>\u003Cp>Speaker 1: Well, because I was gonna show how it appears in the Twilio logs when a call\u003C/p>\u003Cp>Speaker 0: comes in. Sure. Give that a moment just to reload one more time.\u003C/p>\u003Cp>Speaker 1: No worries.\u003C/p>\u003Cp>Speaker 0: Again, it's bundling that whole Twilio, helper library, and so it just takes a moment to build. We'll hit that.\u003C/p>\u003Cp>Speaker 1: Did you uncomment the call?\u003C/p>\u003Cp>Speaker 0: No.\u003C/p>\u003Cp>Speaker 1: Yeah.\u003C/p>\u003Cp>Speaker 0: That would help.\u003C/p>\u003Cp>Speaker 1: Alright. We'll make this a short call, just so we can show you the logs. So now only authenticated users on directors can use this application. They he's gonna call me. I'm gonna answer.\u003C/p>\u003Cp>I'm gonna keep it on for one second. He's gonna end the call. My phone call ends. And then I'm just gonna refresh my logs so that we can see the latest call that happened, which was 838. Yep.\u003C/p>\u003Cp>Which was this one, and we can see here it's complete from a client, and it was from client. And then this was the UUID that he talked about. I can actually show you a previous call. 
Remember, it used to be hard coded to user. If I get to this one over here, we can see it was client user before, and now it's client and then the user ID.\u003C/p>\u003Cp>Speaker 0: That's cool. Cool. So, I mean, I I think and I thought about it more actually. I may update the docs. I've just thought about it in this moment.\u003C/p>\u003Cp>The user actually this doesn't prove the user is authenticated. It proves that there is an accountability object with a user I with a user value that is not foresee. That's what I check against. But what you may wanna do is expand this further, use the permission, service, check they have certain permissions, which they either will or won't have. That is more direct us, that's locked into direct us and it will say no if they don't exist.\u003C/p>\u003Cp>So just a thought there. What we did was a lightweight, like, check, but it is not foolproof. And I've just realized in this moment that that's the case. But that's fine for this. We've acknowledged it.\u003C/p>\u003Cp>We've shared that with you, and we've given you an approach. Now I think that's it for, like, what what we can do here. But what what more could we do? Firstly, we need to handle that rejection state better when I hung up. Can't be bothered now, but just I'll figure it out.\u003C/p>\u003Cp>It'll find its way into the blog post that'll accompany this. One thing that's really cool if I just come back to my screen share is these are real users. Right? And they're queried using the composables that are exposed to view to to this view component here, this extension. But you have access to all the collections.\u003C/p>\u003Cp>So let's say, for example, you have, customer calls or customer notes, customer note whatever, a timeline of events and they exist in your project, you could select the user, get maybe their latest notes, then call them. So now you have context. Additionally, insights dashboards have this concept of a global relational variable. 
So what this will do is user, is you select directors users in here. Let's say we want first name and last name in here.\u003C/p>\u003Cp>Let's not have them overlapping each other. You could select a user from here, and this now contains the UUID of that user or the object of that user. I can't remember. But you could feed that into this panel. You could feed it into the panel that gets customer information.\u003C/p>\u003Cp>So you can have multiple parts of your dashboard all changing because you selected the user once in this global relational value panel. So there's so much. There's so so much you can do with this. And I'm just really excited for more people to see directors insights as a really valid app builder surface more than just, more than just insights and BI. So with a couple more minutes to to go, if anyone in the question if anyone in the chat has questions, please do chatter and let us know.\u003C/p>\u003Cp>Nathaniel, do you have any any closing thoughts while we wait for those?\u003C/p>\u003Cp>Speaker 1: No questions. But the thing is, like, I'm I'm just really trying to think about, like, other ways. So, like, there's obviously, like, building your own mini contact center. Because contact centers are, like, big and expensive, and actually sometimes what you need is you don't want this massive, like, stood up, like, huge at scale contact center. You just wanna be able to have the ability to contact your users just really, really quickly, and you could start building applications like this.\u003C/p>\u003Cp>You can then also because you're using Twilio phone calls, you can access, like, a lot of other Twilio functionality. Like, we've got, like, voice intelligence transcriptions where, like, you have a phone call. At the end, you can run AI operators on it to just be like, yo, this person, like, that person asked for a manager, or the person was happy. 
We can just do sentiment analysis and have that, like, just pop straight into a panel, for example, because you are just connected to that ecosystem as well. So, there is way more than just the calling calls that you can do, but, it all starts with, like, ring ring.\u003C/p>\u003Cp>Speaker 0: Yeah. And I suppose you can query APIs for that data as well from within here as we have just demonstrated, and we spoke about with the weather API, you know, example way up top. You can just call off to arbitrary third party APIs and bring data in via custom endpoints. So, yeah, there were no other questions in the chat, so I'm pretty confident at this point saying I think we are off the clock, which is wonderful. This has been awesome.\u003C/p>\u003Cp>Thank you so much for joining me for this. I had a really fun time putting this together and delivering it with you.\u003C/p>\u003Cp>Speaker 1: Always enjoy hanging out with you. Next time we go on a roller coaster journey, we should write another workshop as well.\u003C/p>\u003Cp>Speaker 0: Which is what happened. Sorry. That that that's what happened. We we wrote this basically while while going to a theme park together.\u003C/p>\u003Cp>Speaker 1: We had a\u003C/p>\u003Cp>Speaker 0: long, long, long, long drive, but something could came of it. Weekends. Well, on that note, thank you so much everyone for joining in. We will see you somewhere. Bye.\u003C/p>\u003Cp>Speaker 1: Bye.\u003C/p>\u003Cp>Speaker 0: Oh, wait. Where can people find you? Oh, online at Twilio dot.\u003C/p>\u003Cp>Speaker 1: They can email me at nocennwall@twillio.com. I do check my emails, and, yeah, just say hi.\u003C/p>\u003Cp>Speaker 0: I was ready for you to that. I don't check my emails, but you can send me an email there. Alright. Alright. We're done.\u003C/p>\u003Cp>We're done. We're done. Bye.\u003C/p>","Hello. Hello. Hello. How are you doing? It's good to hear you. Great, mate. How are you? Yeah. Yeah. Yeah. I'm doing alright. I'm doing alright. 
It's wild to me that we have been friends in the same kind of job family for so long, and I don't think we've ever run a workshop before. I know. I mean, I think it's been a missed opportunity for all the wonderful people that we haven't been able to hang out. Genuinely, it's crazy because we went to university together, for those of you who maybe have never heard. So we we've been known each other for a long time, and our careers have, like, had these moments where they almost look like they're about to cross. And I feel like this might be one of the first professional cross that you've said. I'm really excited. Yeah. So I thought we'd open by just introducing ourselves, introducing what this event is, introducing what we're gonna be doing, how it's gonna work, and then we'll launch straight in and make a mess of it for 2 hours. How's that sound? Sounds good. Sounds good. I could start by introducing you first. Hi, folks. My name is Nathaniel Okenwa. As Kevin would know, I talk a lot and I write code, so my friends call me Chatterbox Coder. That's where you can find me on all the socials. And I work for a company called Twilio, which is kind of what we're gonna be talking about today. Twilio, for those of you who don't know, it does many things. But here's the TLDR. We are telecommunications APIs that help you build amazing communications into your applications. That sounds like a lot of words, but you've definitely used Twilio at some point. If you've ever received a text message from a company, a phone call from a company, maybe a 2 f a text, lots of emails when it comes to Black Friday, even all sorts of communications, chances are they may have been using Twilio under the hood. Some of your biggest some of your biggest brands and favorite brands that people use use Twilio under the hood. But we also do so much more. So if you do wanna find out about some of the more advanced use cases, feel free to chat to me because the well of Twilio can be quite bottomless. 
Yeah. Definitely one way to put it. Yeah. If you're here, you might be coming from the Twilio world and not have heard of Directus before. So I will also tell you a bit about Directus. Directus is a back end, basically, that you can use to build wicked applications. You connect it to a new or existing database, and any number of asset storage, storages, storage. I don't know if it's like the word sheep where it's the same in plural. And you immediately get developer tooling, including APIs, a real time interface, authentication, and a user management system, and this really lovely web application with which to interact with that database, which you can easily hand to people who aren't developers. So you don't need to build APIs. You don't need to build kind of these admin panels, admin panel back ends. Really cool tool. And today, we get to converge the 2, which I'm really, really excited about. So, the project today, is oh, actually, no. A little bit more preamble. This event's happening as part of Leap Week 3. This is our week of announcements. On Monday, we did a keynote where we announced directors 11, which is coming out this week as a release candidate where we announce new shows for Directus TV. This platform you're watching this video in right now has, now 35 shows worth of content. You can go and potter around and find some cool awesome content too. Like, I must say. Thank thank you very much. Thank you very much for that. I'll pay you under the table later. And also, and and a bunch of other things, as well. This is one of the workshops that's happening this week. So this one right here is this Twilio workshop. After this, like, an hour and a quarter after the end of this event is a 100 apps and a 100 hours live where Bryant and some of our colleagues are gonna build an app in 60 minutes. I don't know what he's gonna build yet. I'm not sure he knows yet, so that would be chaotic as it was last time. 
Tomorrow, we're doing a workshop with Deepgram, which is a voice AI company. We'll be building a cool project there and then a community social networking thing on Friday using a platform that doesn't suck. So you can come and have a chat with other people who use or know about or are interested in direct us, in hopefully not too of a not too much of an awkward format. But we're getting ahead of ourselves. We're here for this Twilio workshop. And what we will be building today is a panel extension for Directus Insights. So Directus Insights is this dashboard builder tool here that we have. And in a dashboard, you have any number of these panels which can interact with the data in your database. But these panels aren't just for reading data. You can actually put components within them that are interactive. So you can add, like, forms and buttons, and you could just run arbitrary codes in them if you want. And that's what we're gonna be exploiting today. So so what I'm hearing, right, because I have not played around with insights. So, insights essentially just give you the ability to create components that are powered by your data. So there are some obviously components that are really well built. And then see, it doesn't actually have to just be reading data because the thing that I'm really excited about is the fact that the stuff we're gonna do is going to read data, but it's gonna interact with it and cause other things to happen as a result. And that's something which I think is very, very powerful, especially when it starts to come to, like, workflows and building maybe internal tooling as well for people. This could be really, really useful. Ding ding ding. So we've not spoken much about director, so you're kind of coming in it pretty fresh as well. So please keep asking questions. But, yes, you said a word you said a pair of words there, which I think is really interesting, which is internal apps. 
And I actually think that is the kind of hidden power of director's insights. While all of the panels that ship out the box are very much about, you know, building graphs and charts and reading data and analyzing it, it doesn't it's not just that. And you can also obviously build panel extensions and distribute them through the marketplace. So today, we're gonna build a panel that will have a drop down and show you the users inside of your directors project. With other phone number to those users. And so there'll be a phone number attached to them. And then you press a button, and we'll use the please forgive me if I get it around the Twilio voice SDK. Yes. That's what it's called. Fantastic. And we're gonna call that that person from the browser. So the browser will connect to a phone number and do a two way call. Cool. Yeah. I'm really excited because this is something which I think lots of people oftentimes, we end up having tools in different places. Right? So for example, especially when it comes to telecommunications and telephony, we always, like, separate so that you have your email account or a phone, like, application or something separate to all the places you have you there. And, yeah, I'm not saying that's not bad, but when you start wanting to be efficient or maybe have some more smart and cool interactions, bringing those 2 together gets really powerful. And I think, like, what I could see here is and we could talk about, like, potential use cases. Look, we're not trying to sell you a thing, but kind of just tell you practically some of the ways in which you could be using these these tools. And I think like this, it could be a good way for you to, for example, like, quickly integrate communications without necessarily exposing PII. Like, you can have it so the person using the tool never sees a phone number, but it's still able to phone them. 
Some of your favorite ride sharing apps kind of use a similar technology behind this if your driver ever calls you to be like, hey. I can't find you. But, anyway, I'll stop talking. Let's get on. No. No. Please do. A couple of other just bits of context. So we'll refer to the director's docs, obviously, a bunch today. We'll take a look at some of the extension docs. As part of that, inside of these docs, there are a couple of existing Twilio guides, and one additional non Twilio guide. And I wanna talk about them before we crack on so you kinda understand the approach which we are going to use today. First of all, there is this guide here called use custom endpoints to create an authenticated API proxy, which is a lot of words. So to break that down, endpoints, custom endpoints are one of Directus' extension types, and they allow you to just create kind of what they sound like, arbitrary endpoints that you can hit. We expose an express router, so it works just like that. You know, you set up route handlers, and then you can hit them. But within the context of directives, you can start doing stuff like checking if the user's authenticated in in directors or checking their permission sets and so on and so forth. So, this is an example where we actually just expose the full Twilio. Let's find it here, where we expose let me find it. Twilio host. Yeah. Where we expose just a root URL. And then what we do is we check whether you are authenticated, and that's the key part right here, which means you can't just hit this endpoint from anywhere. Right? You have to do it either with your Directus API token or cookie, or from within Directus itself. So that's 1. The second one is, another Twilio integration. This uses our automation tool, direct us automate. They're called flows each kind of, workflow. And this one here allows you to send SMS notifications. That's kinda interesting. Can you do for these? 
Because, like, obviously, with SMS, when I'm talking to devs who are building into their applications, they're often either a couple of things. Either they wanna send a message, like, when a specific action occurs, Maybe they wanna do a batch, like, at the end of the day, or there's, like, a campaign going out, especially when you start getting to marketing and stuff like that. And the same with email, because we we also you we have email API. So, like, how can people trigger those, those things? Those flows? Great question. Five ways. Event hooks. So something happens in your director's project often in one of your collections. A collection is a database with, like, additional director's metadata. So when I say collection, you can think table, but it is not just a table. Right? When something happens in your database, you can immediately fire off an automation. So this can be, a new user is created, a new file is uploaded, or any of your tables are have CRUD operations, executed against them. They can run as blocking, as blocking triggers, which means the whole flow has to see its way to the end, and then the database transaction will execute so it, like, intercepts it. And as a result, you can actually fail out. So, we have an example somewhere in our docs using a verification API where if you fail the verification, we actually just block you from signing up entirely. It just never commits to the database. Ah, it's really cool. On the other hand, it is cool because you can also manipulate data in the middle. Maybe you enrich it or stuff like that. That's really, really good. And then and then you have actions, which happen after data after data has, been committed or after a transaction has been committed, then it will run. So that's event hooks. Then we have webhooks, so just inbound HTTP requests. We have a schedule. So, you know, you set up an interval using, the 6 point Chrome job syntax, and then we'll run the flow, which is how you could then batch. 
Or, 2 more, actually. There's another flow so you can compartmentalize functionality and trigger other flows from core from, like, your your controller flow and path data in and backup and stuff like that. And then finally, manual. What manual does is in your data views where you've got your list of all your items in a collection or you're in an individual item in the editor, there's a button you can press that will pass in the IDs of the items that you've either checked in the collection or the page, and then send that into the payload as well. Manual flow triggers also have confirmation dialogues so you can pop up and collect a bit more information. There's the button there on the right. And it can you can pop up a box, ask for arbitrary information, and then trigger the flow, which I think is a very, like I'm looking at thinking comms. So they're very communication type, you know, option here. You can go write a message and hit go, for example. So they're the triggers. There's a bunch of inbuilt operations. You can also build custom operations, but that allows you to interact with your database, make it external web requests, just write some arbitrary JavaScript that can manipulate data in a little more of a clever way. But, yeah, that's something that's really cool about this tool. Yeah. No. Absolutely. And a thing which I'm kind of noticing is the number of different ways, the flexibility that is given to you to try and to to trigger these things in different ways allows you to put into put the automation into different parts of your application. Especially, I'm loving the filtering functionality. I can already spin off a couple of great reasons why I'd use that, especially when you start to want to reduce computational load or con reduce things from blocking our operations from going through or vice versa when you actually do wanna filter stuff. So that's useful. I'm gonna play around with that next time I get a chance. So I wanna jump in. 
There's one more guide I wanna show because this guide is going to basically be the north star for our approach today. And it is this, which is filled a little bit with, but it's using external weather data in a custom panel extension. And you might be thinking, I don't really see the translation between this and using Twilio in a in a panel. So before I explain this, maybe we take a moment to explain the Twilio flow in this all the way from generating I think it'll come in quite nicely. I can share your screen or you can just talk. It's completely up to you. Well, I can start by just talking, and then we can, we can get even further. So we start with the Twilio voice API. So Twilio has a bunch of different APIs, and we have one that we focus on programmable voice. I would say one is more of a collection because even with invoice, there's a lot of diversity depending on what you want to do. In today's example, kind of like the external weather data, we essentially want to have a component in a web page, and this component is going to be able to make phone calls out to the users that are stored in the director's database. Right. So in the director's back end. So what we're going to need to do is we need a couple of things. We need a browser component or a component that that lives in a browser. So essentially a browser that is able to make phone calls. Your browser, unfortunately, doesn't have telephone APIs. So what your browser does is your browser can connect to Twilio, Twilio's servers. And then Twilio then handles and creates telephony using, old school telephony that, like, is really, really boring slash complex or really, really interesting depending on how what floats your boat, and then does all of that in the background. But what we do is we expose an SDK which you can build into your front end applications. 
Now there is one extra step which maybe adds a little bit of complexity, but I just wanna talk a little bit about that, which is just a bit of authentication and security. Because we can't just hand anyone the keys to make phone calls from any number in the world. Right? So when you have a Twilio account, you have a phone number. And when your browser connects it with you, what happens first is there is an exchange of a token. A token must be generated, it's what we call it, and that browser must use that token to connect to Twilio. Now that token gives it a couple things. It first says, hi. I am Nathaniel's computer, and I am connecting to Twilio, and I'd like to be able to make phone calls. Also, it says I have a phone number identity. So, like, a a a phone number which is attached to my identity. So when I do make phone calls, I will come across as if this phone call is coming from said telephone number. And these things happen in the background. Now the SDK has a lot of, flexibility, so we start off with a few basic methods which you can use. And then if you want to build a really, really custom interface, there are a lot more events that we expose. But if you are looking to get started with Twilio voice, I would recommend heading over to the Twilio docs, which I am showing on my screen. They often take you through starting your first phone call. This is from a server. You may want to start your first phone phone call, but then we also have these client side SDKs, and we talk you through how you use the client side SDKs. We have JavaScript SDKs, iOS, Android, and we even have React Native SDKs. I often like to say it's good to start with a quick start and then work backwards because the quick start builds all of that functionality, and then you can customize it as you are. But then there is also the reference if you want to dig deeper into all of the methods and the things going on behind the scenes. Is there anything I've left out? 
I know we've talked about this before. No. You did mention the thing I hoped you would mention so I can explain why the weather data and Twilio are related. Before I jump in, actually, a reminder for those watching you, we've got a chat here. I'm watching it. So by all means, ask questions, thoughts, concerns, grievances even, and we'll address them as we go. But let's come back to my screen. Right. Why does this matter? Extensions in Directus live in 1 of 2 places. They live in the data studio, which is the web app, or they live in the data engine, which is the back end, an API side extension, you could call it. The browser, because of just security in the browser and the way that we lock that down, can't always confidently make an external web request. Right? It can't go off to Twilio and say, hey. Go generate an access token for me. In this example, the same way it can't go out to a third party weather API confidently. Like, if you control both sides and you configure security on both sides, you'll be fine. But you often don't control the the vendor. You don't control Twilio. You don't control the weather API. So what do you do with this? Well, you use an endpoint. Use an endpoint, which is an API extension. Use an endpoint first to actually make those external requests do what it needs. And then your front end extension, the panel in this case, and in this case in the tutorial, then calls an internal endpoint because it's that's what it is now. It's now gonna be like / Twilio token, that my Directus project slash Twilio token. So it's now an internal API. So you you act you treat it like a proxy, which is actually not dissimilar to the post I showed earlier. To bring these together and make sure that you always have both of them and you don't have to deploy them separately, we have a concept called a bundle. So first, you create a bundle extension, which is an empty shell, then you add an endpoint, then you add a panel. 
And when you install the bundle, you get both of them together as as is required. So that's gonna be more hearing you right. Just as a mental model, what I'm hearing is in this bundle, if this bundle was the application that fits into the panel, you kind of have the component, which would be the front end logic and then the endpoints can stand in for like a like almost like your server side. I know this is a weird way to think about it, but ways that you can query data and interact with other applications. Yes and no. The only thing I'll change there is query data because panel extensions, for example, can query collections in directors without needing a a server. So if it's within the bounds of your director's project and the services we provide, in fact, we have them the the the they are called Query external data. So data from outside. Correct. Correct. Because, for example, your front end, your front end, your app extensions is what we call them, your front end extensions, have immediate access to all of your data because you're in this authenticated, you know, box that you can work within. But, yes, exactly. So the reason I wanted to show this, this weather panel thing is because that's basically we're gonna follow a very similar approach here. We're gonna create a bundle. We're gonna create an endpoint which will allow us to generate a token, then our panel will call that to grab the token and then use the voice SDK. So we've been talking for probably about minutes. I'd love to jump in. Now the format this is going to take, we decided just because, of of of the fiddliness of certain parts of this, is we are gonna go nice and slow, but we're going to present. We're gonna build it together. You can watch, and this will be available on demand if you wanna play along. This will also get turned into a blog post, and hello there is on demand. This will also turn into a blog post you can follow if reading, you know, technical material is more your thing. 
So if you're watching along, I I would say don't try and play along necessarily, but, take advantage of the fact that chat is here because we're we're here to chat to you. Right? Otherwise, this would just be a just a video, but this is a live event for a reason. Before we jump in, is there anything else you wanna add? Nothing comes to mind. Let's get going. Let's get going. So Why do we do this? Why don't we have you start with, like, spinning up the environment that we're gonna use, getting us set up, so that we are ready to start building the telephony parts? And then I can step in. I can focus on building the Twilio parts. I know we we've we will switch between them and then connect those together and then do a little test test as well. That sounds good. I actually had a little bit of a rundown that structure which looks slightly different. We'll follow yours, but as a result, this might end up being slightly more chaotic, but I like that. I like that format a little bit more. The the gremlins have just returned home, so we'll see what happens in the next 10 minutes. So can you hear that? You can hear that. Yeah. Oh, now I can. Now I can. Yeah. Now you can. So here I have a brand new completely empty code editor. I am running Docker in the background. We'll be running Directus locally with Docker. So if you're following along, you'll need that. And what we're gonna do first is we're going to create a Docker Compose dot yml file. And I do just happen to have here sequel I do happen to just have a light one prewritten because, you don't wanna watch me write this. Go grab the latest version of Directus, expose it on port 8055, map some of the internal volumes that Directus uses to local directories. So when we first run this, it will create a database, an uploads, and an extensions folder. We need a key and secret, which you should replace with a random value. To me, replace with random value is random enough for today. 
The initial admin email and password, you, of course, can change that. We're gonna just use SQLite because I don't wanna we don't need anything more heavy, and so that will, pop the file inside of this, data file here. WebSockets are enabled, not that I think we're gonna use them today, but this exposes a WebSocket and GraphQL subscription interface for, subscribing to updates in Directus. This one line here is not in our quick start, in our docs, and it just will improve the developer experience of building extensions. Whenever we'll we'll set it running so whenever we save it or rebuild, whenever it rebuilds, it'll auto reload in Directus. So with that done, we're gonna go ahead and, just taking a quick look at the chat. Hello, Scott. And that's c collection that logs different tokens for Twilio or other APIs to variable use token by another key. Then you have Internet access to modify tokens for phone numbers or something. This is to always be missing in Directus, and you end up doing this outside Directus UI. Hold that thought for some short period of time. We know. But today, we'll use environment variables here in, here in the Docker Compose file. But we're not gonna that's not gonna be the approach today in the interest of time. And then Docker Compose. That should just do its thing, he says. There we go. Running all the first time setup, and then it will run-in just a moment at 8055. So local host 8055. There is our brand new Directus project. That was so quick. Yeah. I like it. It's pretty sick. And this is also full-fat Directus. It's not like a shitter version you run locally. Like, it is the full thing when you use Directus, this is what you use. The only caveat here is SQLite does not have geospatial plugins included out the box. So if you wanna use the mapping features, you'll wanna use just a different docker compose that's in our docs. It just takes a little longer to bootstrap, and it it wasn't necessary for today. Okay. 
Right now, there are a bunch of Directus collections that have been created out of the box. Directus doesn't alter your tables in order to run. Instead, we create this, I don't know, like 20 ish tables or prepended with directus underscore. That means if you wanna get rid of directus later, you can just delete the application, delete these tables, and it's like we were never there. But this handles all of the UI, all of the settings, all of the configuration, they all live here. And the one thing I'm gonna do, because you can extend these, you can't delete these default fields, but you can add new ones, is for all the things that are here, avatar, email, title, you know, all of these, there isn't one for a phone number. So I am just gonna add phone because I think that's gonna be useful for later. Cool. I, out the box, have this one user here, but I think it would be So quick question. So when you added phone, you you did it as a string. Just question, can you do any sort of validation from there Yes. I can. On that input? Yeah. You can indeed. Over here, edit field validation. Nice. Nice. And and, validation, what kind of validation do you use? What other rules? What is it? Fantastic question. I think you could just throw in a regex to be honest. Yeah. It matches. You could But also, it's got a bunch of other ones. Nice. Nice. That's awesome. Yeah. Yeah. Yeah. It's pretty cool. And you can do, like, you know, logic you can do grouping. Right? And or and stuff like that. We'll leave it, and we'll just trust ourselves to put phone numbers in. Right? Don't do that in the real. I worked at another telephony company that offers APIs in the past. And you and I both know, no one ever knows how to put a phone number in correctly, and it's a huge pain in the ass. So enforced validation. Because many different places, actually regionally write their phone numbers differently. 
So the way Americans write their phone numbers, especially compared to Europeans, Even, like, recently, we I had been I had built an app. Go to Singapore. We're testing the app. The app fails because Singaporean numbers just have way more digits than we had accounted for. So Interesting. Make sure you, like, keep it flexible for all of these inputs. But anyway, phone number. Give me a phone number in whatever the format Twilio likes me. I think Twilio likes the plus. Plus? 44 Yeah. 79 4757 41 48. Why are you giving those me two numbers at a time? Anyway, whatever. Here. There. When we put you in. This is a user now in our in our project. If I provided the email and the password, you would be able to, you know, log and enable the app access. You would be able to log in here. The password is hashed when you save it, but you could you could just pop it in here. Yeah. Yeah. See? I see it in the chat here. I have to add a one in brackets. Like, it's a huge, honestly. Also, Americans in particular, that one in the brackets and sometimes you put other numbers in the brackets too, Okay. Absolutely. Archaic. Anyway, it's it's like as it's as bad as them in the month before the day and the date. Let's I'm not even gonna Alright. But but seriously, though, there is international standards, ISO. I forget what the rest of the number is. But there is a Is it the E164 thing? I believe so. Yes. There is an international standard for phone yes. E164. That's what I meant. So you know, ISO. I don't know why it where that came from in my brain. But, you should check this out. And what you can often do, there are lots of tools and one that is actually sponsored by Twilio where they can, change inputs to fit the format. So it's always international plus international code, then the digits with no spaces. But, interestingly, other telephony API company matches e164, but doesn't put the plus in front of it. So I don't think it's technically to standard, but it whatever. 
Twilio needs the plus. We've put the plus in. We we Twilio doesn't need the plus. Like, we will pop in the plus. Interesting. But but the the important thing that I recommend to people is that you use that format when you have it stored because it's then uniform across every single phone. Absolutely. I wanna do is have some people have that ability and some people don't. So whichever way you do it, store it in your in your, in your, database, in your back end as, like, just one format, and then have logic that separates it out and changes what it looks like to users based on regionalization. And not to derail us too much here, but Twilio does provide an API which formats phone numbers. I think it's called insights or lookup or something. Lookup. Lookup like the basic tier or whatever whatever will format the number for you. So you could introduce that as either a filter or an action event based trigger, and you could format it before it ever gets to your database. You rely on, you know, the user to get it somewhat correct in order to know. You know, you drop the country code entirely, for example, and sometimes it won't know which country is it. But in any case, let's I feel I I feel our time slipping away. So we have set up a project. We've added this extra field. I've added you in here. You'll be the guinea pig who gets the calls. Next in my little list was to add a bunch of environment variables and handle, like, all all of the Twilio configurations so we can focus on extension building. But I'm happy to get to the point where we first need a Twilio environment variable to do that. So it's up to you. What do you wanna do? Why don't we jump to my screen and start from a console? I'm gonna start from the dashboard. Like, 5 times zoomed in, like 5 zoom in points, please. Would you like me to zoom in more? No. That's good. That's solid. Cool. Alright. Let's go. 
So the first thing we're gonna need, because we're going to be making phone calls, is we're going to need a phone number. So we're gonna come over here in our dashboard, and we're gonna find our active numbers. Now I actually went in and bought a phone number earlier. And the reason I did this is because, phone regulations and some some of you maybe who've used Twilio in the past, Buying a phone number can be very, very quick and simple. Simple. However, there are regulations that come in from countries and, governments, and it's really good because it helps protect us from spam, which means you often have to upload, either ID or maybe address an address and other sorts of identification to say that you are who you say you are. So I had to do this. It literally I uploaded my ID with this number, and within half an hour, it was done. I will say I have a at Twilio dot com email so that expedites it. But within 48 hours, we usually get 1 get it done for you. And you can still start building and sending messages to the phone number that was verified with your account. So, for example, when you create an account, you have a 2 f a number. You can start messaging that 2 f a number, but then start messaging all users. You then need to have that extra level of, ID and regulation com regulatory compliance. Okay. So I'm based in the UK, so I think we should just use a UK phone number. Works for me. So now I have a UK phone number, and this is the first, I number that I'm going to hold on and keep into a safe place. So now I own a phone number. Now the thing is, for us Could I put could I pause you? Yes. Throw me the phone number in our little joint chat. I wanna show what I'm gonna do with it because then I don't need to show again what we're gonna do with them. If it's okay with you, give me 2 ticks. Steal it stealing it for a moment. Over here in our Docker Compose file, we have these environment variables. I've added one called Twilio phone number. 
I'm gonna take the one that Nathaniel just sent me here, and there will be a series of other environment variables as Nathaniel goes around the UI. I will populate here. A couple of them are sensitive, so I'm no longer going to show the Docker Compose. I'll show you what happens once we get them all populated, though. Okay. As you were. Awesome. So now that we have this, the next thing we're going to need, and it's just underneath, is a TwiML application. TwiML applications are pretty important. They are ways that we can essentially give Twilio some instructions of what what to do when phone calls happen. We are going to create a new TwiML application, and we're going to call this Directus for the workshop. Now, what you often have is you have a voice configuration. This says what we should what should happen when a phone call is made to or from this TwiML app. I'm going to come and fill this in in just one moment. But I'm gonna hit create. So now we've got this Directus Twilio workshop. We're gonna open it into a new tab. And I'm gonna come back and grab this SID. I need this SID for what we're going to do next. So this is the identifier of the application that we are going to be using. Alright. One more thing that I need to do. I'm going to be using something called functions to do this. You don't actually need to use functions. You can write your own endpoints, and your own ways to do this. I am going to be using this. I'm going to create a new function. I'm going to be creating a function that uses the Twilio client quick start, and I'm going to give it that AppSid that we just collected. I'm also going to give it a phone number. This phone number is the caller ID, and we're going to create it. Drum roll. It's creating in the background Any second now. I know why it's failed to create Twilio cogs go brr. The reason why it's failed to create it is because, I did a practice run a couple of minutes ago, and they have the same name. 
So I'm just gonna quickly delete these and then start again. Create this, pop in a caller ID and pop in that ID. This time, touching wood, it should be fine. Uh-oh. Okay. I might have to come back to this. Come on, folks. I can always do this step as well. Like, it's all good. We'll we'll we'll we'll get this. We'll get there. You got this. Okay. Once I've done this one, we'll we'll come back to it in a second. I think it's just, like, deploying, and it's actually going and taking down some assets, and it maybe needs to be taken a second. But we'll come back to this. Okay. So we'll come back and do this later, but for now, we have our Twilio phone number. We have our Twilio API appsid, and then we also need one more thing. We need to get API keys and tokens. That's How many do you account set? Yep. As well. Yeah. Keys and tokens and then accounts set. And this is essentially how we can, authenticate with Twilio. Give me a second. Just need to log in because I've been in for a certain amount of time, and I'm trying to access a secure page. And I need to turn over and hit my So when all is said and done, to generate tokens using a an endpoint in Directus and to do all the stuff we're gonna need to do later, there are 5 pieces of information we will need from Twilio. We need a phone number. We need the the TwiML app SID, which you've generated. I'll grab it off you in a moment. We need your account SID. We need an API key and secret, which we're in the middle of generating now. We'll use the API key and secret to generate tokens, and we'll do that in the endpoint. Cool. Sending over the SIT and sending Lovely. This is the TwiML the TwiML app, Sid? These are the API keys and secrets, and the TwiML Sid begins with an a p. So if it starts with a s k, it is always a secret key. If it starts with a a p Man, you just sent you just sent me 3 long ass numbers, and I don't know it right. The first one is what? So if it starts with a s k, it's a secret key. Yeah. Right. Right. 
Right. Got you. Got you. Got you. Got you. Got you. The one under the secret key is the, secret no. So the see. Yeah. Secret keys partner. Secretly keys is the secret secret? Yeah. And then the last one that starts with an a p is the TwiML app. Yeah. Right. I'm gonna just say this again. The one that begins AP is the TwiML app. Yes. The one that begins 2 k is the API key. Is the API secret? Alright. And the one that begins SK is the API key. Right. Okay. Cool. That's why we double check. Can you imagine if we got much further and we're just like, why the hell isn't this working? It's because I missed copy and pasted a key. Freaking hell, man. I need your account, Sid, too. And then we've got everything we need from the Twilio side. Account Sid asked. Thank you. And we need to just make sure that function can be deployed correctly. And that's linked to the TwiML app. Let me quickly go there and try this. I'm just gonna quickly hit this function. Alright. Good. We're literally still in Sab. I'm having a blast doing this with you, by the way. We should do it more often. Yeah. Alright. Now it works perfectly fine because there's nothing else that has the same name. And let me just quickly tell people what this generates. This generates a function with just a little bit of code that just says, that if this number is coming from from a Twilio client, it should be forwarded to, whoever it's going to be calling and vice versa. Right? I'm gonna copy this It's kinda nice that you could just create it from the boilerplate and forget about it to a degree. I still need your account, Sid. And then we are all groovy. I mean, get onto building building. Yeah. You didn't send it to me. Sid. It's the last one. A c. Oh, cool. Alright. And, last thing, I'm just gonna quickly grab this URL, head over to the TwilioTwiML app that we created, and pop it into our voice. And voila, we are done. Lovely. And what I will just say is the recording will be available on demand. 
We'll also write this up. So there was there was quite a few little moving parts there. They do all have purposes that, you know, that are important in this whole journey. We'll detail them there as well so you can in case it hasn't quite sunk in, it can. Of course, we also have the chat if you wanna ask further questions now. But with that, can I take my screen share back? Yep. Wonderful. So I popped all of those environment variables inside of the Docker Compose file, and now I just need to restart. So control c, up and enter. And now we restart the Docker container, and it will grab those variables and put them in the environment. So there we are. So now we're ready to actually go ahead and just start creating an extension. So what we're gonna do is I'm gonna create a new a new, terminal over here. And, in here, this is this folder, I wanna get into this extensions directory. So cd extensions, and I'm gonna run the npx create directus extension. Extension. Extension. That doesn't look right. Extension. Extend. That's correct. Latest. Just to make sure I definitely have the latest version of that. So, I get to pick the extension type that it will boilerplate, and I actually want a bundle, which I just happen to know is the last in the list. So I went up. I can call it whatever I want. I suppose I'll call it, Twilio, Twilio. I'll call it Twilio, whatever. Auto install dependencies. So just a reminder that a bundle isn't really an extension type in its own right. It's a shell where we can put multiple extensions. Now when these were originally created, they were actually to share dependencies and, you know, and reduce the overall code bundle size, But they also have this purpose of making sure you can distribute multiple extensions together. So it's just going and scaffolding that now. And the moment that's done, we're gonna go ahead and add an extension straight into that bundle. So we'll just give that a moment there. There we go. 
We will go into this new Twilio directory, and we will go, we'll run npm run ads. I believe that's what it is. And once again, we get to pick an extension type. Now if you remember, we're gonna build 2. We're gonna build a panel and an end point. The end point will generate the token. So we're gonna do that first, make sure we can generate a token, then we'll move on to building the panel. So we want an endpoint, and we'll call this one Twilio token. We'll just do it in JavaScript, and it will go and add that to this, package here. And for those of you who are wondering why are we doing all of these Twilio tokens? Twilio tokens are because you're not going to actually put any of the credentials that we've just generated. Most of them none of them actually going to live in the browser, because that's unsafe. Someone could find them in the client. But what they do is they can be used by this function to create a temporary token, which then gets sent to the browser. Does that make sense? So we use these tokens from Twilio or no. These credentials from Twilio, I'm gonna call them, to create tokens. That token, it uses the account SID API key and secret to say, I am a Twilio approved. But I am the person on this account making this request. It then uses the TwiML appsid to say, this is the application I want to interact with and I would like to have permission to interact with. And then that generates a token which gets sent over to the browser, which can now interact with Twilio. Hang on a minute. Oh, did it did it update in the thing here? It does say extensions reloaded. That was just while it was in the middle of boiler plating. Just to I'm I'm just gonna restart the Docker container. I don't think I actually need to do this. I just saw an error and I was like, let's work that out. So I think it should just be Twilio token. Twilio token. Oh, there it is. There we go. So that so that is this wrap. Ahoy ahoy world. Ahoy, world. 
And for those of you who are wondering why we said ahoy, world. Ahoy was the greeting that was created for it wasn't created for, but it was used for phones when phones were first created. I thought it was the first word ever said down a phone line. Yeah. Yeah. Okay. Oh, holy world. Great. So we check that that works. Now in here, we wanna go ahead and actually, and generate a token. So I'll just create a new endpoint, a new route handler here. So router dot post, and we'll call this one generate rec res. Again, if you've ever done any kind of Node. Js web development, this will feel very, very familiar to you with good reason. This is just the express route to the I was talking about it feels like express. Yep. Yep. Yep. That's exactly what this is. Now inside of here, we wanna go ahead and use the Twilio, helper library SDK. What do you call it? From here, it will be the Twilio helper library. The Twilio helper library. And so we actually wanna go ahead and install that. So npm install Twilio. Fantastic. Now we wanna go ahead and, and use it. So there is a page in the docs. Let's find it. I actually saved the link earlier. It's just shortcut here. This is the access tokens page, and it shows us how we can generate access tokens. The access token oh my god. What are these kids doing? I swear they're getting, like, pamphlets, and they're just going, can you hear them? I can a little bit. Yeah. Like this. Anyway, so an access token is just this really, really long string here, that contains this information. Web token, just in case some people know. So you can break it apart into its pieces. Now we just wanna go ahead and generate them, and the docs, for Twilio provide these, these these different, snippets here. We are gonna be using the voice SDK in the browser. So we're actually gonna use create an access token, create an access token. So what we're gonna do here is is go ahead and copy and paste this, into this. 
Now there's a few things we're gonna do, we're gonna do ahead of time. So we are going to one moment. We are going to, obviously, pull these to the top. Those we definitely need. We need to bring in our environment variables. We do need to bring in a a couple of our environment variables, or we could just use them use them directly from process to end. But I was gonna say this is an ESM environment, so we are just gonna very quickly change, change the way that we import Twilio here. So import Twilio from Twilio, and then the access token will be twilio.jwtdotaccess. That was just a small small, semantic change there, but it is important. Then we'll take the rest of this That should bring you in the environment. You're dead right. You're dead right. And I believe that is exactly what I called them in the Docker Compose file, so I don't need to do anything with those. Then and this is now where we're moving into the, into the actual route handler itself. We're gonna go ahead and generate a ticket. Through that as well so that people know what's going on. So the outgoing application SID is the application SID we created earlier. So if you remember, I generated it, and he's going to pop that in. And then we also have an identity. Now remember, because this is a browser panel, this browser panel could be anyone. Usually, you would decide a string to identify who is this browser. Quick question. Do we have access to who the logged in you Directus user is right here? We absolutely do, and we're gonna come back to this once we have the panel because the panel is gonna send an authenticated request to this endpoint. And within that authentication will come the ID of the user along with all the roles and permissions that that user holds. So for now, we'll just hard code it as a user, but we are gonna swing back around to this later and, as you said, actually provide the user ID. Cool. Alright. Now I don't think if we take a look, you know, alright. 
We're not doing incoming calls, but I think we'll just leave it as is. We generate a new access token with with all of these values we've set up. The only thing left to actually do is, is return this. Yeah. Res.send, and we'll just send this value instead of the document. People are wondering, they're like, what is that voice grant stuff I am seeing? That's because you can you might want to give multiple permissions to different products with one token. So you can give a voice grant, a, a messaging grant, a conversations grant, a video grant. You could do all of that and send it in one token just to give that token lots of permissions rather than doing this if you're using multiple products. Absolutely. So with that, it's built, which is cool. This should generate a token for us if we go to slash twilio token slash generate as a post request. Once again, it has the voice grants. It has our account SID, API key, and secret. It has our identity, which is, for now, just a fixed string, but we will update that in a moment. And then it will go ahead and generate a token and, and provide it as a JSON web token, which is what we're gonna need in our panel. So let's just quickly test this. Expect to see a string of epic proportions and randomness show up in a second, if this works. Yep. So Twilio token slash generate. And we do. Perfect. Lovely. So that's fantastic. That's actually really at its absolute core, all this endpoint needs to do. There are 2 things it doesn't do, which we're gonna do later. The first is actually identify the user, and the second is be authenticated at all. Right now, any user on the web, if this was a hosted application, could hit this endpoint and generate tokens. Not good. So, later on, we're going to make sure that this is more locked down. But, for now, I think this is good. Yeah. And I think that and you can keep going while I say this. 
I think that people must remember on the web is just because a, URL is not publicly advertised does not mean that people will not be able to find it. And oftentimes, people use, like, just URLs are public to create really important private stuff like this and end up creating exploit, like, holes that the application can be exploited through. So make sure you protect this, outside of Demoland as well. Okay. So we're creating now our actual panel. Yeah. Is that how you spell dialer? With yeah. That was perfect. You corrected yourself. Nice. We'll just do it with JavaScript again. So we're adding this to the bundle now. The nice thing about this is we only need to be running that npm run dev at a bundle level, and it will rebuild when the things under are updated as well. Cool. So I think you see the extension. Yeah. I let's, let's npm run dev this, and, let's take let's take a little look firstly around the code and then then what it does. So there is. That's like, where is it? Oh, there we are, the dialer. The dialer is made up of 2 files, the index dot JS and the panel dot Vue. The front end of directives is built in Vue. JS. And therefore, when you're building these app extensions, they are also built in Vue. Js. So we have an ID. This has to be unique across the whole system. So, you know, you can't have 2 conflicting extensions. It's generally best practice to prepend this with your author name, to, you know, namespace it somewhat. But for now, I'll just be like Twilio dialer, whatever. We provide a name. This will show in the UI. Don't know why I keep writing it like that. We are going to put in an icon. You can use any Google material icon. Descriptions, make own calls. Now all I want you to do for a moment is take in the fact that there is this option called text. And I'm gonna show you what that does in the actual directus UI in just a moment. Now the panel, is a view component. It takes in the props from from the index JS. So here we have the text. 
Text is there. That's how data gets passed between this kind of configuration pane and the panel itself, and then it's just a view component. By default, out of the box, it comes with the options API. You are, of course, completely, able to use the composition API, which is, I think, what we're gonna do today. I think most of you developers now kind of lean towards expecting to see that. So what I'm actually gonna do is delete no. I'm not gonna delete anything yet. I'm gonna show you what this panel does. So let's create a new insights dashboard. We'll call this Twilio workshop, and we will add we will restart container. I didn't need to do that. I just needed to refresh the browser. That was it. I couldn't remember what step I needed to take, so I'm like, let's do them all. There we go. Gotta be sure. Better safe. You gotta be sure. There's our Twilio guy. There's the icon of phone. I was really also pass in yeah. It was easy. Thanks. I mean You can also pass in these SVGs. Drop it. I'm still impressed. Cool. You can pop in SVGs too. And as you can see, the SVGs are all purple. That is the theme color of the director's projects. You can also use these CSS variables in the SVG, which is kind of nifty. So they all feel, you know, like they belong. There's the text that this is the show header show header right here. You can add extra configuration if you want, and there it is. Show header, you can put some text in, Call people. There it is. There's the text that came from the from this configuration options. I think it's called options pane. We don't want text, but, just wanna show you that's that's kinda how how it works. So now we're going to start now we're going to start ripping it apart. First thing I think we're going to do is we are going to let's just have a think here. I think we're going to remove all the options. I don't think we need options in this. No. It has you know, it's it's going to just show the users. 
Like an example is, for example, a lot of companies are international. So they end up having, like, a German number, a UK number, a different number. So I'm saying I'm probably seeing you could have, like, as an option, a drop down to be, like, I wanna phone this person, but from this number because they're in Germany, and they wanna use the German number. So they it feels familiar to them, something like that. Absolutely. Absolutely. I think what we'll do here is we'll just empty that out to a div and onto the let's just let's just rock on. Let's just you know? Yeah. I love some boiler plating, but we're just gonna we we get we're gonna go from scratch. So but and I'm gonna use setup here, so we'll use the composition API. I'm not gonna bother reloading the browser because nothing's gonna show up. It's gonna be an empty box. So now we're at the point where with this blank slate, we can really talk about every line of code we're gonna write, help you understand what it does, and build this extension that will call this this endpoint we've created, which is now an internal API endpoint. We'll instal and configure the voice SDK, and we will eventually just make a phone call. I'm gonna pause for just a just a moment. We're about halfway through our time, and this is comfortable. This is a good spot to be in. Does anyone have any questions in the chat? And, Nathaniel, too, you're kinda seeing this set up, I think, for, you know, one of the first few times. Do you have any questions so far? No questions. I'm curious. I'm really excited about some of the the options as well. So you know how it had text. What are the types of things we can put into the options? So, like, I I just said, like, a drop down list, but are there other kinds of, interactions or ways we can select? Yeah. Yeah. Yeah. All of the built in they're called interfaces in Director, so form input, you know, you could we call an interface. You can use any of the built in interfaces. 
So you have the the WYSIWYG. You have a codes input box that does some syntax highlighting. What the hell is going on? You have a text box that, you know, can be integer or float type. Like, you can enforce that. You have sliders. You have relationships. You can actually pull data from collections and select an item within the collection. All of the built in interfaces and directives are exposed to you through through this. Interesting. Good to know. Good to know. Yep. Yep. Yep. And you can also in fact, if I just if I just undo this slightly, there's other, like, meta information you can provide. You can, hide values, make them like a password It says width full. You can also do width half and put 2 things side by side. So we give you some flexibility around how that looks and feels. And last but not least, like, because I'm thinking, like, we're making this panel and it's kind of just a demo one. But people can people, share panels maybe for other users to to maybe try out? Yeah. So, on the director's marketplace, you can publish them on npm. If you boilerplate it with, the CLI, which I boiler plated mine with, we everything's set up. You can just push it straight to NPM, and in a few hours, you'll see it appear in the marketplace. Awesome. Yeah. Cool. Right. So let's move into the panel dot view. Let's move into the panel dot view. So we're starting from scratch. What we're gonna do here? First thing we're gonna do is in is import all of the composables that come with the view, with the directors extensions SDK that we're gonna use today. So we're gonna import use API. We'll talk about what that does in just a moment. And use items. Once again, we'll talk about what that does in just a moment. Is related to how well, I know we're gonna talk about in just a second. No. Yeah. These are things that are coming from Directus that we can use Yeah. In our component. Correct. 
And, actually, I might just take this moment to pause and show you inside of the composables right here. These components make working with direct this easier. Use API effectively wraps your API request with all of the authentication that comes in your that is in your Directus client. So you could, of course, just use fetch. But then how does your your how does your endpoint know it's you? But if you instead use the use API, which, I think, yes, actually, it's under the hood. It also sends all of your client details. And that's how we're gonna know which users there, whether they're authenticated and so on and so forth. And this is really because I'm guessing it means you don't need to write, like, all of that metadata into your access to your endpoint URL. We're just gonna use the use API composable instead. UseStores allows you to go in and actually access information, use information within your directors project itself. So you can access things like all of the permissions users hold, data about the collections, the metadata about collections. And the other one I imported was the specific use items composable. And this will allow you to query data in your director's project directly from your panel. So we're not going off to a back end to do this. We're doing this within the client itself. And so In turn, it knows what you have access to and will honor the access control of your logged in user. So yeah. So this is like takes away the need to query a database because you are building an application over inside slash beside your database. Absolutely. Absolutely. You know, I think a lot of people see directors as a CMS or see directors as a back end for an external application. But through a Directus insights and through, like, these panels and through modules, which is another extension type, which is these. So they add they basically give you a blank slate, which you can build. Yeah. It's the most low level extension type, I suppose. 
It's a good base to actually just build your application within the Data Studio itself. So, yeah, pretty pretty nifty. So we're gonna use use API to make that API call to get our token. We're gonna use use items to populate the drop down list of all of the users that we can call. We're also just because, just because we're in a, a view app here, we're just gonna import graph. There's not nothing. We don't need to explain that terribly. That's just a kind of quite boilerplatey stuff. Now we're going to create an instance of use API. We call it API. Use API. And we're also going to now create our instance of use items as well. So we are going to pull out the items from use items. Now the signature here is a little bit wild, so bear with me. The first thing we need to pass in as shown here, I don't know why it's using this. Maybe I've called it a font that doesn't exist, but why the hell does it look like this? Anyway, the first thing we need to pass in is a ref that contains a string of the collection name we're querying. So we don't just pass in the string, we pass in a ref. Cool. We can we can rock on there. We can literally just put in ref. Not a problem, and we had to import it to do this. And we're gonna we're gonna query the directus views as collection. So use item The second now gives you access to Yeah. All of the Directus users. You can just pull them in. Yeah. Because that is a collection in your database like any other. Now we do have a distinction between system collections and user collections. User collections you create, System collections are these kind of 20 I showed you earlier out of the box. In fact, we haven't got any user created collections in this project. We're not gonna use them. But, yes, you can query these just like you can any other any other collection. So that's completely correct. And the second thing we we provide is a query. Now directors has quite a robust I'll show you here quite a robust query language. Here it is. 
Not query parameters. That's what I was looking for. So you can specify what fields are returned. You don't need everything. Right? You might only just need the ID and the title of a blog post, you know, for example. You can apply filters. You can do searches. You can do, like, patch basic pagination, you know, by limiting how many per page and what page you're on and what the offset is and so on and so forth. You can apply sorting and just more. Now we're gonna do that just to specify what fields we want returned because I don't want that huge object for every user. I just want a little bit a little bit less. So we're going to pass in fields. And we we want yeah. Yeah. And and Go ahead. You can keep typing, but I'm guessing this is something, like, that people should always be doing because it makes your your moving smaller chunks of data around, so only getting the things you need rather than Got it. Like, I feel like it's a very early in career developer move to just, like, pull down the entire database to get one field from one user. And what's really, really nice about this is we put we expose a GraphQL, you know, API, but we also expose a REST API. And now what I think is one of the core value propositions of GraphQL, which is you you construct these complex queries that only bring back what you need, you can do regardless. You can really pick what's right for you. And this works with, this works with relational data as well. Here in the items, so in the use items composable, I could, for example, let's say there's a relation, right, called, I don't know, posts. I could be like post stock title, and I can start getting the relational data too. So it's really powerful. So in in that case, you almost end up querying 2 tables. Right? Because you're querying a table and then querying Yeah. That's critical. And that's where being very selective about what you're bringing back is really critical because another valid fields query is this. 
Give me everything on this level. Give me everything one level down. And you know what? You know, you can do that. This is incredibly computationally expensive. But you could do this. I mean, you could. You could. But at any level, you could do this. You could be like, you know, give me everything in the you give me everything in the posts collection, for example. So, you know, we give you that flexibility. You know, you can blow your own shit off. There's great power. Great responsibility. You you absolutely got it. Now, what's gonna come back from here is in fact, this might be a point where if I just console log items, we can just get a little look in on what's actually happening here. Oh, I'm guessing we're gonna see user 1, Nathaniel Okeno. Yeah. And there's 2 users. There's the admin too. Yes. My user. I just got phone. Account. Right? You know what? We could apply a filter here that says, just give me users who have phone numbers. I'm not going to do that. But if I refresh here, there's the array. The array has 2 users, the admin, no phone, Nathaniel, as we expect. Awesome. Cool. What else what else is important here? I might just rename this users because items is a little bit of a weird word to use when they are users. So that's just a convenience thing, I think. And then what we're gonna do now is populate a a select, a drop down. And then when you select something, we're just gonna bind it to a variable in here. So we'll just create that variable now. All we want is their phone number. Right? We don't care about the rest of the objects. We just care about the phone number, and that can start off with a value of null. Now that we've pulled in all these users, let's actually display up here. And now I have the kind of pleasure of showing you the component library. Here's something else. This is the component playground. These are components we use within the data studio that we expose to extension authors. 
So we are going to use the v select, and it has all of the kind of styling applied. Really, really nice to kinda out the box. This is what it looks like. We need to v we need to bind it with a with a variable, which is why I just created phone number. Phone number will go in there, and you pass in items. Items have text and value. Now our items do not have text and value. They have first name, last name, phone number, and so on and so forth. You can you can change which is which field is used for the text. So we'll we'll do, like, first name. In reality, you would probably do, like, a computed, you know, a computed array that would add the first and last name together and display that. I am too lazy for that. We'll just display the first name. But that's what you would do in the real. Right? So, that's I mean, that I I may as well copy it, to be honest. There we are. Yeah. Let's put it inside this div here. K. So we don't That is the value is going to be so I'm guessing the value is going to be the phone number that we Yep. Created earlier. Phone number. Yep. And the items, is going to be users. Okay. So it's gonna grab the list of users that we got. And then grab the list of users. As noted, we do not, we don't carry out value. As noted, it's interesting that it did this because you're meant to do this in Vue, when it's an attribute. So we have the item text, which is first name, and there was item value was the one under the item. So you can choose what is visible to click, but then what that value actually trick the way it adds to whatever. Okay. That makes sense. I mean, let let's make this easy. Let's just let's just, you know, print the value. So now if we refresh, there's our drop down admin and Nathaniel. When you pick Nathaniel, there's the top up. Blank when it was admin because admin doesn't have a number. Yeah. Because, actually, initially, it was, on, I think I need to put in, like, a select. It's unset to begin with. It's null. So whatever. 
We would need to put in, like, a select user default setting. Doesn't have a number. So The admin doesn't have one, though. But, yeah, originally, this isn't because admin has nothing. It's because it's null. But whatever, like yeah. Small nuance, small edge case kind of thing there. So now we have that, then we are I think we need I mean, that's probably we need a button. We do need a button. Can actually I think yeah. Yeah. We do. Let's use another component for that. Let's look at here. We have the button, shock horror. I mean, it's just the v button. We actually need to do nothing else. I'm not even gonna bother copying that, to be honest. V he says and then makes a typo. V button, call user. And then we will when it's clicked, we will start call. And, of course, we need a function, Let's get this const start call. I think it needs to be an async function for later. I just happened I mean, we could change it later, but we'll do it out there. So there you go. That will start, and then we'll grab the value of phone number at that point. When we start a call, we will then go and grab the token. Tokens have a short lifespan, so you really wanna grab them at the point you're gonna use them all very soon before. So we'll go and handle that. As for the token's time to live, it is not recommended for you to just, like, create one that lasts the whole year. You want ones that are gonna last a short amount of time. Yeah. Because they can they I mean, if they're intercepted, they're usable, basically. So, yeah, you do only want them to live a short while. So we obviously haven't done anything with the use API yet, but I think you broadly understand what it's about to do. Do you have any questions? I'm pretty happy with how it's shaping up. 
I like how quickly you can build the especially with the component library, how quickly you can build this functionality, with all of and I'm guessing the great thing is, like, with stuff like that, you're standing on the shoulders of giants, of heroes where, like, they have done all the sorts of things to, like, think like, I saw you change in the width of the button, like, really easy just dragging across and stuff. Yeah. That is really, really useful. This one's my favorite personally. It's called fancy select, and this is just a select 2 items, a divider and a third item. And it's just a select, but look how pretty that is. I love how the pass. And that's used and that's used inside of the UI as well. We actually used it when we, when we or did we use it actually? Nope. Lied to you. I used it earlier today. No. So there you go. Right. Okay. Save and refresh. There's the button. The button just runs this function. Now we're at the point where we can go and get the Twilio voice SDK. So let's let's go ahead and do that. And, yeah, that's a really good point, Alex, in the chat. This makes customizations feel native — by using the component library, it all feels native. Don't get me wrong. Obviously, it lacks polish. It needs some padding. But, actually, to be fair, just by adding padding It would be a really good job. Yeah. Yeah. Yeah. Yeah. Right? We're we're lazy, so this is what you get today, but you could Speak for yourself. I'm joking. Speak for you. Yeah. I'm I'm the one with the keyboard, so I'm speaking for both of us. Hey. Right. Let's go and install npm install, hang on a minute. Twilio and I think voice SDK. It is. It is. Npm install at Twilio slash voice SDK. Yeah. Not my first rodeo, mister Academy. I have made this mistake multiple times. Now Cool. Before we continue, there is one more piece of configuration. And it does involve going back to that, to the, Docker Compose file and adding one more environment variable. Let me let me shove this up. 
We're still here underneath the eyes. Hello. But I just wanna get it to the point where I'm not about to leak all the information. So I will get rid of that now. Okay. Back it back here. We're back here. Just above the cursor, a couple of lines are all those environment variables I don't wanna show you, but we do have to add one more. You might be thinking, pretty damn sure I've added enough of these now. Like, I I don't don't wanna do this. But what we do need to do is, let's find it. I did write because it's long. I did write it earlier. Needs to add this environment variable. Why? What the hell is this? As mentioned earlier, the director's data studio, doesn't allow request to external necessarily without perfect configuration allow requests to third party services, like to 3rd party servers or whatever. And that's a security precaution. That's great. But we are going to be connecting directly from our browser to a phone number. I think, strictly speaking, we could probably build this as an endpoint too and use the endpoint as like a middleman proxy, but we don't we don't have time for that. The easiest way to do it is this environment variable where we're basically changing the content security policy, to allow the connection to this WebSocket URL, which is what the voice SDK is gonna do in a bit. I was wondering. I was like, have we ever seen this before? Like, but now that makes sense. So you are giving It took login. It took Yeah. Yeah. So you said to work that out. We are making sure that the, the director's browser I'm gonna call it the director's browser, which has these extra security walls around it. We're just like, yo, let, WebSocket connections to this Twilio u URL happen. Awesome. Yes. And otherwise, that will error. And while we were putting this together, that error, it's like I had to add that environment variable for it to work. It's pretty descriptive. 
The error tells you it's a CSP, a content security policy problem, and it's on this connect source value. And that it was while trying to access this this, URL. So, you know, it it was pretty easy to work out, but let's save ourselves the the pain and do it now. So so we've installed the SDK. Yeah. Talk about. So we've installed the SDK, and as I'm talking, you can maybe, like, type. So with the voice SDK, what we start, we bring in the SDK, and we bring in specifically a device. The idea of a device is a device is anything that can connect to Twilio, because the device right now, it's gonna be a browser, but it could be a phone if you're using the, like, react native or the iOS or Android SDKs. But we are bringing in a device, and that device needs to have permissions, and we give those permissions from a token. But we don't have a token now. So how are we gonna get this token from our endpoint? Great. We've kind of spoken about it already. We're gonna use this use API composable. Call our now internal endpoint slash, slash Twilio dash token slash generate as a post request, we'll then get the we'll then get the value here. So, the way we're gonna do that, we've already created this, is we are going to, pull out the data value that comes back. I'm already gonna just call it token. Await API dot post because it is just an Axios instance. Ultimately, I'm gonna go to / Twilio token slash generate. That will return a string, which is the token, which we can then remove these question marks. We'll pop this directly in here. We are That's how you pass this device. Yes. We're ready to register the device. There is one more thing I would just wanna take a moment to do. And, it's not I've I've not taken note of it, but I just wanna derail this for a moment. I did oh, shall I do it? Shall I do it after? It's the whole sending over the sending over the correct user and authentic. I think we'll make the request first, and then we'll we'll see what happens. 
So we have a device, and then let's just console log device. Realistically, we're either gonna see a device or we're gonna see an error because that token's invalid. There is no there is no other outcome. It's one of those two things. Extensions reloaded. I hit save. So we're gonna refresh this. Open the console. Go to you. Hit call user, and that's the device. Rock on. What else is important here? Oh, no. No. No. No. We're not done. We need to also do the device. We need to register the device. That's my bad. That extension's reloaded. I might just rename I think it's alphabetical because, again, lazy. Oh, being being too hopeful there, I think. Okay. Whatever. Cool user. Oh, problem. Why? WebSocket received error undefined. Oh, I think oh, I didn't restart the I just needed to because I changed the environment variable. That was the error he told us we were going to see, and he walked right into it. But there it is, by the way. Refused to connect to this URL because it violates the following content security policy directive, connect source self HTTPS. So they were there, and then I added on to the end of it the WSS, you know, Twilio voicing. The Twilio token generate is, I feel like you probably came into this, into this session a little bit later on, John. But here here it is. We built it first. So, yes, what we've built is a bundle. In the bundle, it's an endpoint and the panel. The panel calls the endpoint. The endpoint talks to Twilio, comes back, returns it to the panel. So, yes, it is now a Directus custom endpoint, by virtue of this file here. Okay. Let's do that again. When you see an error in a workshop and you're like, damn. I hope this isn't gonna derail everything, but no. There there we go. No errors. So we're good. No errors. And we did see there it did a post to the Twilio token generate endpoint. It returned in 25 milliseconds with a device, register device with Twilio, and say, hey, device. I'm authenticated. 
I am now ready to start making calls and receiving calls if you set up to do that. Let's do this, and then we'll go back in the in up the the endpoint because that is there's no problem now for its hard coding user in the endpoint. But in the real, you should not be doing that. So we'll cover that at the end as like a let's it's not a next step. It is critical, but we will treat it as if it's a next step because I wanna I wanna get a call. I wanna get a call going. I'm getting impatient now, as you know. That is the person I am. Okay. So, let's now yeah. And once the so there's one other thing here, which is this device doesn't register like that. Instead, it emits an event when the device has been successfully registered. Sometimes that takes a moment. I think that it also pops up and, like, asks you for, you know Yes. So My hat says why register can take a couple of seconds is because sometimes it needs to ask permissions to use your microphone, which could take you one second, 2 seconds. In Kevin's case, where he's already done like a we we've done this before It doesn't show up. So it's given it permission. So it's not going to do that. So while it may seem lightning quick for him and you might be like, why do we have to do this and then wait for it to finish? It's not always gonna be like that for all your users. So then what we use are these events where we say when the device has been registered, then we want to do something. Now what a lot of people do, especially if they're creating dial up panels, is you might have you might register a device when someone opens the dialer. Right? So that it's quicker that when they click a call, they're already registered. And then you can already, like, say that something's gone wrong. You are not authenticated to use this dialer straight away once they open it. Different Can I ask you a question? Yes. The the tokens don't last very long, and we we use the token in the code when we register the device. 
And I don't think we use the token again. But what happens if the token expires between registering the event and actually trying to make the call? At what point have the handshake happened? The handshake happens when you register the event when you register the device. Right? Not when you make the call. Not when you make the call. Oh, interesting. Is you also get a event when your token's about to expire and when your token expires. What we recommend people do is when your token's about to expire, generate a new token. So hit that API call then start a new token get a new token. Yeah. Exactly. Again, this limits your surface area for exploitation. So it's it may seem like a little bit of a faff, but trust me, you do not wanna be hit with a crazy Twilio bill because someone's hijacked your phone call and used it to call premium numbers that paid themselves. That. So we're gonna make a call here. The call is going to be an object. I'm going to save it up here because we want to start the call later. We're gonna wanna handle hanging up. So we'll need access to that up to that object in the in the global scope. So we're going to go const, call, I think we'll call it, and we'll just again initialize that with a value of null. Now it's time to actually make the call. So we'll set the value of this ref here, into device dot and yeah. Thank you. But my uncertainty was creeping in there and it has a params object and the value is going to be phone number, this one here, dot value. Because it's a And it's that didn't get the raw value. You might be like, why connect and why is it that params too? Because there are a couple of things. You can clone phone numbers, but, like, with Vince with you, you can actually phone video call rooms. So let's say people having a video room, you can literally dive in to it from here. You might want to dial another client, which isn't gonna have a phone number. It might have, like, a a name attached to it, like the user that we have. 
So, there could be a couple of things. We're using phone numbers here, but that could be a few other things, which which is why there is a bit more flexibility in I don't wanna say ambiguity, but a bit more flexibility of what could go into that params. Yeah. Alright. This is everything we need to actually make a call. We're not done. We're not gonna get any UI that we've made a call. There's gonna be no ability to hang up. There's none of that, but I I think this is a moment we can actually try this out. Alright. That's it. You know, we've got 33 lines of code with a few lines of white space, and I think this might be all we need. So your phone number's in there. Yeah. Let's ready for the error? I am. I am ready for an error. There it is. Allow. You've hit call. I am getting a phone call. I'm gonna mute my mic and just join the phone call. Hi. Hey. Yeah. You sound you sound suitably terrible as phone calls do. I can't I can't I can't be bothered. I literally can't be bothered to figure out sharing your audio. So anyway, the thing is like, you should share my audio. I'm like, I can't be bothered. I was speaking on the phone. He could hear me. Just because we're not sharing audio via the the stream, you couldn't hear me. So next time, I won't mute my microphone. But his browser is you couldn't hear me. So next time, I won't mute my microphone. But his browser called my phone. Hey. There was no indication we did it. There was no indication it ended. So there there's some things we need to work out now. But we're basically sound. Yeah. There were some sound things. So, Twilio just built in we have, like, these audio, like, files that, it's, like, plays a sound when you connect. It plays a sound when you disconnect. But, visually, there are no cues. Now you can customize the sounds that you that are in there to have something that's like your own UI. 
And, like, what we're gonna do next is you probably should change your UI so people can see visually what state the call is in. We're at the point now where we are polishing this. When we get back to disconnecting the call, I think we'll, I'll slow back down. But for the next couple of minutes, I'm gonna speed run this because this is just little view UI, you know, things. So we do this. We currently have this div here that shows start call pick phone number. So I'm gonna create a new ref called show call. Sure. Let's make that false because at the beginning, there is no call being made. The call, I believe, also emits events. So call dot value is this, then we'll go ahead and say call dot value dot on. And there is a ring ring. There is a Yeah. I don't know if it's answered or connected off the top of my head. I will check it out later, but it's subject to tell you it's connected, and then one to tell you when it's I think we're gonna be lazy and we'll just do ringing and disconnected because disconnected is by either hand, I think. It's just disconnected. It has been dis phone ended. Whether you hung up or they hung up. There is another way to, like, distinctly choose between which person hung up, but we're not gonna be using that today. Exactly. So show call dot value becomes true. And then on disconnect disconnect, we'll set it back to force. Then in here, we'll say v if not show call, do all of this. And then, oh, just some just some chat right here. Loving it, but the, the potential tying this ability with other business use cases like customer engagement, etcetera. Yeah. I mean, I see, like, customer, like, outbound call centers as being, like, rich for this. We'll talk about that more more a little bit later because I have some thoughts on where this could go in the future with more time. Also, the possibility to include the verification style systems, generate a confirmation code in direct us. You read it out. 
You know, you could, send it via Twilio and then confirm it on the phone, stuff like that. Yep. Excellent. Yeah. I love the part with the browser called the phone. So did we. I'm I'm glad it worked. The first time. Right? The confidence in it. Yeah. We also need, I suppose, just a v l's here, and we'll just say call in let's do it more like this. Call ongoing. Not that. E. I just wanna No. I'm just gonna say, what is that p else? No bloody clue. The or the first result having a mad one. And then I think we'll just want another button. But this one is end call. And exactly. And we'll do an end call function, which I will just create, and we'll deal with it in a moment. Const end call. I don't doesn't hurt anyone. Yeah. I was it doesn't have to be, but right in there. Yeah. So that's the UI. It'll call ongoing end call, and then there's an end call button. It will do nothing. We we do actually need to tie this up to ever get back to that first state by pressing the button. And I think called dot value dot disconnect, the thing where we have a connect, there is also a disconnect. And then call, show call dot value force. I think it's handled by this, but, whatever. I wanted to work first time, so we're just gonna we're just gonna keep throwing some redundancy in there. I think that's groovy. So extensions reloaded. Let's try this out. Nathaniel, call. Nice. Good. Call's coming in. Answer. Hello? Testing. Testing. 123. Yeah. I hear you twice through through the browser and through our streaming system, and, it's it's twice too much, to be honest. Bye. Oh, forget that. It did hang up the call. Stereo. Yeah. It did it did hang up the call. Dun dun dun. Oh. I don't know what this is. I did hang up the call. Just before we get to this, because I think this might be in the voice SDK. But I'm not convinced. Get on proxy. Log is read only and not configurable. Now that means logs being used somewhere inside of the call dot disconnect. Alright. Call dot underscore disconnect. 
Alright. We are yeah. It's here. It's somewhere in here. We are going into the depths of the of the SDK, the voice SDK, and I have a better idea. I have a better idea on how to handle this. Call.value.onerror. Yeah. Damn. Does that even work? I I do not know. I I I am desperately looking up the voice SDK to see what Honestly honestly, my guy, it's always the end of our it's always the end of our Workday. Yeah. It's a frantically look up anything. I'm swallowing the error. It's not an error if you if you swallow it and don't display it to the user. Is it. You're you're absolutely right. There is an error. But like, sorry. There is, on error event. Oh, I just I'd made a guess there. But I'm gonna call you again. Hello? How are you? Yeah. Yeah. I I don't care how you are, mate. Sorry. Bye. Okay. Interesting. We'll pause on that. I'm gonna I'm gonna refresh and call you one more time, but I'm gonna hang up because we actually haven't tested that side. Sorry. You're gonna hang up the other way. I've I've been the one to hit end call. Okay. Interesting. Look. Works great. Call ended. Question. It's when Question. I disconnect. Yeah. Question. You, when you disconnect works. Yeah. Here. So you hit disconnect. Do wait. Hold on. I wanna quickly check something because, you've got on disc. That's on disconnect. I'm not sure, actually. Mhmm. I'm not sure. Neither am I. Neither am I. Refresh, reboot, pick up your changes. The changes are happening. Look. When I hit save, just watch it. Extensions reloaded and other UI changes were displaying. So I'm confident that happy to call this a a success, to be honest. Now there are a few other things we need to do. We need to like, I I think we must tighten this up a little bit because it's currently a little too open for everyone. Because right now do here is anyone who gets access to the URL gets a token that allows them to phone anybody they want and charge it to you. Agree. So, Wreck, I want to say it's accountability. 
But just in case it isn't, I'm gonna just oh, do you wanna know the way I'm gonna do this is I'm gonna go, object dot keys rec, and we can work from there. Oh, was that too hasty? Oh, was that too hasty? Is it building? Oh, but it's still building. It's it's packaging the whole Twilio SDK t k and and that's why it's just taking a hot minute there. So we're gonna do this. What's the best way of doing this? I think what I'll do is in the panel is I'll just pause on I'll just comment all of this out briefly. I don't actually wanna be making calls. Interesting there. Refresh and call. And over in this terminal here, great. We have You have accountability. Paul, last in the chat, obviously, you can lock this down in direct, but imagine inside of Twilio, there are also options to lock down your numbers. Zip rank as well. No? Lockdown ask that question. I am locked down the Obviously, you can lock this down in direct test. We might need to expand on what that means. But I imagine inside of Twilio, there are options to lock down your numbers or SIP trunk as well. You're not I mean, these on your numbers. Tokens are the way to do that. Yeah. These tokens are the lock. Yeah. So you wanna make sure you're securing the generation of them. So And, accountability. Yeah. The generation of these tokens is essentially how they are locked, and only someone who essentially only someone with your credentials can create tokens, which means only someone with your credentials can create calls. I'm gonna pause you for clarification. Lockdown calling premium numbers or long distance Yes. Oh, yes. Yes. Yes. Yes. Yes. Yes. Yes. You can. I can actually show you that in the browser. So, there is I'll I'll pop over to you while I just do this. There you go. See yours now. No worries. So you have voice geographic permissions. So eventually, let's go back to that. Voice geographic permissions. And give it a second. It will load. And so you've got, programmable voice and SIP trunking as well. 
And you've got, they give you also an average cost of how much things will be. You've got low risk and high risk. This is a connection. This is not just countries, so it's not entire countries that are, high risk. Although it is could be a large proportion of countries, large percentage of that country. It is specifically network carriers in specific countries that have been known to have a higher risk of toll fraud, so you can block them. So I can, like, block the whole of North America. I can block specific country and so on. And then you can also pop in a number to check if it's got permissions at all. So that's another way to just, like, lock it down, and this is great because it's on an account level. So your whole account can be locked down. All good? Yep. I did I did the changes. So at the top of the at the top of the, route handler, we inside of this, request, this rack, you get this object called accountability. Accountability contains the following properties, user role, whether or not they're an admin, whether or not they have app access, IP, user agent origin, and permissions. So you can further go on and expand, but all we care about are you logged in, and authenticated with director? So so that's all this does. It says, hey. If you don't have a user ID, go away. Now I think about it, there might be more you wanna do here because you could just jam an accountability object and user. So there's probably some other levels of, of restriction you wanna do here. For example, checking the user's role, which then will return they don't have a role because they're not a valid user. But I think this is I think this is okay. And then here in identity being Yeah. User, it can be There you go. Rec.accountability.com. So which is the ID. Name of the user that we're logged in on in the system? No. It's the UUID. Okay. UUID. Cool. It's the UUID of the user. I mean, if if you want to see, we can just console log. 
Well, because I was gonna show how it appears in the Twilio logs when a call comes in. Sure. Give that a moment just to reload one more time. No worries. Again, it's bundling that whole Twilio, helper library, and so it just takes a moment to build. We'll hit that. Did you uncomment the call? No. Yeah. That would help. Alright. We'll make this a short call, just so we can show you the logs. So now only authenticated users on directors can use this application. They he's gonna call me. I'm gonna answer. I'm gonna keep it on for one second. He's gonna end the call. My phone call ends. And then I'm just gonna refresh my logs so that we can see the latest call that happened, which was 838. Yep. Which was this one, and we can see here it's complete from a client, and it was from client. And then this was the UUID that he talked about. I can actually show you a previous call. Remember, it used to be hard coded to user. If I get to this one over here, we can see it was client user before, and now it's client and then the user ID. That's cool. Cool. So, I mean, I I think and I thought about it more actually. I may update the docs. I've just thought about it in this moment. The user actually this doesn't prove the user is authenticated. It proves that there is an accountability object with a user I with a user value that is not foresee. That's what I check against. But what you may wanna do is expand this further, use the permission, service, check they have certain permissions, which they either will or won't have. That is more direct us, that's locked into direct us and it will say no if they don't exist. So just a thought there. What we did was a lightweight, like, check, but it is not foolproof. And I've just realized in this moment that that's the case. But that's fine for this. We've acknowledged it. We've shared that with you, and we've given you an approach. Now I think that's it for, like, what what we can do here. But what what more could we do? 
Firstly, we need to handle that rejection state better when I hung up. Can't be bothered now, but just I'll figure it out. It'll find its way into the blog post that'll accompany this. One thing that's really cool if I just come back to my screen share is these are real users. Right? And they're queried using the composables that are exposed to view to to this view component here, this extension. But you have access to all the collections. So let's say, for example, you have, customer calls or customer notes, customer note whatever, a timeline of events and they exist in your project, you could select the user, get maybe their latest notes, then call them. So now you have context. Additionally, insights dashboards have this concept of a global relational variable. So what this will do is user, is you select directors users in here. Let's say we want first name and last name in here. Let's not have them overlapping each other. You could select a user from here, and this now contains the UUID of that user or the object of that user. I can't remember. But you could feed that into this panel. You could feed it into the panel that gets customer information. So you can have multiple parts of your dashboard all changing because you selected the user once in this global relational value panel. So there's so much. There's so so much you can do with this. And I'm just really excited for more people to see directors insights as a really valid app builder surface more than just, more than just insights and BI. So with a couple more minutes to to go, if anyone in the question if anyone in the chat has questions, please do chatter and let us know. Nathaniel, do you have any any closing thoughts while we wait for those? No questions. But the thing is, like, I'm I'm just really trying to think about, like, other ways. So, like, there's obviously, like, building your own mini contact center. 
Because contact centers are, like, big and expensive, and actually sometimes what you need is you don't want this massive, like, stood up, like, huge at scale contact center. You just wanna be able to have the ability to contact your users just really, really quickly, and you could start building applications like this. You can then also because you're using Twilio phone calls, you can access, like, a lot of other Twilio functionality. Like, we've got, like, voice intelligence transcriptions where, like, you have a phone call. At the end, you can run AI operators on it to just be like, yo, this person, like, that person asked for a manager, or the person was happy. We can just do sentiment analysis and have that, like, just pop straight into a panel, for example, because you are just connected to that ecosystem as well. So, there is way more than just the calling calls that you can do, but, it all starts with, like, ring ring. Yeah. And I suppose you can query APIs for that data as well from within here as we have just demonstrated, and we spoke about with the weather API, you know, example way up top. You can just call off to arbitrary third party APIs and bring data in via custom endpoints. So, yeah, there were no other questions in the chat, so I'm pretty confident at this point saying I think we are off the clock, which is wonderful. This has been awesome. Thank you so much for joining me for this. I had a really fun time putting this together and delivering it with you. Always enjoy hanging out with you. Next time we go on a roller coaster journey, we should write another workshop as well. Which is what happened. Sorry. That that that's what happened. We we wrote this basically while while going to a theme park together. We had a long, long, long, long drive, but something could came of it. Weekends. Well, on that note, thank you so much everyone for joining in. We will see you somewhere. Bye. Bye. Oh, wait. Where can people find you? Oh, online at Twilio dot. 
They can email me at nocennwall@twillio.com. I do check my emails, and, yeah, just say hi. I was ready for you to that. I don't check my emails, but you can send me an email there. Alright. Alright. We're done. We're done. We're done. Bye.","dc015249-9e16-4724-b6d2-d16cbaae7a2e",[154,155],"75d81975-d6de-4576-b6c5-e5ca707efd53","f07c9fd0-4608-4f90-a95a-079338db2d37",[],{"id":133,"number":134,"show":122,"year":135,"episodes":158},[137,138,139,140],{"id":138,"slug":160,"vimeo_id":161,"description":162,"tile":163,"length":164,"resources":8,"people":8,"episode_number":165,"published":166,"title":167,"video_transcript_html":168,"video_transcript_text":169,"content":8,"seo":170,"status":130,"episode_people":171,"recommendations":174,"season":175},"deepgram-audio-podcast-summarizer","964316267","Join Kevin and Damien Murphy, Solutions Engineer at Deepgram, as they use Deepgram to build an audio podcast summarizer in Directus Automate.","e77b0dc4-21ba-4657-aa71-bfb4c09cd917",84,2,"2024-06-21","Build an Audio Podcast Summarizer in Directus Automate with Deepgram","\u003Cp>Speaker 0: Hello. Hello. Hello. Hello. Damien, you are still muted, but we are we are here.\u003C/p>\u003Cp>Hello. I'm Kevin.\u003C/p>\u003Cp>Speaker 1: I'm Damien. Yeah.\u003C/p>\u003Cp>Speaker 0: Nice to meet you. Yeah. For the next hour and a half, we're gonna be trying to get things to work maybe successfully. We'll see. We'll talk about the project in just a moment, but I actually thought some more thorough introductions might be in order.\u003C/p>\u003Cp>Damien, would you like to tell us who you are and who you work\u003C/p>\u003Cp>Speaker 1: for? Yeah. I'm Damien Murphy, applied engineer here at Deepgram. So, you know, working with customers, building, you know, real time low latency voice spots and transcribing their audio.\u003C/p>\u003Cp>Speaker 0: Yeah. Excellent. And I am very, very, very fond of Deepgram, so I'm really excited and thankful that you're joining us for the next little bit. 
My name's Kevin. I work on the director's core team, and in this workshop or rather, this workshop is part of, Leap Week.\u003C/p>\u003Cp>Hopefully, you are already aware, but Leap Week is our week of announcements where we announce new features and also run a series of other events to celebrate directors and our community. We're starting to near the end of the week now, but don't worry. There's still lots more to come. Tomorrow, we are doing a community networking social. And right now, right here, we're gonna be building some cool stuff with directors and Deepgram.\u003C/p>\u003Cp>Maybe if we take a moment to talk about the project, that'd be a cool way to to stop. So podcasts. I love podcasts. Podcasts are actually all standards. Podcasts are just an RSS feed that contains some metadata and links to episodes.\u003C/p>\u003Cp>And in this workshop, we're going to string together using Director's automate and flows, our kind of visual automation, tool, a, you know, semi complex automation where we are going to go grab a RSS feed of a podcast, go grab the latest episode, send it off to Deepgram's transcription service. So maybe before we I jump straight into the whole project, maybe we break down each part. Could you tell us a little bit about, Deepgram's transcription service?\u003C/p>\u003Cp>Speaker 1: Yeah. So we're able to process, you know, audio, video, pretty much any format, and turn that into, text. Right? So we'll basically transcribe every bit of speech that's spoken and then give you back a word level and time stamp level, you know, what was spoken. We also have multiple other APIs, which we'll get into a little bit later.\u003C/p>\u003Cp>But,\u003C/p>\u003Cp>Speaker 0: I mean, we can we can rock on now. So we're going to go and transcribe these podcasts. I listened to one the other day that was like an hour long. Then we're gonna use this audio intelligence. Tell us about this one.\u003C/p>\u003Cp>Speaker 1: Yeah. 
So we have the ability to pass the transcript once it's transcribed through our audio intelligence features. So this can do things like sentiment analysis, summarization, intent detection, and topic detection. And this can be really useful for, you know, pulling out that valuable metadata, and it's all time stamped as well. So you can even, you know, build an overview of the podcast, using those, audio intelligence features.\u003C/p>\u003Cp>Speaker 0: Cool. And then you can also understand it on, like, a segment basis as well. Right?\u003C/p>\u003Cp>Speaker 1: Yeah. Yeah. So each part of the the audio that comes through will pick up topics as they happen. So we can do major topics and, minor topics as well.\u003C/p>\u003Cp>Speaker 0: Awesome. Oh, that's really interesting. Justine, a question here in chat. And, yes, please do use the chat. I will answer the question while encouraging you to use the chat.\u003C/p>\u003Cp>Will this demo be available on demand? Yes. Like everything at Leap Week, it is all recorded. It will be available on DIRECTUS TV tomorrow. In fact, the workshop from yesterday with Twilio is already up in our brand new show called Enter the Workshop as you will be able to watch this on demand, of course.\u003C/p>\u003Cp>So, but by being here live, you have access to the chat, so take advantage of it. I'll be monitoring it. You can ask either of us questions about Directus or Deepgram or what we're doing, and we'll be more than happy to answer in that. So we're gonna transcribe a latest podcast episode. We are going to use the audio intelligence, features that, Deepgram offers.\u003C/p>\u003Cp>I'm gonna struggle because Directus in Deepgram both start with these. So sometimes I might do this. I feel myself maybe doing it already. And then finally, we will use, one of the newer Deepgram products, Text to Speech. Tell us about this one.\u003C/p>\u003Cp>Speaker 1: Yeah. So we recently released, our text to speech. 
It's one of the the lowest latency text to speeches on the market, with high quality voices. So you can get a very low latency text to speech generated at a very low price point as well.\u003C/p>\u003Cp>Speaker 0: Just to help me understand, because latency only, I suppose, matters well, it doesn't only matter, but it matters more when you're doing live, like, real time stuff. So you can use this real time as well?\u003C/p>\u003Cp>Speaker 1: Yeah. Absolutely. And that's where we see a lot of the demand in the market is for, you know, building real time voice box with sub second latency. So with this text to speech, you can get about 250 milliseconds of of, latency for time to first byte.\u003C/p>\u003Cp>Speaker 0: Excellent. We won't be using it real time today because obviously podcast episodes are already static hosted files, but that's, I suppose, where the latency matters. So you can do, like, true conversational voice bots, I suppose. Cool. So we're gonna do all of that.\u003C/p>\u003Cp>Just to summarize how this is going to work, we are going to first build a flow that will take in a podcast URL. We will grab the latest podcast episode from that podcast feed. We will send that off to Deepgram to receive a transcript, then we're gonna send it off for, text intelligence, so text to text API that Deepgram offers. We'll talk a little bit about why they're separate when they don't have to be. You can do those 2 steps together, but it will become clear as we go through the workshop.\u003C/p>\u003Cp>Then armed with a summary of that podcast, we are going to send it back off to Deepgram to generate a short summary, I suppose, in the audio bite, which we will then save back to the director's project so you can go and listen to it at your leisure. Any questions in the chat? Any thoughts, Damian, before we kick off?\u003C/p>\u003Cp>Speaker 1: Yeah. If anybody wants to sign up for Deepgram, we give $200 in free credits as well. 
So, you'll be able to transcribe about 750 hours of of audio for for free, essentially.\u003C/p>\u003Cp>Speaker 0: Yeah. It's really, really cool. Really nice way to get started. And indeed, that is what we will be doing today. Okay.\u003C/p>\u003Cp>I think that means we are ready to kind of, jump in and get started. And the very first thing we are going to do here is we are gonna set up a directors project running locally now. I will give you a very quick summary of what Directus is in case you're coming from the Deepgram world and you've not heard of Directus before. So Directus is a really cool back end that you can use as a developer to build your applications. You connect it to a database.\u003C/p>\u003Cp>We provide developer tooling and this really beautiful web application which you can use to interact with that data. And it's, suitable for handing to non developers as well, which is not very typical of back end, back end tooling. So we're gonna spin this up, and then we are specifically going to use Directus Automate, which is part of this application in order to build this kind of multistep flow, something that looks a bit like this, except each one will take on one of the steps we described in our project. This project will use some, extensions that we built and published to the marketplace, which is available in all directors projects. We can go and do that together.\u003C/p>\u003Cp>And then that very final step where we create a new audio file and save it back to our directors project, we're gonna build that extension together because it doesn't currently exist. So that's that's the kind of rundown of how this is going to shake out. So with that in mind, I have this empty directory here on my local machine. It's just this empty directory called live. Let's move into it here, and we're gonna spin up a director's project.\u003C/p>\u003Cp>The first thing we're gonna do is create a docker compose dot yml file. And I do happen to have one here. 
This is the docker compose file for spinning up directives locally with a SQLite database. There isn't too much to talk about here. We will use the latest version of directives that has been published on Docker Hub.\u003C/p>\u003Cp>We have 3 volumes. So these are, directories that exist inside of the Docker container that we are going to map to local directories. And you'll see exactly what these do in about a minute. We need some environment variables, a key and a secret. You should replace both with random values.\u003C/p>\u003Cp>For the sake of this workshop, I think replace with random value is random enough, so we'll leave that be. The initial admin email and password, which, of course, you can go change. The database client and being SQLite is just a file, So we're just telling it where that file will live. We have WebSockets enabled so you could do, like, real time subscriptions. It's part of my kind of default snippet that I have.\u003C/p>\u003Cp>We're not gonna use that today. And then we're also turning on extensions auto reload, which is gonna be really important for the developer experience of building our extension at the very end of this work shop. So with all of that done, you can just run docker compose up. No. Oh, did I hit save?\u003C/p>\u003Cp>I did not. There we go. And so it's now gonna go ahead and, spin that up. And you'll notice immediately an upload and extensions and a database folder. So they are the 3 volumes that are inside of the Docker container, but also mapped to a local volume.\u003C/p>\u003Cp>It did a whole bunch of, like, first time, you know, seeding, and then we have directives running right now on local host 8055 with my admin, email, and password that we set in the Docker Compose. That's it. That's how I was having set up directives. This is the full fat version of directives running here. 
It's the same version we host in Directus Cloud, and we can with that jump straight in.\u003C/p>\u003Cp>Damian, I might just give you a quick tour of it if that makes sense. We have a database. Yeah. We have a database. It's that SQL like database.\u003C/p>\u003Cp>In here, we can create tables in that database and we can query them. We can interact with the data. Great. We also have users that we can create. We have a whole auth service.\u003C/p>\u003Cp>So immediately, we have this admin user that you can invite other users. Users in turn can have various permissions, which grant them access to do different actions on collection. So create, read, update, delete, and share. We also connect to your asset storage, or you can save files locally as well. So this will connect to an s 3 bucket and, an Azure storage, Backblaze, and various others.\u003C/p>\u003Cp>We are gonna use this later to actually save the summary back, from Deepgram. By default, if you don't say anything, it will be just local file storage, and it'll actually just get dumped here in this in this uploads folder right here in the sidebar. We have a little insights dashboard builder. We used that in yesterday's workshop. And then over here in settings, we have access to flows, which is the automation builder, which is what we're gonna use today.\u003C/p>\u003Cp>I think the only other thing we wanna do before we kick off is let me just have a quick think here. The only other thing we wanna do is our public role. So this is this represents, all of the requests that are made that have no permissions that have not authenticated. And I'm just gonna give it the ability to read and write files. In the real, you shouldn't do this.\u003C/p>\u003Cp>But for the sake of this, it'll be fine. What's the worst that could happen? So this will allow us to read and write files without needing to authenticate with with directors. What else is needed? 
We need we need the extensions from the marketplace.\u003C/p>\u003Cp>So there are 3 extensions we need here. If I type in Deepgram, I built a few. I don't I don't like the spinning wheel. There there it is, Deepgram. So we have the AI transcription operation, and we have the AI text intelligence operation.\u003C/p>\u003Cp>We believe in making things nice and small and modular, so we have separated them, and each one's very simple. In reality actually, question. I think in reality, you could do the the intelligence at the same time as the transcription. Right?\u003C/p>\u003Cp>Speaker 1: Yeah. Yeah. You can send a single request, and you'll basically just enable those parameters, and you'll get both back.\u003C/p>\u003Cp>Speaker 0: Great. As these are don't know what's going on there. Although, I've had issues with my Internet all day, so I'm gonna go out. I'd rather this was a bit slow than you not being able to hear or see what's going on. So we did the AI transcription, and then we had the AI text intelligence.\u003C/p>\u003Cp>So we'll just install both of those. So these were released, last month as part of our directors AI bundle of, of operations for our automation builder. And then there's one more that I created just to make our life a bit easier today, and it's this extension here, RSS to JSON. It will allow you to go off and get an RSS feed, and it will return it will, pass it and turn it into a JSON object. And this will be really helpful because we obviously need to pass the RSS feed of a podcast.\u003C/p>\u003Cp>So we'll go ahead and install that too. There we go. We'll just give it a quick refresh as it is prompting us to do, and we're ready to rock on. So we're gonna create a new flow. Podcast summarizer summarizer summarizer.\u003C/p>\u003Cp>Sure. I don't think summarizer is a word, so I don't know why I am. So hooking up on it. And we can trigger this automation in 1 of 5 ways. 
We can do an event hook.\u003C/p>\u003Cp>So an event hook can be triggered whenever or will be triggered whenever something happens in your database. So it could be, a new item is created in the posts collection or a new user is registered or a new file is uploaded. We have, webhooks, which takes an inbound HTTP request, so you can receive data from third party services. In the world of Deepgram, how we actually use it here on DIRECTOR's TV, our on demand shows all have transcripts. Some of our shows are very long, so we use Deepgram's, asynchronous callback mode.\u003C/p>\u003Cp>So it goes up and does work and then pings you once it's done. And so that would be a webhook trigger. You can run them based on time, you know, schedules. You can have flows trigger other flows. So if you have complex, you know, use cases, you can kind of bounce portions off into their own modularized automations and then return the data back up.\u003C/p>\u003Cp>And finally, manual. And this will add a button, this will add a button to the side of the data studio when you're in collections or item pages, and you can go and trigger it from there. We're gonna use a webhook because I just want the ability to call it really quickly and just making a quick call request is probably gonna be the easiest way to do it. I don't care about any of this because it really is just a quick trigger. So if I hit this URL in fact, let's do that.\u003C/p>\u003Cp>I open a new, let me just build these 2 terminals. If I open a new terminal and just call this URL and refresh here, we'll see it's been triggered once in the logs. So I think that's gonna be the quickest way of just constantly running it as we go to to test it. Okay. Any questions so far?\u003C/p>\u003Cp>Anyone in the chat? I I raised through this. I got us to this point super, super quick. We We scheduled an hour and a half in for this, and I think it won't take long at all. So unless questions are asked.\u003C/p>\u003Cp>So, feel feel free. 
Not that you have to, although you need questions. A question for you though, Damien. With Deepgram's callback mode, can you give some use cases for when that's useful? Because it's a really good, you know, demonstration, I think, of the fact that you can do you can trigger flows based on webhooks.\u003C/p>\u003Cp>Speaker 1: Yeah. A lot of customers use it, because it allows their server to, you know, get back to doing other tasks. Right? So rather than waiting for the response, the more features you enable, the, you know, the longer the request will take. So, you know, adding summarization and topic detection, entity detection, you know, it it can go up into, you know, the 30-, 40-second range, and as the audio gets longer as well.\u003C/p>\u003Cp>But yeah. Like, by default, if you've if you're just transcribing, you know, you can transcribe an hour long podcast in probably, you know, 10 seconds. Right? So, one of the other cool features is you can pass a URL to, like, an S3 bucket. So you can tell us, hey, you know, when you're transcribing it, instead of me sending you the file, go pull it from an S3 bucket.\u003C/p>\u003Cp>And you can even tell us to put it back into an s 3 bucket as well, which is pretty cool.\u003C/p>\u003Cp>Speaker 0: Yeah. We have actually, over in our docs, I've written a post before a Deepgram post right here. Right. But that makes sense. It's to stop you having, like, hanging long connections open.\u003C/p>\u003Cp>Right. And that that makes total sense. So this, what this does, is it listens for any file upload. It verifies that it's an audio file, and then it will send the URL of your of your file directly to to Deepgram authenticated with your token. It has a transcript returned, and then you can save that straight back to the file.\u003C/p>\u003Cp>So it's placed right next to the file, which is really cool. It's a really straightforward automation here. And this also featured on, let me find it. 
This also featured on our quick connect series right here. So it's that same project but over in video form.\u003C/p>\u003Cp>So if you're interested in kinda learning more about what's quite a common automation, I think, with Deepgram, you can see how to set that up. Okay. First thing we need to do then is we need to go ahead and get a podcast, like, actually go get, an RSS feed. I have loads of podcasts. I actually agonized over which to pick.\u003C/p>\u003Cp>So I picked Darknet Diaries. You heard of Darknet Diaries?\u003C/p>\u003Cp>Speaker 1: No. Haven't heard of it.\u003C/p>\u003Cp>Speaker 0: Fantastic podcast all about cybersecurity. Really, really, really good. I just listened to just listened to this latest episode here, Anom, like, 2 days ago, came out June 4th. It was it was so good. It was not what I expected.\u003C/p>\u003Cp>But a 146 episodes of Darknet Diaries and any I'm gonna say true podcast because I think Spotify has started to screw with the definition of a podcast is just an RSS feed, and they all follow exactly the same format. If it's not if it doesn't have an open RSS feed, it isn't actually a podcast. It's an appropriation of the term podcast. But the podcast is this kind of XML document, this RSS feed, and they all have, you know, some metadata that, you know, will be shown in your podcast apps. And then they have a number of items here.\u003C/p>\u003Cp>So this item here that I'm highlighting is a single episode. It's that one we just saw, Anom. And you'll notice here in the enclosure, there is this attribute called URL, and that contains a direct m p 3 link. And that's how podcasts will work. And that's really handy because with, Deepgram, you can send a a binary file or you can send a URL.\u003C/p>\u003Cp>And podcasts have this URL just hanging out there. So our job is get the URL. I can take this whole feed URL and use our brand new, I built it yesterday, RSS to JSON, RSS to JSON operation here, and I'm gonna call it feed. 
The fact I call it feed will become clear in their own. Why does this key matter when it has a name?\u003C/p>\u003Cp>Why does this key matter? We'll talk about that in just a moment. We'll stick the URL in there, Save it. Hit it again. And I think we configured this flow to actually return the data from the last step.\u003C/p>\u003Cp>So we are expecting to basically see it here. Yeah. There it is. The whole RSS feed, but turned to JSON. If we refresh here, we can also see it in our logs.\u003C/p>\u003Cp>There it is. So there it is. That's pretty cool. There's our item. Where is it?\u003C/p>\u003Cp>Here we are. There's our item array, and there is the MP 3. Now it does actually say in the docs of this, extension that I built yes yesterday. If ever there's an attribute, you'll note that you may remember it was an enclosure. I can show you.\u003C/p>\u003Cp>It was an enclosure tag with an attribute of URL. And somehow I had to map that to a JSON object. So the chosen method was to make it an object and the attributes are just underscored. I think that's valid. So now we wanna dig in and actually get that data.\u003C/p>\u003Cp>We wanna get that URL. So we will create a new we will create a new, step here. And this one, I will call, latest, I guess. Latest because we just wanna get the latest episode. This has all the episodes.\u003C/p>\u003Cp>And we're gonna just run some JavaScript in here. Now the this, kind of, boilerplate here, is it is the zoom level okay?\u003C/p>\u003Cp>Speaker 1: Yeah. It looks okay.\u003C/p>\u003Cp>Speaker 0: Yeah. Cool. Have this data property. And data is a big object and properties in that object include the keys of all of the steps. 
So I can get the I can go and get the feed step by, you know, going data dot feed, and that's that whole object that was returned.\u003C/p>\u003Cp>So if you name the keys, you can more easily pick specific values from all the way up what we call the data chain, and every operation adds a new object to the data chain. So we have data dot feed here. Now I happen to know because I didn't wanna I didn't wanna sync too much time here. I know where the value of the URL is. It's in dot RSS dot channel dot items dot item That's an array and we want the first item.\u003C/p>\u003Cp>That's the value of the episode. Suppose we'll just store that. And now that that episode had a ton of data, how long is it? When was it published? What's the description?\u003C/p>\u003Cp>What's the title? What's the cover art? The m p 3, obviously, and a whole bunch of additional metadata. It was huge. It was a really, really big object, actually.\u003C/p>\u003Cp>The ID, the pub date, the link to the, like, web website, the description formatted, the URL, and data about the URL. Some data specifically for iTunes, the author, iTunes summary so much so much. But actually not so much. That's the end of it. I reached the end, but significant.\u003C/p>\u003Cp>We don't need all of it. We only need some of it. So we're gonna just stop pulling out some values. So what we'll do is we'll grab the date That feels like a viable thing to to store. We'll turn that into a JavaScript date.\u003C/p>\u003Cp>What was it called? Pub date. Pub date. And I know that we want it in an just in a an ISO string. So that kind of standardizes it.\u003C/p>\u003Cp>So I don't think it comes in an ISO string. No. It comes in whatever this archaic thing is. That's the date. We want the title that also fills the digits at episode dot title.\u003C/p>\u003Cp>We could grab the description. There are a few variants of this description. Taking a look. Let's take a look what's the difference. 
This one has HTML tags, p and 2 break tags.\u003C/p>\u003Cp>This one does not. So this is the one we want here. The Itunes summary. Itunes colon summary, which means we have to use this syntax to dig in there. And finally, the actual URL, of course, episode dot enclo enclosure_url because it was it was a an attribute.\u003C/p>\u003Cp>Okay. Looks legit. Save that. Let's run it again. Nothing.\u003C/p>\u003Cp>Great. That's not what we want. What happened here? To ISO string is not a function. Oh, because it said to ISO sting.\u003C/p>\u003Cp>That's a typo. Ring. There it is. The date, the description, the title, the URL. Cool.\u003C/p>\u003Cp>Yeah. It's a pretty nice little automation builder here. Now we have the URL. I mean, strictly speaking, we didn't need that step. Right?\u003C/p>\u003Cp>We could just crack on, but I like just reducing down that complex data structure into something quite known. So that we called this latest. We'll need that in this next step, which is actually gonna be the AI transcription operation that, that we built and released. So there are some options here. The first thing we need is a Deepgram API key, which you can get from your Deepgram dashboard.\u003C/p>\u003Cp>We'll do that together in a moment. You need a full file URL, which we have. It's the it's the m p 3. You can provide a callback URL optionally and then sort of flip over into callback mode, which again stops long hanging, you know, connections, but this will be fine for this. We allow you to enable diarization, which do you know why it's called diarization?\u003C/p>\u003Cp>This isn't leaving. I don't know the answer.\u003C/p>\u003Cp>Speaker 1: Yeah. It could be called speaker identification as well, but, yeah, I think the research term first is a diarization. So it basically tells you who's speaking when you have a mono channel, and multiple speakers. 
If you have multichannel audio, you you don't really need to diarize, because you know each speaker's on a different channel. But, yeah, a lot a lot of people have a single channel, especially with a podcast.\u003C/p>\u003Cp>It's it's not multichannel.\u003C/p>\u003Cp>Speaker 0: Yes. And, thank you, Ramsey. I'm glad I caught it really quickly, but, yes, there was a missing r in ISO string. So you can optionally enable diarization, and then you can also add keywords. Talk to us about keywords.\u003C/p>\u003Cp>Someone who works for a word that sounds like directors, I'm very, very intimately familiar with this.\u003C/p>\u003Cp>Speaker 1: Yeah. So so keywords allows you to kind of increase the probability that we would, you know, pick up the rectus and direct us. Right? You know, as a single word versus, like, direct us. Right?\u003C/p>\u003Cp>So if you put in that keyword with the spelling and then you increase the intensifier, and the intensity is actually it's a exponential scale. So as you go up higher, it gets extremely strong. Yeah, value of 1 or 2 is is pretty normal. If you were to put in a value of a 1,000, nearly every word will start turning into direct us. But that kinda gives you an idea of how you can leverage that feature.\u003C/p>\u003Cp>Speaker 0: Yeah. Interestingly, it's not direct to us. It's always directors. Like, I am the director of the film. That's always like if when it's wrong, that's how it gets it wrong.\u003C/p>\u003Cp>We don't need to use keywords for this. So first thing we'll need is a Deepgram API key. Here is our director. Here is our Deepgram console. Signed up for an account.\u003C/p>\u003Cp>And you can go make a new API key. You can give it a nice name here so we can call this leap week work shop, workshop. You can optionally set an expiration. I will do that. I will expire this after 1 hour.\u003C/p>\u003Cp>Right? Because I don't we won't be going for more than an hour, and then this key will just stop working. 
You can also, add some tags, but this is the thing that's interesting. You can change the permissions of the key, which is nice. Yeah.\u003C/p>\u003Cp>Do you have any notes about this or just yeah. You can do that.\u003C/p>\u003Cp>Speaker 1: Yeah. Like, if if you have certain needs, right, sometimes you might wanna generate keys, like, more API keys with an API key.\u003C/p>\u003Cp>Speaker 0: Build an admin. Like, if you're if you're creating this as a service, for example, you're using Deepgram in, like, yeah. Cool. That makes sense. You get an API key, which probably shouldn't share, but mine is in an hour and a half, and it has a fixed amount of credit and no credit card.\u003C/p>\u003Cp>So the US, we'll pop the Deepgram API key in there. Next thing we want is the file URL. You can add dynamic values using mustache impacts, double squigglies on each side. The last step was called latest and the value was URL. So that will resolve to the full URL that was inside of that enclosure.\u003C/p>\u003Cp>And I think we'll leave everything else. I think that's that's the shortest version. I'll call this transcription transcription. Sure. Hit save.\u003C/p>\u003Cp>No. Let's try it out. So now it's taking a little bit longer because it's not just making one HTTP request. We are waiting for for it to happen. Now by default, I happen to know because I built this extension.\u003C/p>\u003Cp>We do turn on a couple of features. So I'll wait for this to finish and then we'll talk about those features. Maybe taking a hot minute there. Has it? Oh, oh, there we are.\u003C/p>\u003Cp>There we are. Boom. Look at that. Huge. Right.\u003C/p>\u003Cp>Before we look at the data structure that comes back, I will tell you that we are using smart format and we are using the Nova 2 model. So maybe let's talk briefly about each. Should we start with the model?\u003C/p>\u003Cp>Speaker 1: Mhmm. Yeah. So so the base model is our oldest model. So that was, you know, from kinda 2020 18, 2019 era. 
It's an extremely performant model, but the accuracy is is a lot lower.\u003C/p>\u003Cp>Some customers still opt to use it because it is just so compute efficient. And then we have our enhanced model, which, you know, added a bit more compute to it. But, yeah, our nova 2 model is the most accurate model that we have, and it's, yeah, available now in 36 languages, and we're we're adding more languages every month.\u003C/p>\u003Cp>Speaker 0: Nice. And that is what we're using here in this, operation. And then what smart format do? I think smart format basically checks a bunch of other boxes for us.\u003C/p>\u003Cp>Speaker 1: Yeah. So smart formatting, is actually baked into the model. So the model itself when it's transcribing is is generating the formatting. If you turn that off and you enable, like, punctuation and and numerals and things like that, that will apply post processing formatting, and which tends to lose a little bit of the, the context. Because, you know, some like, the number one isn't always meant to be a number.\u003C/p>\u003Cp>Right? Like, if if I you know, I am the the one and only. You don't want the digit to come in there. Right? So that that's essentially what that's there for.\u003C/p>\u003Cp>Speaker 0: Fascinating. So we applied smart formats. So we make that we make that available. So you don't have the option to turn those off or change them. That's just what you get with this, with this extension.\u003C/p>\u003Cp>Okay. Let's look at what came back there. Big old payload. Now, just because this is a slightly I've gone into the big data structure that Deepgram returns, which, Damian, you've probably spotted that immediately. This is the first alternative is always returned.\u003C/p>\u003Cp>So I can just speed speed up our I can speed run us here. So the first thing is this transcript, which yeah? Like you said, it's nice. It's formatted. 
Interest I didn't know it was baked into the model and that it's not post processing and that's the difference.\u003C/p>\u003Cp>I thought it was just a shortcut to checking a few other boxes, but it isn't. It actually does something different.\u003C/p>\u003Cp>Speaker 1: Yeah. And some customers will want digits but not punctuation or punctuation and not digits. So having them split out as well allows them to pick and choose between the the features.\u003C/p>\u003Cp>Speaker 0: Right. So we have reached the point where this is to Deepgram directs us. This is too big for me to just scroll through and and talk about. So what I'll do is I'll just look at the docs for this specific extension, and we can talk about about it. So this was the AI transcription operation.\u003C/p>\u003Cp>This is the data structure that's returned if it was a really short transcript. So we have the transcript. We saw that. We didn't actually manage to scroll to the end of it. Can you talk to us about the other objects that are all the other, properties that are returned?\u003C/p>\u003Cp>Speaker 1: Yeah. So the words array is gonna give you the start and end times of each of the words, also the confidence that we have for that word. Like, if you detect a very low confidence word, you know, some people will actually choose to omit it. Right? It could have just been, you know, picked up from a cough or something like that.\u003C/p>\u003Cp>And, yeah, if it if it's down at, like, 5%, it's usually, probably gonna be wrong. Right? But for the most part, you'll see confidences in the high nineties. We also have the punctuated words, so, you know, you you'll get the word as it was, you know, printed out, without any punctuation or formatting.\u003C/p>\u003Cp>Speaker 0: What about that?\u003C/p>\u003Cp>Speaker 1: And then what what we apply to\u003C/p>\u003Cp>Speaker 0: They're not the same words. Like, it's a typo. Oh, it's a typo in my readme. It's a typo in my readme. 
Ignore me.\u003C/p>\u003Cp>I'll go and fix that another time.\u003C/p>\u003Cp>Speaker 1: Yeah. You would have seen lowercase h I and no no full stop in that case. Yeah.\u003C/p>\u003Cp>Speaker 0: And then there's also paragraphs, which is which is also interesting. Mhmm.\u003C/p>\u003Cp>Speaker 1: Yeah. So we can we can split it up, by paragraphs. If you enable diarization, we'll also split it up by, you know, who who said what as they said. Cool. And you can do utterances as well.\u003C/p>\u003Cp>So that will give you kind of logical semantic breaks in in speech as well. Yeah. And if you were to enable diarization in each word, you would get a speaker ID as well. So you would have, like, speaker 0, speaker 1, and, you know, whoever spoke the word.\u003C/p>\u003Cp>Speaker 0: Yeah. Lovely. And we see here the transcript is there. It's formatted, but it adds these line breaks in. So, you know, you can kind of print that.\u003C/p>\u003Cp>We get paragraphs. We get sentences. We get start ends for all of them. So It's really nice and flexible. Yeah.\u003C/p>\u003Cp>I see sentences could work quite nicely for, putting captions on a screen, like a sentence at a time or something like that. Okay. So that's the data that comes back from that. I think for this, all we really care about is this top level transcript, But the rest of it does exist. Now just a reminder, you can do audio intelligence within that single request if you're using the Deepgram API or SDK, but we've chosen to split them into 2 distinct operations so you can just have what you need, and each one can be a little more simple rather than being a kitchen sink of options.\u003C/p>\u003Cp>So let's crack on then. Let's go ahead and add the text intelligence right here. So I'll call this analyze, I think. Once again, Deepgram API key. I think I, I can't see it again.\u003C/p>\u003Cp>Oh, wait. The it's in the it's in the last one. We'll do that. We'll grab it out of here. 
There it is.\u003C/p>\u003Cp>There's the raw value, which again will expire by the time this is over. Right. AI text intelligence, analyze. Deepgram API key and the text is going to be transcription was the name of that was the key of the last step dot transcript. Now this is a point to and it will be the last operation.\u003C/p>\u003Cp>I hate this. As a as a educators, I like, you know, lead educator. I think this lets you footgun. If you start rewiring your operations, this value is not always the same. That's why I'm personally a big fan of explicit naming of keys and explicit inclusion of keys.\u003C/p>\u003Cp>But key sorry. Last always exists. Another one that exists is the trigger, which would give you data from that, from that very first step. So it's just a couple of conveniences there. But we will make this try and go ahead.\u003C/p>\u003Cp>Speaker 1: Is there any way to see all the available, step values or objects or explore them?\u003C/p>\u003Cp>Speaker 0: Yes. There's a number of ways that debugging flows is an area we know needs improvement. I'm gonna just save this. You can take a look over here. Right?\u003C/p>\u003Cp>And you can look through the logs and you can go, right, well, this was for each step, but and it was called latest, but you don't have the key immediately available. You can simply just log them. We have a logging step, which will add an extra operation. You can also just return it in the last step and it will return here. Or rather, I think when you configure the trigger, let's take a look, you can get all data back and that will return the entire object.\u003C/p>\u003Cp>So you have options, but no, there isn't a really nice way to do this right now.\u003C/p>\u003Cp>Speaker 1: It might be a cool addition. I know when I when I use, like, email, syntax injection, there's, like, a little list that lets me pick from them.\u003C/p>\u003Cp>Speaker 0: Yeah. Yeah. No. That that makes no sense. 
And, this was actually the topic of back to Directus TV we go.\u003C/p>\u003Cp>Of one of our recent request reviews. What was it? It was the improvements to flows debugging. So we spoke about it for a whole hour, with our community around what they'd like to see based on an open feature request. So maybe that's something we'll see in the not so not too distant future.\u003C/p>\u003Cp>Alright. Let's try this. Now we are expecting to wait a moment for this because it's going to transcribe the whole hour, then it's gonna run the text intelligence. So I'll kick it off, but I am expecting to to wait.\u003C/p>\u003Cp>Speaker 1: And then are are flows always triggered from an API request, or is there a way\u003C/p>\u003Cp>Speaker 0: to There was there was 5 different triggers. So now we're a little bit deeper. I'll do this again because I think you're building more context around this. So the first is an event hook. So you can say, hey.\u003C/p>\u003Cp>Whenever an item is created in this specific collection or these collections, trigger the flow. So you can do event based hooks. You can either do it before the database transaction occurs so you can validate, manipulate the inbound data before it gets committed or perhaps stop it in its tracks, right, and fail out if something that isn't correct, or you can do it after the data has been committed. So that's the event hook. We have the web hook, which we are using for this just for speedy rerunning.\u003C/p>\u003Cp>We can run it on a schedule, so you can provide the 6 point cron syntax here and run it up to every minute. You can trigger it based on another flow. So one of the, operations in the list was to run another flow. You can put data in, and it will return data out so you can modularize your your flows a little. And then finally, manual.\u003C/p>\u003Cp>And I think the easiest way to look at manual is probably just a quick trip to the docs. 
The manual flow trigger, you pick a set of collections and it adds this button over here to the sidebar. So this sidebar, it requires you to check 1 or more you 1 or more items and hits and hit the button, or you can do it from within an individual record, an individual item. And it will send the IDs of those items into the flow as part of the trigger. That did take a little while there.\u003C/p>\u003Cp>You can additionally add this confirmation dialogue and collect per invocation values. So this could be useful for things like sending an email. Right? So you type in a message, you hit go, you've maybe picked some users or send a text message with Twilio, press a button, and off it goes. So they're the 5 ways to trigger, to trigger your flow.\u003C/p>\u003Cp>But we're just using the webhook so I can just run it just by hitting up and enter here. Mhmm. Let's, see what that big object look like. We have the summary, which is nice. It's a nice length for an hour's worth of footage.\u003C/p>\u003Cp>Can you talk to us about the rest? That's the summary, but we've got more.\u003C/p>\u003Cp>Speaker 1: Yeah. So the topics, it's it's got a a lot of predetermined topics that the model's capable of picking up. You also have the option to pass custom topics. So if you have a topic that's kind of nuanced, very unusual, you can add that in as well. And that's gonna figure out, like, okay.\u003C/p>\u003Cp>Whereabouts in, you know, this transcript, right, based on, the text in in this case. And whereabouts was it talking about WikiLeaks or fake off or scammer or spyware? And and that's really useful because now you have the ability to actually jump to that position. Right? So you could imagine if you wanted to find, you know, the area that was talking about, WikiLeaks, you could just click a button, and it would jump you to that segment in in the actual transcript.\u003C/p>\u003Cp>Speaker 0: Yeah. Exactly. 
You could build out a search, You're not searching just the raw transcripts. You're you're searching for topics because that's more realistic to people's usage. That's really cool.\u003C/p>\u003Cp>This again is quite long. So let's find our way to the to the example I I have written up here. So, yes, we get the topics on a per segment basis. You get the intents. Let's talk about intents.\u003C/p>\u003Cp>And can we talk a bit about how intents are different to topics? Because I'm a little fuzzy on it.\u003C/p>\u003Cp>Speaker 1: Yeah. So so topics can can be all sorts of things. You're probably gonna have, you know, say, 10 x topics versus intents. An intent is really like if if I'm making a phone call and I want to cancel my plan, you know, or update my address, right, that might be one thing. But I may go off on a tangent and start talking about my holiday in Spain and do you know what I mean?\u003C/p>\u003Cp>And that that could be a topic, but the intent of the call really was to, you know, achieve something. And the same can be said about, you know, a video, a podcast. And so, yeah, I'm I'm interested about the intents that actually brought back for that podcast.\u003C/p>\u003Cp>Speaker 0: Let's take a look. Might have to yeah. I might have to do the mother of all scrolls here.\u003C/p>\u003Cp>Speaker 1: You could try a control f me.\u003C/p>\u003Cp>Speaker 0: Yeah. I could. Yeah. You think I'd know that using a computer every day. What do we call it?\u003C/p>\u003Cp>Intent. Thank you for that. Wow. Okay. Yes.\u003C/p>\u003Cp>So Explore Samsung Smart TV features. That's funny. Though they were talking about because I just listened to this the other day. Basically, Samsung TVs have this feature built in where you could put it in, like, low low power mode where, like, it looks off, but it's not. And so the yeah.\u003C/p>\u003Cp>If you push mute 182 and then power the TV appears to be off, but it isn't. 
And then if you basically run spyware on it and then put it in that mode, no one knows. So instead of needing to plant bugs, you could actually just use the Samsung Smart TV, which will record to the TV, and then you just go by and retrieve it later.\u003C/p>\u003Cp>Speaker 1: And you can see how useful this intent is. Right? Like, straight away, it got us to something, you know, very interesting. Right?\u003C/p>\u003Cp>Speaker 0: Yeah.\u003C/p>\u003Cp>Speaker 1: Yeah. So there there's definitely gonna be intense. Yeah.\u003C/p>\u003Cp>Speaker 0: Yeah. That's where discuss Anum's features. That is what happened. Interesting. Okay.\u003C/p>\u003Cp>It might make a little less sense here, but in a call center context in particular Yeah. So with sentiment, and and it's pretty cool. I don't know if you're\u003C/p>\u003Cp>Speaker 1: Yeah. So with sentiment, and and it's pretty cool. I don't I don't know if you have the playground up. There's a good visualization of the sentiment in there.\u003C/p>\u003Cp>Speaker 0: Yes.\u003C/p>\u003Cp>Speaker 1: Yeah. So if you scroll up and then you see just at the very top Per month. Okay. Next to summary. Yeah.\u003C/p>\u003Cp>Sentiment. So so you can track the sentiment over time. Right? Because we're giving you, like, you know, sentiment, you know, at each sentence or utterance. And if you scroll down, you can see as the sentiment changes, you know, it goes to see negative negative negative.\u003C/p>\u003Cp>So that kinda gives you an idea of, you know, what's happening throughout, you know, the show or or the phone call.\u003C/p>\u003Cp>Speaker 0: Got it. Got it. But you might only need to know the average. So I think if memory serves me right, there is also I think it is literally called average. Mhmm.\u003C/p>\u003Cp>Yeah. An average sentiment as well.\u003C/p>\u003Cp>Speaker 1: Yeah. And and the average is gonna tend towards neutral. Right? Because, you know, the vast majority of of text is is kind of neutral. 
Right?\u003C/p>\u003Cp>It's only it's only gonna be parts of the call go negative. So, like, if if you see there's I don't know if you can search for positive, see how many results you get.\u003C/p>\u003Cp>Speaker 0: And so\u003C/p>\u003Cp>Speaker 1: I'll say 10 and then negative.\u003C/p>\u003Cp>Speaker 0: 69.\u003C/p>\u003Cp>Speaker 1: Yeah. And then neutral. 81. Okay. So so so it looks like it it was kind of 5050 on the on the neutral and negative.\u003C/p>\u003Cp>Yeah. Just enough to bring it kind of back to that new Yeah.\u003C/p>\u003Cp>Speaker 0: Yeah. Yeah. Well, it's just gonna average the sentiment scores, which are between minus 1 and 1, I'm guessing, given that this is minus 0.0.15. Mhmm. Okay.\u003C/p>\u003Cp>So we now have a summary, and now it's time to go ahead. And, and that summary was held in the output of that. I think it's called summary dot text. Summary was an object. And now it's time to use, the text to speech, APIs.\u003C/p>\u003Cp>And to do that, we are going to build an extension, which I'm really excited about. Now for those watching, this isn't intended to be a play along, so I'm gonna go a little bit faster than I would running a hands on workshop because this is gonna be available tomorrow on director's TV. I'm also gonna turn this into a blog post sometime in the next couple of weeks so you can follow step by step in written form if that's more your thing. So, we're gonna go into our extensions folder here and MPX create direct us extent extend. This always gives me extension Extension.\u003C/p>\u003Cp>Direct. Yeah. Sure. Let wait for the latest version, please. And here are all of the extension types.\u003C/p>\u003Cp>You can create custom panels for direct us insights, the dashboard builder, custom interfaces, which are form inputs for the editor, but we are going to create an operation for flows. And I will call this Deepgram TTS, text to speech. I'll just write it in JavaScript and auto install dependencies. 
And given the speed of other things that have happened on my system during this session, I think we'll just be waiting a hot minute for that. But what we're gonna do here is we're gonna set up this operation, and we are going to use Deepgram's JavaScript SDK, which makes interacting with Aura, the text to speech service, Just a lot just a lot easier.\u003C/p>\u003Cp>So while that's scaffolding, let's, oh, it did it. It did it. So I next time next time we wait, we'll take a look at Aura. So we're gonna jump we're gonna jump in here. Right.\u003C/p>\u003Cp>Let's take a look inside of our new Deepgram TTS extension at the code. There are 2 files that matter. The first is app dotjs, and this describes all of the configuration. So this, says what is shown on the card here on this kind of overview and what options are presented here and then fed into the into the back end. So the API key and the text and stuff like that.\u003C/p>\u003Cp>The, App. Js, yeah, will also do things like what icon is shown here, what text and description, stuff like that. And then there is the API JS which runs server side and actually executes executes the, you know, the the operation, then it will be here where we install and use the SDK. So let's\u003C/p>\u003Cp>Speaker 1: that you can that you can build the UI through that code and do all back end process. Lot of other ideas kinda come into mind now that I see it.\u003C/p>\u003Cp>Speaker 0: Yeah. And the on a lot with breakfast in multiple ways. You know? Yesterday, we built out actually, not for those watching on demand. Sorry for kind of crossing streams.\u003C/p>\u003Cp>So you probably, you you may have already seen this. If we take a look at Directus and just take a very quick look at yesterday's workshop, another thing you may not consider is I'm just mute that. You may not have considered is, you know, we have this dashboard builder, and you'll be thinking, oh, okay. You know, it's all out displaying displaying insights. 
You know?\u003C/p>\u003Cp>That could be useful or whatever. But what? Look at that, look at that quality there. I'll click back over here in a minute. Maybe just here.\u003C/p>\u003Cp>Here. But this panel, you pick a user from a drop down and hit call, and it would use the Twilio voice SDK to actually do a two way phone call from your browser to the target to the user's phone number. So, yeah, really, really flexible. You can very much build a lot in it. Anyway, right.\u003C/p>\u003Cp>So we're gonna create a custom operation. So first thing we're gonna do here is we are going to change the ID. The ID has to be unique across all operations in your project. So it's quite typical that, you know, people will prefix the name of their extension with their author name. I'm just gonna call this one Deepgram TTS because I doubt there will be another one called that.\u003C/p>\u003Cp>And that has to be the same in both files. So also here, the ID. We'll call this 1 Deepgram TTS. What are we gonna do for icon? We will use record voice over.\u003C/p>\u003Cp>I think it's what the one that I've used in the past for, Deepgram. And then for description, generate text to speech. Well, we don't need to save too much time insights. Just a a little visual thing. Now what we need to do is we're gonna pop in some text.\u003C/p>\u003Cp>So actually, I think we'll just leave that as it is, but we are also going to pick the model. So let's actually take this moment to pause. Could you talk to us about the models in Aura?\u003C/p>\u003Cp>Speaker 1: Yeah. In the playground that you you should be able to access, and we we literally just added it the other day. So on the top at the right hand side the very top right hand.\u003C/p>\u003Cp>Speaker 0: Oh, text to speech. I didn't see that then. Oh, perfect. Yes. Yeah.\u003C/p>\u003Cp>Speaker 1: So you can type in any text you want here, and it will generate it. Yeah. And you can just hit play on one of the voices. 
Angus at the very top, actually, is my voice. So, yeah, if you ever wanna\u003C/p>\u003Cp>Speaker 0: Actually, I listened to it yesterday. That's so funny. So we have 2 here. You know what? Let's just for sake of argument, we'll just pick the top 2, Angus and Arcus.\u003C/p>\u003Cp>But they each have this model name, Aura Angus e n, and AURA ARCUS en. So we're going to provide a way to do a drop down and just pick between them. And in theory, you would populate as many as you wish, or you would take away the choice and just pick 1 and not provide this option. But we can do that. So we have this text box in the option.\u003C/p>\u003Cp>Let's, go and create a new one. So we will do field. This is what you name it. So I'll name it model. Right?\u003C/p>\u003Cp>That's like the key that we saw. We're gonna give it a visual name. So we'll capitalize that. That is ultimately just going to store a string. And then we get to provide some information in here.\u003C/p>\u003Cp>First thing we'll do is just the the width will make it full, which just means I'll go under it. So you can make them half, but whatever. But the more important thing here is the interface or the form input where you can create custom interfaces as we spoke about earlier. And the one we want is called select drop down like so. This interface has some options.\u003C/p>\u003Cp>As you would probably expect, it's the choices. And each choice has a text, and that has a value. And, like I said, we will do 2. So the text for the first one is Angus, and we can see here, Aura Angus e n. Is that what it is?\u003C/p>\u003Cp>Yeah. Aura Angus e n, and Arcus was the second one. Aura, Arcus, e n. Nice. Now the only other thing we'll do here is we'll just show it on that card.\u003C/p>\u003Cp>This is optional. This is just this is just, you know, UI further to a to a degree, but we will and sorry. It's over here in the in the overview. So we'll also bring in the model, and we will show that on the card as well. 
You'll see what that does in just a moment.\u003C/p>\u003Cp>Speaker 1: And what would be the default if that wasn't populated? Or is it just always It would just\u003C/p>\u003Cp>Speaker 0: be an empty card. It would just be an Oh, it would be an empty card. Just like this.\u003C/p>\u003Cp>Speaker 1: If the if the model wasn't selected.\u003C/p>\u003Cp>Speaker 0: Good question. I think it might default to the first. Did I? You could probably set a default or handle the default over on the server side. If not selected, pick this.\u003C/p>\u003Cp>I think kinda similar approach to most drop downs that you could build. So let's let's run this. Let's go npm run dev. And that's going to build our extension, watch for changes, and rebuild it whenever there's a change. Over in our first terminal here, we see here extensions reloaded.\u003C/p>\u003Cp>If I hit save, it will rebuild the extension. Directors will see that I've rebuilt the that I've that the that the extension has been rebuilt. Will it?\u003C/p>\u003Cp>Speaker 1: You might need to make a change rather than\u003C/p>\u003Cp>Speaker 0: Yeah. That's what I was that's what I was waiting on. Interesting. I might just quickly restart it and see if it needed just a one time restart. And if that continues, then whatever will might just have to kick it up the bum.\u003C/p>\u003Cp>So that's rerunning now. So I'll just save that. There you go. Extensions reloaded. Okay.\u003C/p>\u003Cp>Just needed one quick one quick kick up the bum. So let's, let's see what happens now. So we will add to this new extension on the end. There it is. Deepgram TTS.\u003C/p>\u003Cp>There's the icon we pick. That's the text. That's the title. We pass in the text, which we know is annual annualize dot summary Mhmm. Dot x, I think.\u003C/p>\u003Cp>So analyze dot summary.text. Sure. And we pick the model, and there they are. So we'll pick your voice. That's quite funny.\u003C/p>\u003Cp>I didn't know. I didn't know that. That's pretty cool. 
And we'll hit save. So and you see there the model is shown on the front, and that's the text input that we put in.\u003C/p>\u003Cp>So we hit save. When this, operation runs, the API side will run. So the first thing we'll do is we will go ahead and, so go ahead and just pull the model in as well. So that'll just be the the e n, you know, Angus or aura Angus e n. Let's let's do this.\u003C/p>\u003Cp>I'm really excited, actually. Right. We are gonna use the Deepgram SDK. So npm install at deepgram/sdk. Good.\u003C/p>\u003Cp>Good. Good. And we'll go ahead and, initialize this. It's funny when I was a developer advocate at Deepgram, I did this all the time. So import, create, client from not that, from Deepgram SDK, and then you create an instance, Deepgram equals create client, and the API key have to go in here.\u003C/p>\u003Cp>Yeah. Obviously, I Eric, why did I even bother hitting save? We need to pass in the API key here. We We don't really wanna hard code it in our extension. So instead, what we're gonna do is we're gonna add it here to our Docker compose file, which will bring it into the environment variables.\u003C/p>\u003Cp>So mostly because I've already forgotten it, let's grab that key again. Let's pop it in here. We'll call it deep oh, Deepgram API key. There it is. We do need to restart our Docker container whenever we update the Docker Compose file because it just reads that once it load.\u003C/p>\u003Cp>And straightaway in here, process dotm.deepgramapi key. One moment. These are fine. These are not errors. These are little warnings, not a problem.\u003C/p>\u003Cp>It's just, some of some of the way that the, yeah, some of the the build of the Deepgram SDK, but it's not a bother at all. They are just warnings. And now it's time to actually build the hands, build out the the handler. So, what happens? We press the button, it goes in.\u003C/p>\u003Cp>Now what we're gonna want to do here is ultimately we want to save a file to our director's project. 
And we expose a bunch of services to your project, which you can use to interact directly with these kind of directors primitives. Now, the first thing we wanna do here is we wanna go ahead and just add in here a second variable called context. And inside of here, con, const. There we go.\u003C/p>\u003Cp>Const. We want services and we also want get schema, which we'll need to, initialize the service. Services is a list of all the services. The user service, the item service, the permission service, the role service. We only care about the files service.\u003C/p>\u003Cp>So we'll just pull that out just just to make it easier. And then we'll go ahead and we will initialize a files service. A new file service. And in here, you have to pass in the schema of your project, and that thankfully is just returned in get schema. And I did just catch in the little tool tip there that that needs to be, awaited, and therefore, this needs to be async.\u003C/p>\u003Cp>Not there. That's an object. So that's us creating the file service. That means we now have an interface with which we can create a file in just a moment. Next, we're gonna go ahead and use the Deepgram SDK to generate a a stream of audio, and this was lovely by the way.\u003C/p>\u003Cp>I was speaking to to Luke, one of the DX engineers at at Deepgram recently about this. And the fact that this SDK uses the native interface makes this next bit really, really nice. So what we wanna do here is create I'll call it response for now, I guess. Or e g response maybe. Deepgram response.\u003C/p>\u003Cp>What we can do here is just use the initialized client here with our environment variable dot speak dot request. First first argument here is the source, so we can just pass in the text. So you would yeah. This is how you do it, but a shorthand because the name of the of the, property and the value is the same, you can just shorthand it there. 
And secondly, any options that we want to use.\u003C/p>\u003Cp>I might just create options as its own just to keep it really clear. I might just do this above here and then feed it in. So what do we wanna do here? I think all we really wanna do is we wanna pick the model. The same thing, we can use the shorthand.\u003C/p>\u003Cp>So that'll be, Angus. And we're going to tell it what file format we want it to return. If memory serves me right, you can return quite a lot of audio formats from Deepgram. Right?\u003C/p>\u003Cp>Speaker 1: Yeah. We we support quite a few different formats. Yeah. If you wanna play it back, typically m p 3 or WAV, if you wanna stream it, like, to Twilio and things like that, you'd probably do, like, raw audio linear 16 or Yeah.\u003C/p>\u003Cp>Speaker 0: That's cool. We'll do this because in just a moment, we are gonna need to know the file format. So I want to explicitly ask it for a files format. So we know with confidence it's gonna be an MP 3. Then finally oh, hang on.\u003C/p>\u003Cp>Let's pass this in. DG options. There we go. And finally is the file stream. I might call it dgstream, just again to be very explicit.\u003C/p>\u003Cp>Now gonna await response dot get stream Stream. That's it. That is a DG response. That is now just a live stream of audio, which is fantastic. Because it is a file stream, we can pass it straight into the file service to upload it.\u003C/p>\u003Cp>Now before we continue, we are also going to need a file name. Right? We we need to tell direct us when we create a file, what we want the file name to be at least as when you download it. And I think what we might wanna do is actually collect that from the user upfront. Like, hey.\u003C/p>\u003Cp>What filename do you want this to be? So let's go through the motions here of adding a new item here and just a new text box. Right? And we'll call this one file name, I guess. Field name type.\u003C/p>\u003Cp>I think that's all we need. File name. 
I And you\u003C/p>\u003Cp>Speaker 1: need to change the name. Right?\u003C/p>\u003Cp>Speaker 0: Nope. Oh, yeah. Yeah. Yeah. Sure.\u003C/p>\u003Cp>But I mean, that's that's that will stop us getting confused, but strictly speaking, you you don't need to. So we have the file name. Great. We're not gonna bother showing it on the overview, so it can just stay here. And then over on the API side, we just wanna pull that pull that in here.\u003C/p>\u003Cp>File name. Yeah. Cool. And just to make this consistent, I might just move this, and we can call this, like, request or something like that. Just, again, kind of handle these the same way.\u003C/p>\u003Cp>So now we have the file name that we'll pass in. Let's save this. Let's, just refresh this. Make sure all of that works. And let the white screen stay for just a moment longer than I would have liked it to.\u003C/p>\u003Cp>Look, there's a problem here, which tells me something went wrong. But I don't know what. Look. They've all gone a bit they've all gone a bit funny, which means I have caused a problem. Love that for me.\u003C/p>\u003Cp>Might just zoom out one step for the sake of scanning a larger surface of code file name. I could have broken at any of those points. We haven't refreshed this in a while. Thought there. And these are, let let me scan this because they were just, these are just warnings there.\u003C/p>\u003Cp>Oh, no. No. No. That's still the warning. Interesting.\u003C/p>\u003Cp>Interesting. Let's look over here again. We passed in the the create client. Let's just save ourselves just a moment of effort and just figure out if it's in here, first of all. It was.\u003C/p>\u003Cp>Okay. That at least helped somewhat. Let's. Was it maybe the fact that you I've called it request? It's request meant no.\u003C/p>\u003Cp>That's fine. Okay. I mean, this is a top tier debugging. This. Okay.\u003C/p>\u003Cp>It's something down here. Okay. I mean, okay. WAVs. Yeah.\u003C/p>\u003Cp>So there's nothing wrong. Yeah. 
Let me just make sure the extension reloads. Fine. No bloody problem.\u003C/p>\u003Cp>Nothing happened here. Right. Let's open this one up, and now there's a file name. Right? So I suppose we can put in a dynamic value here and call it and, and call it latest, which was the latest episode dot I think we called it title.\u003C/p>\u003Cp>I'll save it, and then I'll just look back. Latest. Yeah. We called it title. That'll be the title.\u003C/p>\u003Cp>That's a dynamic value, which is nice. So that gets passed in as a file name. We get the stream, and, all we need to do now is upload it to directives. And the way we do that is let's do the same thing again. We'll call it, directives options.\u003C/p>\u003Cp>Just so again be really explicit. We need 3 things, I think, are mandatory here or it won't work. The first thing is the file name downloads. Whenever you download the file from director status, the file name that it will have, We've already established, so that's gonna be file name. Oh, dotmp 3, I suppose.\u003C/p>\u003Cp>I'm not sure if that's needed, but I'm gonna do it anyway. It needs a MIME type. We just call it type here. We already know that's an MP 3. I know the standard format for MIME types is this, audio slash.\u003C/p>\u003Cp>And again, that's why we specifically requested a file format. It doesn't really matter which one it was and where is it going. This matters because you can connect more than one asset storage to Directus. By default, it will use local storage, which is just this up which is just this kind of link to this uploads folder, but you can connect it to an s 3 bucket, and Azure Blob storage and and many more. So we're just telling it, hey.\u003C/p>\u003Cp>This is where we want storage. Should that\u003C/p>\u003Cp>Speaker 1: should that meme type be MPEG rather than MP 3?\u003C/p>\u003Cp>Speaker 0: No. I think it's this. I think it's I think if if I caught my mind back, I think that's right. Don't know. 
We'll find out in a couple of minutes.\u003C/p>\u003Cp>So that'll be, that'll be our first point of debugging. And then the final thing we'll do, although it is optional, is we will actually set the title of the new of the new file. So this is the, like, visual labeled title where the other one is what happens when you download it. That isn't strictly needed, but we'll we'll do that. Then what we'll do is we'll call it directors file, I guess.\u003C/p>\u003Cp>The new file is we will use our file service. We will upload 1, and the first value is going to be a stream. Unfortunately, Deepgram just returns a stream, so we can dump that in there. And the second one is the directors options. What is returned from upload 1 is the new primary key of the new file that's being created.\u003C/p>\u003Cp>So we will just return the rectus file. That's the whole thing. That's the whole operation. Before I good before I get all excited and describe everything that's happened, let's make sure it works because it it might not. Right.\u003C/p>\u003Cp>Nothing seems to have gone wrong here. We have a file name set, so let's trigger this one more time, and I am expecting to have to wait now because we are doing all of these steps back to back. I'm expecting to have to wait maybe 30 ish, 40 ish seconds. But we'll see. It's an hour of audio.\u003C/p>\u003Cp>So while that's happening, let's recap what's happening. We're going and grabbing the RSS feed and converting it to JSON with this custom extension you can download for free in the marketplace. We just, you know, traverse that that big objects that that's returned to just get the latest item. We didn't end up doing anything with the date, actually. Now I think about it or the description.\u003C/p>\u003Cp>We transcribe it using the AI transcription endpoint and then run text intelligence. Both of these are Deepgram to receive the summary. And then this custom built extension we just built does text to speech. 
You pass in some text, You pick a model from the drop down. We just pick the first two out the list, Arcus and Angus, and you give it the file name for the original file for the new file.\u003C/p>\u003Cp>And as you see here, what was returned was a string, which means there wasn't an error, which means there is our 42 second summary. Let me listen to your voice. I don't think this will come through. It it's you. It's so you.\u003C/p>\u003Cp>That's so funny. So do people do people at Deepgram just get proposition to, like, get their voices up?\u003C/p>\u003Cp>Speaker 1: 6 of the 12 voices are Deepgrammers, and then the 6 are voice actors.\u003C/p>\u003Cp>Speaker 0: It's really funny. But there's your little summary. There's your summary. That's sounds freaking cool. So let's talk about the code, and then we'll talk about what more you could do with this.\u003C/p>\u003Cp>Right? So we grab the data that was actually, you know, provided by the operation. We grab the file service and the get schema function, and we initialize a new file service that allows us to interact directly with the director's files collection. These three lines is all that's needed, and strictly speaking, you know, this object could go in here. So two lines is all that's needed to request a text to speech, a text to speech operation, I suppose, from Deepgram.\u003C/p>\u003Cp>That's all that's needed and it returns a stream of data. Then we, you know, configure all of our options for our new file we're about to create, and we upload the file by just providing the stream directly, providing the options, and that returns the primary key, which we return. That's the whole thing. Do you have any questions, in the chat? Who have been remarkably quiet, by the way.\u003C/p>\u003Cp>I hope you've enjoyed. Damien, do you have any questions, thoughts?\u003C/p>\u003Cp>Speaker 1: Yeah. I'm I'm just amazed at how easy that was. Like, less than 30 times code, and it's all hooked up and it works. 
And a lot a lot of that code is verbose code as well. Right?\u003C/p>\u003Cp>Like it's Absolutely. Trying to be expanded. So, absolutely really easy. And, yeah, being able to pass that stream straight into the file\u003C/p>\u003Cp>Speaker 0: was very useful. It was very, very useful indeed. So what more could you do with this? Well, the obvious kind of first step is that load more data is returned from Deepgram. So you can do more with that.\u003C/p>\u003Cp>You know, you why we could save the description directly to the file if we wanted. We can provide you know, we can maybe tag it with topics. We could do whatever whatever we want here. It's completely up to us. You could also run further automations either as part of the same flow or a separate flow.\u003C/p>\u003Cp>It's all good that, you know, a new podcast has been transcribed, but do we know? Maybe we send an alert. We send, you know, an email or a notification to the user, which if we take a look here, there is a send email operation right here. So you could tell them that there's a new summary and maybe directly link them to the director's files m p 3, Because everything in directors, if we take a look at this new file here, it has this ID, and you can just go to local host 8055/assets/that. And there is our m p 3.\u003C/p>\u003Cp>So you could link them directly to it if you fancied. Another thing is that, you know, this was a slightly conceited example in that we have to manually run it. But you could run a crop. You could use a chrome here. You could use a chrome, grab the feed and say, hey.\u003C/p>\u003Cp>Has there been anything new in the last 24 hours since I last ran? Okay. Now go and transcribe the latest episode. You know? 
So you could run this on a schedule and make it like a daily roll up of new shows, new episodes that you could listen to.\u003C/p>\u003Cp>Speaker 1: One other idea is, like, obviously, this was audio to begin with, so we we kinda compressed it, and we create a summary that became audio. But maybe there is, like, you know, a cool blog that you follow, but you may not have the chance to read the blog, but you'd like to listen to it. Right? You know, maybe in the car. So you could take a blog, turn it into a, like, you know, an audiobook, very easily, or you could even, you know, summarize it.\u003C/p>\u003Cp>Right? And and play it out. I had a pretty interesting idea of, like, a a real time radio station that's basically, you know, tailored to exactly what you like. Right? So you could have a, you know, maybe archive research papers being fed in, and then it's giving you kinda the updates in real time.\u003C/p>\u003Cp>Speaker 0: Oh, pretend I didn't see that. I remember I'm not sure if it will still be live. Yeah. Here it is. This is a post I wrote.\u003C/p>\u003Cp>The date's wrong because I I worked here at that point. But what this did, it used my JSON. It literally literally live transcribed a radio station. I could edit in BBC Radio 4. Mhmm.\u003C/p>\u003Cp>And it would live transcribe it, which was super cool. Super, super cool.\u003C/p>\u003Cp>Speaker 1: Yeah. So now you can even do the reverse.\u003C/p>\u003Cp>Speaker 0: Yeah. Yeah. Yeah. That's pretty, pretty cool. There's so much scope for this, you know, based on more complex triggers, you know, more logic in the middle.\u003C/p>\u003Cp>Like I said, you know, this could be a cron instead of a trigger. So many options. But I think that is just about our time, and we have 2 more minutes. So, yeah, thank you so much for indulging me and and and and getting involved in this and sharing your insights. 
I learned actually quite a lot, during that.\u003C/p>\u003Cp>Especially the smart format being part of the model itself. Fascinating. Not not what I thought.\u003C/p>\u003Cp>Speaker 1: Yeah. Thanks for having me. This this is super interesting, and I'm kind of amazed that you're able to build all this from scratch, you know, in in the length of time that we're chatting here. And, yeah, it really just shows what's possible with Directus. So I might be I might be building a few little, thing that flows with it myself.\u003C/p>\u003Cp>Speaker 0: That's how we get you. That's how we get you. And you can run it locally. Right? And it's the full fat thing.\u003C/p>\u003Cp>You know, it's not like a a less good version. Like, it is the full thing. It's what we host. I will say that I need to add it to this, I think it's still in a PR, actually. I don't think we merged it in yet.\u003C/p>\u003Cp>But the RSS to JSON operation, I will show it because it is also really, like, light. I just didn't wanna have to do it now because it kinda wasn't the point. We're taking the URL as you saw, and all we do here is that all the code for this operation is here. That's it. That's the whole thing, the whole operation.\u003C/p>\u003Cp>We import a library called XML parser. We go off and get the RSS feed. And assuming everything was good, we just pass it, the attribute name prefix underscore, and then we returned we returned the past data. So that whole operation, that first one was the code. We could have built it live.\u003C/p>\u003Cp>I just didn't think it was gonna be that interesting. Thank you so much folks in the chat, for your kind words. I'm glad we made your life easier. I like lots of claps. Yes.\u003C/p>\u003Cp>There are lots of use cases, both for Directus and Deepgram and the 2 together, and I completely echo Jonathan's sentiment. Welcome to the director's community. We're very happy to have you. Great. 
And with that, we are at time.\u003C/p>\u003Cp>So have a wonderful rest of your week, everyone. Have a wonderful rest of your, week, Damien. And tomorrow, just a reminder that there is one more event this week week and then there's this community networking social. It is using the one and only platform I have ever done networking on that doesn't absolutely suck. So if you're interested in meeting other people who are interested or use or involved in Directus in some way, shape, or form, drop by.\u003C/p>\u003Cp>It's at if you go to leapweek.dev, it will be, localized to your time zone. But here in Berlin, in Central European, it is at 4 PM. So, yeah, hopefully, we'll see you at that tomorrow. Damien, anything else you wanna share just before we hit end?\u003C/p>\u003Cp>Speaker 1: No. Thanks very much everybody for joining. And, yeah, really interesting possibilities. This is open all.\u003C/p>\u003Cp>Speaker 0: Excellent. Right. With that, have a good rest of your day, nerds. Bye for now.\u003C/p>\u003Cp>Speaker 1: Bye bye.\u003C/p>","Hello. Hello. Hello. Hello. Damien, you are still muted, but we are we are here. Hello. I'm Kevin. I'm Damien. Yeah. Nice to meet you. Yeah. For the next hour and a half, we're gonna be trying to get things to work maybe successfully. We'll see. We'll talk about the project in just a moment, but I actually thought some more thorough introductions might be in order. Damien, would you like to tell us who you are and who you work for? Yeah. I'm Damien Murphy, applied engineer here at Deepgram. So, you know, working with customers, building, you know, real time low latency voice spots and transcribing their audio. Yeah. Excellent. And I am very, very, very fond of Deepgram, so I'm really excited and thankful that you're joining us for the next little bit. My name's Kevin. I work on the director's core team, and in this workshop or rather, this workshop is part of, Leap Week. 
Hopefully, you are already aware, but Leap Week is our week of announcements where we announce new features and also run a series of other events to celebrate directors and our community. We're starting to near the end of the week now, but don't worry. There's still lots more to come. Tomorrow, we are doing a community networking social. And right now, right here, we're gonna be building some cool stuff with directors and Deepgram. Maybe if we take a moment to talk about the project, that'd be a cool way to to stop. So podcasts. I love podcasts. Podcasts are actually all standards. Podcasts are just an RSS feed that contains some metadata and links to episodes. And in this workshop, we're going to string together using Director's automate and flows, our kind of visual automation, tool, a, you know, semi complex automation where we are going to go grab a RSS feed of a podcast, go grab the latest episode, send it off to Deepgram's transcription service. So maybe before we I jump straight into the whole project, maybe we break down each part. Could you tell us a little bit about, Deepgram's transcription service? Yeah. So we're able to process, you know, audio, video, pretty much any format, and turn that into, text. Right? So we'll basically transcribe every bit of speech that's spoken and then give you back a word level and time stamp level, you know, what was spoken. We also have multiple other APIs, which we'll get into a little bit later. But, I mean, we can we can rock on now. So we're going to go and transcribe these podcasts. I listened to one the other day that was like an hour long. Then we're gonna use this audio intelligence. Tell us about this one. Yeah. So we have the ability to pass the transcript once it's transcribed through our audio intelligence features. So this can do things like sentiment analysis, summarization, intent detection, and topic detection. 
And this can be really useful for, you know, pulling out that valuable metadata, and it's all time stamped as well. So you can even, you know, build an overview of the podcast, using those, audio intelligence features. Cool. And then you can also understand it on, like, a segment basis as well. Right? Yeah. Yeah. So each part of the the audio that comes through will pick up topics as they happen. So we can do major topics and, minor topics as well. Awesome. Oh, that's really interesting. Justine, a question here in chat. And, yes, please do use the chat. I will answer the question while encouraging you to use the chat. Will this demo be available on demand? Yes. Like everything at Leap Week, it is all recorded. It will be available on DIRECTUS TV tomorrow. In fact, the workshop from yesterday with Twilio is already up in our brand new show called Enter the Workshop as you will be able to watch this on demand, of course. So, but by being here live, you have access to the chat, so take advantage of it. I'll be monitoring it. You can ask either of us questions about Directus or Deepgram or what we're doing, and we'll be more than happy to answer in that. So we're gonna transcribe a latest podcast episode. We are going to use the audio intelligence, features that, Deepgram offers. I'm gonna struggle because Directus in Deepgram both start with these. So sometimes I might do this. I feel myself maybe doing it already. And then finally, we will use, one of the newer Deepgram products, Text to Speech. Tell us about this one. Yeah. So we recently released, our text to speech. It's one of the the lowest latency text to speeches on the market, with high quality voices. So you can get a very low latency text to speech generated at a very low price point as well. Just to help me understand, because latency only, I suppose, matters well, it doesn't only matter, but it matters more when you're doing live, like, real time stuff. So you can use this real time as well? Yeah. 
Absolutely. And that's where we see a lot of the demand in the market is for, you know, building real time voice box with sub second latency. So with this text to speech, you can get about 250 milliseconds of of, latency for time to first byte. Excellent. We won't be using it real time today because obviously podcast episodes are already static hosted files, but that's, I suppose, where the latency matters. So you can do, like, true conversational voice bots, I suppose. Cool. So we're gonna do all of that. Just to summarize how this is going to work, we are going to first build a flow that will take in a podcast URL. We will grab the latest podcast episode from that podcast feed. We will send that off to Deepgram to receive a transcript, then we're gonna send it off for, text intelligence, so text to text API that Deepgram offers. We'll talk a little bit about why they're separate when they don't have to be. You can do those 2 steps together, but it will become clear as we go through the workshop. Then armed with a summary of that podcast, we are going to send it back off to Deepgram to generate a short summary, I suppose, in the audio bite, which we will then save back to the director's project so you can go and listen to it at your leisure. Any questions in the chat? Any thoughts, Damian, before we kick off? Yeah. If anybody wants to sign up for Deepgram, we give $200 in free credits as well. So, you'll be able to transcribe about 750 hours of of audio for for free, essentially. Yeah. It's really, really cool. Really nice way to get started. And indeed, that is what we will be doing today. Okay. I think that means we are ready to kind of, jump in and get started. And the very first thing we are going to do here is we are gonna set up a directors project running locally now. I will give you a very quick summary of what Directus is in case you're coming from the Deepgram world and you've not heard of Directus before. 
So Directus is a really cool back end that you can use as a developer to build your applications. You connect it to a database. We provide developer tooling and this really beautiful web application which you can use to interact with that data. And it's, suitable for handing to non developers as well, which is not very typical of back end, back end tooling. So we're gonna spin this up, and then we are specifically going to use Directus Automate, which is part of this application in order to build this kind of multistep flow, something that looks a bit like this, except each one will take on one of the steps we described in our project. This project will use some, extensions that we built and published to the marketplace, which is available in all directors projects. We can go and do that together. And then that very final step where we create a new audio file and save it back to our directors project, we're gonna build that extension together because it doesn't currently exist. So that's that's the kind of rundown of how this is going to shake out. So with that in mind, I have this empty directory here on my local machine. It's just this empty directory called live. Let's move into it here, and we're gonna spin up a director's project. The first thing we're gonna do is create a docker compose dot yml file. And I do happen to have one here. This is the docker compose file for spinning up directives locally with a SQLite database. There isn't too much to talk about here. We will use the latest version of directives that has been published on Docker Hub. We have 3 volumes. So these are, directories that exist inside of the Docker container that we are going to map to local directories. And you'll see exactly what these do in about a minute. We need some environment variables, a key and a secret. You should replace both with random values. For the sake of this workshop, I think replace with random value is random enough, so we'll leave that be. 
The initial admin email and password, which, of course, you can go change. The database client and being SQLite is just a file, So we're just telling it where that file will live. We have WebSockets enabled so you could do, like, real time subscriptions. It's part of my kind of default snippet that I have. We're not gonna use that today. And then we're also turning on extensions auto reload, which is gonna be really important for the developer experience of building our extension at the very end of this work shop. So with all of that done, you can just run docker compose up. No. Oh, did I hit save? I did not. There we go. And so it's now gonna go ahead and, spin that up. And you'll notice immediately an upload and extensions and a database folder. So they are the 3 volumes that are inside of the Docker container, but also mapped to a local volume. It did a whole bunch of, like, first time, you know, seeding, and then we have directives running right now on local host 8055 with my admin, email, and password that we set in the Docker Compose. That's it. That's how I was having set up directives. This is the full fat version of directives running here. It's the same version we host in Directus Cloud, and we can with that jump straight in. Damian, I might just give you a quick tour of it if that makes sense. We have a database. Yeah. We have a database. It's that SQL like database. In here, we can create tables in that database and we can query them. We can interact with the data. Great. We also have users that we can create. We have a whole auth service. So immediately, we have this admin user that you can invite other users. Users in turn can have various permissions, which grant them access to do different actions on collection. So create, read, update, delete, and share. We also connect to your asset storage, or you can save files locally as well. So this will connect to an s 3 bucket and, an Azure storage, Backblaze, and various others. 
We are gonna use this later to actually save the summary back, from Deepgram. By default, if you don't say anything, it will be just local file storage, and it'll actually just get dumped here in this in this uploads folder right here in the sidebar. We have a little insights dashboard builder. We used that in yesterday's workshop. And then over here in settings, we have access to flows, which is the automation builder, which is what we're gonna use today. I think the only other thing we wanna do before we kick off is let me just have a quick think here. The only other thing we wanna do is our public role. So this is this represents, all of the requests that are made that have no permissions that have not authenticated. And I'm just gonna give it the ability to read and write files. In the real, you shouldn't do this. But for the sake of this, it'll be fine. What's the worst that could happen? So this will allow us to read and write files without needing to authenticate with with directors. What else is needed? We need we need the extensions from the marketplace. So there are 3 extensions we need here. If I type in Deepgram, I built a few. I don't I don't like the spinning wheel. There there it is, Deepgram. So we have the AI transcription operation, and we have the AI text intelligence operation. We believe in making things nice and small and modular, so we have separated them, and each one's very simple. In reality actually, question. I think in reality, you could do the the intelligence at the same time as the transcription. Right? Yeah. Yeah. You can send a single request, and you'll basically just enable those parameters, and you'll get both back. Great. As these are don't know what's going on there. Although, I've had issues with my Internet all day, so I'm gonna go out. I'd rather this was a bit slow than you not being able to hear or see what's going on. So we did the AI transcription, and then we had the AI text intelligence. 
So we'll just install both of those. So these were released, last month as part of our Directus AI bundle of, of operations for our automation builder. And then there's one more that I created just to make our life a bit easier today, and it's this extension here, RSS to JSON. It will allow you to go off and get an RSS feed, and it will return it will, parse it and turn it into a JSON object. And this will be really helpful because we obviously need to parse the RSS feed of a podcast. So we'll go ahead and install that too. There we go. We'll just give it a quick refresh as it is prompting us to do, and we're ready to rock on. So we're gonna create a new flow. Podcast summarizer summarizer summarizer. Sure. I don't think summarizer is a word, so I don't know why I am so hung up on it. And we can trigger this automation in 1 of 5 ways. We can do an event hook. So an event hook can be triggered whenever or will be triggered whenever something happens in your database. So it could be, a new item is created in the posts collection or a new user is registered or a new file is uploaded. We have, webhooks, which takes an inbound HTTP request, so you can receive data from third party services. In the world of Deepgram, how we actually use it here on Directus TV, our on demand shows all have transcripts. Some of our shows are very long, so we use Deepgram's, asynchronous callback mode. So it goes up and does work and then pings you once it's done. And so that would be a webhook trigger. You can run them based on time, you know, schedules. You can have flows trigger other flows. So if you have complex, you know, use cases, you can kind of bounce portions off into their own modularized automations and then return the data back up. And finally, manual. And this will add a button, this will add a button to the side of the data studio when you're in collections or item pages, and you can go and trigger it from there. 
We're gonna use a webhook because I just want the ability to call it really quickly and just making a quick curl request is probably gonna be the easiest way to do it. I don't care about any of this because it really is just a quick trigger. So if I hit this URL in fact, let's do that. I open a new, let me just build these 2 terminals. If I open a new terminal and just curl this URL and refresh here, we'll see it's been triggered once in the logs. So I think that's gonna be the quickest way of just constantly running it as we go to to test it. Okay. Any questions so far? Anyone in the chat? I I raced through this. I got us to this point super, super quick. We We scheduled an hour and a half in for this, and I think it won't take long at all. So unless questions are asked. So, feel feel free. Not that you have to, although you need questions. A question for you though, Damian. With Deepgram's callback mode, can you give some use cases for when that's useful? Because it's a really good, you know, demonstration, I think, of the fact that you can do you can trigger flows based on webhooks. Yeah. A lot of customers use it, because it allows their server to, you know, get back to doing other tasks. Right? So rather than waiting for the response, the more features you enable, the, you know, the longer the request will take. So, you know, adding summarization and topic detection, entity detection, you know, it it can go up into, you know, the 30-, 40-second range, and as the audio gets longer as well. But yeah. Like, by default, if you've if you're just transcribing, you know, you can transcribe an hour long podcast in probably, you know, 10 seconds. Right? So, one of the other cool features is you can pass a URL to, like, an S3 bucket. So you can tell us, hey, you know, when you're transcribing it, instead of me sending you the file, go pull it from an S3 bucket. And you can even tell us to put it back into an s 3 bucket as well, which is pretty cool. Yeah. 
We have actually, over in our docs, I've written a post before a deep gram post right here. Right. But that makes sense. It's to stop you having, like, hanging long connections open. Right. And that that makes total sense. So this, what this does, is it listens for any file upload. It verifies that it's an audio file, and then it will send the URL of your of your file directly to to Deepgram authenticated with your token. It has a transcript returned, and then you can save that straight back to the file. So it's placed right next to the file, which is really cool. It's a really straightforward automation here. And this also featured on, let me find it. This also featured on our quick connect series right here. So it's that same project but over in video form. So if you're interested in kinda learning more about what's quite a common automation, I think, with Deepgram, you can see how to set that up. Okay. First thing we need to do then is we need to go ahead and get a podcast, like, actually go get, an RSS feed. I have loads of podcasts. I actually agonized over which to pick. So I picked Darknet Diaries. You heard of Darknet Diaries? No. Haven't heard of it. Fantastic podcast all about cybersecurity. Really, really, really good. I just listened to just listened to this latest episode here, Anom, like, 2 days ago, came out June 4th. It was it was so good. It was not what I expected. But a 146 episodes of Darknet Diaries and any I'm gonna say true podcast because I think Spotify has started to screw with the definition of a podcast is just an RSS feed, and they all follow exactly the same format. If it's not if it doesn't have an open RSS feed, it isn't actually a podcast. It's an appropriation of the term podcast. But the podcast is this kind of XML document, this RSS feed, and they all have, you know, some metadata that, you know, will be shown in your podcast acts. And then they have a number of items here. 
So this item here that I'm highlighting is a single episode. It's that one we just saw, Anum. And you'll notice here in the enclosure, there is this attribute called URL, and that contains a direct m p 3 link. And that's how podcasts will work. And that's really handy because with, Deepgram, you can send a a binary file or you can send a URL. And podcasts have this URL just hanging out there. So our job is get the URL. I can take this whole feed URL and use our brand new, I built it yesterday, RSS to JSON, RSS to JSON operation here, and I'm gonna call it feed. The fact I call it feed will become clear in their own. Why does this key matter when it has a name? Why does this key matter? We'll talk about that in just a moment. We'll stick the URL in there, Save it. Hit it again. And I think we configured this flow to actually return the data from the last step. So we are expecting to basically see it here. Yeah. There it is. The whole RSS feed, but turned to JSON. If we refresh here, we can also see it in our logs. There it is. So there it is. That's pretty cool. There's our item. Where is it? Here we are. There's our item array, and there is the MP 3. Now it does actually say in the docs of this, extension that I built yes yesterday. If ever there's an attribute, you'll note that you may remember it was an enclosure. I can show you. It was an enclosure tag with an attribute of URL. And somehow I had to map that to a JSON object. So the chosen method was to make it an object and the attributes are just underscored. I think that's valid. So now we wanna dig in and actually get that data. We wanna get that URL. So we will create a new we will create a new, step here. And this one, I will call, latest, I guess. Latest because we just wanna get the latest episode. This has all the episodes. And we're gonna just run some JavaScript in here. Now the this, kind of, boilerplate here, is it is the zoom level okay? Yeah. It looks okay. Yeah. Cool. Have this data property. 
And data is a big object and properties in that object include the keys of all of the steps. So I can get the I can go and get the feed step by, you know, going data dot feed, and that's that whole object that was returned. So if you name the keys, you can more easily pick specific values from all the way up what we call the data chain, and every operation adds a new object to the data chain. So we have data dot feed here. Now I happen to know because I didn't wanna I didn't wanna sync too much time here. I know where the value of the URL is. It's in dot RSS dot channel dot items dot item That's an array and we want the first item. That's the value of the episode. Suppose we'll just store that. And now that that episode had a ton of data, how long is it? When was it published? What's the description? What's the title? What's the cover art? The m p 3, obviously, and a whole bunch of additional metadata. It was huge. It was a really, really big object, actually. The ID, the pub date, the link to the, like, web website, the description formatted, the URL, and data about the URL. Some data specifically for iTunes, the author, iTunes summary so much so much. But actually not so much. That's the end of it. I reached the end, but significant. We don't need all of it. We only need some of it. So we're gonna just stop pulling out some values. So what we'll do is we'll grab the date That feels like a viable thing to to store. We'll turn that into a JavaScript date. What was it called? Pub date. Pub date. And I know that we want it in an just in a an ISO string. So that kind of standardizes it. So I don't think it comes in an ISO string. No. It comes in whatever this archaic thing is. That's the date. We want the title that also fills the digits at episode dot title. We could grab the description. There are a few variants of this description. Taking a look. Let's take a look what's the difference. This one has HTML tags, p and 2 break tags. This one does not. 
So this is the one we want here. The Itunes summary. Itunes colon summary, which means we have to use this syntax to dig in there. And finally, the actual URL, of course, episode dot enclo enclosure_url because it was it was a an attribute. Okay. Looks legit. Save that. Let's run it again. Nothing. Great. That's not what we want. What happened here? To ISO string is not a function. Oh, because it said to ISO sting. That's a typo. Ring. There it is. The date, the description, the title, the URL. Cool. Yeah. It's a pretty nice little automation builder here. Now we have the URL. I mean, strictly speaking, we didn't need that step. Right? We could just crack on, but I like just reducing down that complex data structure into something quite known. So that we called this latest. We'll need that in this next step, which is actually gonna be the AI transcription operation that, that we built and released. So there are some options here. The first thing we need is a Deepgram API key, which you can get from your Deepgram dashboard. We'll do that together in a moment. You need a full file URL, which we have. It's the it's the m p 3. You can provide a callback URL optionally and then sort of flip over into callback mode, which again stops long hanging, you know, connections, but this will be fine for this. We allow you to enable diarization, which do you know why it's called diarization? This isn't leaving. I don't know the answer. Yeah. It could be called speaker identification as well, but, yeah, I think the research term first is a diarization. So it basically tells you who's speaking when you have a mono channel, and multiple speakers. If you have multichannel audio, you you don't really need to diarize, because you know each speaker's on a different channel. But, yeah, a lot a lot of people have a single channel, especially with a podcast. It's it's not multichannel. Yes. And, thank you, Ramsey. 
I'm glad I caught it really quickly, but, yes, there was a missing r in ISO string. So you can optionally enable diarization, and then you can also add keywords. Talk to us about keywords. Someone who works for a word that sounds like directors, I'm very, very intimately familiar with this. Yeah. So so keywords allows you to kind of increase the probability that we would, you know, pick up the rectus and direct us. Right? You know, as a single word versus, like, direct us. Right? So if you put in that keyword with the spelling and then you increase the intensifier, and the intensity is actually it's a exponential scale. So as you go up higher, it gets extremely strong. Yeah, value of 1 or 2 is is pretty normal. If you were to put in a value of a 1,000, nearly every word will start turning into direct us. But that kinda gives you an idea of how you can leverage that feature. Yeah. Interestingly, it's not direct to us. It's always directors. Like, I am the director of the film. That's always like if when it's wrong, that's how it gets it wrong. We don't need to use keywords for this. So first thing we'll need is a Deepgram API key. Here is our director. Here is our Deepgram console. Signed up for an account. And you can go make a new API key. You can give it a nice name here so we can call this leap week work shop, workshop. You can optionally set an expiration. I will do that. I will expire this after 1 hour. Right? Because I don't we won't be going for more than an hour, and then this key will just stop working. You can also, add some tags, but this is the thing that's interesting. You can change the permissions of the key, which is nice. Yeah. Do you have any notes about this or just yeah. You can do that. Yeah. Like, if if you have certain needs, right, sometimes you might wanna generate keys, like, more API keys with an API key. Build an admin. Like, if you're if you're creating this as a service, for example, you're using Deepgram in, like, yeah. Cool. 
That makes sense. You get an API key, which probably shouldn't share, but mine is in an hour and a half, and it has a fixed amount of credit and no credit card. So the US, we'll pop the Deepgram API key in there. Next thing we want is the file URL. You can add dynamic values using mustache impacts, double squigglies on each side. The last step was called latest and the value was URL. So that will resolve to the full URL that was inside of that enclosure. And I think we'll leave everything else. I think that's that's the shortest version. I'll call this transcription transcription. Sure. Hit save. No. Let's try it out. So now it's taking a little bit longer because it's not just making one HTTP request. We are waiting for for it to happen. Now by default, I happen to know because I built this extension. We do turn on a couple of features. So I'll wait for this to finish and then we'll talk about those features. Maybe taking a hot minute there. Has it? Oh, oh, there we are. There we are. Boom. Look at that. Huge. Right. Before we look at the data structure that comes back, I will tell you that we are using smart format and we are using the Nova 2 model. So maybe let's talk briefly about each. Should we start with the model? Mhmm. Yeah. So so the base model is our oldest model. So that was, you know, from kinda 2020 18, 2019 era. It's an extremely performant model, but the accuracy is is a lot lower. Some customers still opt to use it because it is just so compute efficient. And then we have our enhanced model, which, you know, added a bit more compute to it. But, yeah, our nova 2 model is the most accurate model that we have, and it's, yeah, available now in 36 languages, and we're we're adding more languages every month. Nice. And that is what we're using here in this, operation. And then what smart format do? I think smart format basically checks a bunch of other boxes for us. Yeah. So smart formatting, is actually baked into the model. 
So the model itself when it's transcribing is is generating the formatting. If you turn that off and you enable, like, punctuation and and numerals and things like that, that will apply post processing formatting, and which tends to lose a little bit of the, the context. Because, you know, some like, the number one isn't always meant to be a number. Right? Like, if if I you know, I am the the one and only. You don't want the digit to come in there. Right? So that that's essentially what that's there for. Fascinating. So we applied smart formats. So we make that we make that available. So you don't have the option to turn those off or change them. That's just what you get with this, with this extension. Okay. Let's look at what came back there. Big old payload. Now, just because this is a slightly I've gone into the big data structure that Deepgram returns, which, Damian, you've probably spotted that immediately. This is the first alternative is always returned. So I can just speed speed up our I can speed run us here. So the first thing is this transcript, which yeah? Like you said, it's nice. It's formatted. Interest I didn't know it was baked into the model and that it's not post processing and that's the difference. I thought it was just a shortcut to checking a few other boxes, but it isn't. It actually does something different. Yeah. And some customers will want digits but not punctuation or punctuation and not digits. So having them split out as well allows them to pick and choose between the the features. Right. So we have reached the point where this is to Deepgram directs us. This is too big for me to just scroll through and and talk about. So what I'll do is I'll just look at the docs for this specific extension, and we can talk about about it. So this was the AI transcription operation. This is the data structure that's returned if it was a really short transcript. So we have the transcript. We saw that. 
We didn't actually manage to scroll to the end of it. Can you talk to us about the other objects that are all the other, properties that are returned? Yeah. So the words array is gonna give you the start and end times of each of the words, also the confidence that we have for that word. Like, if you detect a very low confidence word, you know, some people will actually choose to omit it. Right? It could have just been, you know, picked up from a cough or something like that. And, yeah, if it if it's down at, like, 5%, it's usually, probably gonna be wrong. Right? But for the most part, you'll see confidences in the high nineties. We also have the punctuated words, so, you know, you you'll get the word as it was, you know, printed out, without any punctuation or formatting. What about that? And then what what we apply to They're not the same words. Like, it's a typo. Oh, it's a typo in my readme. It's a typo in my readme. Ignore me. I'll go and fix that another time. Yeah. You would have seen lowercase h I and no no full stop in that case. Yeah. And then there's also paragraphs, which is which is also interesting. Mhmm. Yeah. So we can we can split it up, by paragraphs. If you enable diarization, we'll also split it up by, you know, who who said what as they said. Cool. And you can do utterances as well. So that will give you kind of logical semantic breaks in in speech as well. Yeah. And if you were to enable diarization in each word, you would get a speaker ID as well. So you would have, like, speaker 0, speaker 1, and, you know, whoever spoke the word. Yeah. Lovely. And we see here the transcript is there. It's formatted, but it adds these line breaks in. So, you know, you can kind of print that. We get paragraphs. We get sentences. We get start ends for all of them. So It's really nice and flexible. Yeah. I see sentences could work quite nicely for, putting captions on a screen, like a sentence at a time or something like that. Okay. 
So that's the data that comes back from that. I think for this, all we really care about is this top level transcript, But the rest of it does exist. Now just a reminder, you can do audio intelligence within that single request if you're using the Deepgram API or SDK, but we've chosen to split them into 2 distinct operations so you can just have what you need, and each one can be a little more simple rather than being a kitchen sink of options. So let's crack on then. Let's go ahead and add the text intelligence right here. So I'll call this analyze, I think. Once again, Deepgram API key. I think I, I can't see it again. Oh, wait. The it's in the it's in the last one. We'll do that. We'll grab it out of here. There it is. There's the raw value, which again will expire by the time this is over. Right. AI text intelligence, analyze. Deepgram API key and the text is going to be transcription was the name of that was the key of the last step dot transcript. Now this is a point to and it will be the last operation. I hate this. As a as an educator, I like, you know, lead educator. I think this lets you foot-gun. If you start rewiring your operations, this value is not always the same. That's why I'm personally a big fan of explicit naming of keys and explicit inclusion of keys. But key sorry. Last always exists. Another one that exists is the trigger, which would give you data from that, from that very first step. So it's just a couple of conveniences there. But we will make this try and go ahead. Is there any way to see all the available, step values or objects or explore them? Yes. There's a number of ways that debugging flows is an area we know needs improvement. I'm gonna just save this. You can take a look over here. Right? And you can look through the logs and you can go, right, well, this was for each step, but and it was called latest, but you don't have the key immediately available. You can simply just log them. 
We have a logging step, which will add an extra operation. You can also just return it in the last step and it will return here. Or rather, I think when you configure the trigger, let's take a look, you can get all data back and that will return the entire object. So you have options, but no, there isn't a really nice way to do this right now. It might be a cool addition. I know when I when I use, like, email, syntax injection, there's, like, a little list that lets me pick from them. Yeah. Yeah. No. That that makes no sense. And, this was actually the topic of back to Directus TV we go. Of one of our recent request reviews. What was it? It was the improvements to flows debugging. So we spoke about it for a whole hour, with our community around what they'd like to see based on an open feature request. So maybe that's something we'll see in the not so not too distant future. Alright. Let's try this. Now we are expecting to wait a moment for this because it's going to transcribe the whole hour, then it's gonna run the text intelligence. So I'll kick it off, but I am expecting to to wait. And then are are flows always triggered from an API request, or is there a way to There was there was 5 different triggers. So now we're a little bit deeper. I'll do this again because I think you're building more context around this. So the first is an event hook. So you can say, hey. Whenever an item is created in this specific collection or these collections, trigger the flow. So you can do event based hooks. You can either do it before the database transaction occurs so you can validate, manipulate the inbound data before it gets committed or perhaps stop it in its tracks, right, and fail out if something that isn't correct, or you can do it after the data has been committed. So that's the event hook. We have the web hook, which we are using for this just for speedy rerunning. 
We can run it on a schedule, so you can provide the 6 point cron syntax here and run it up to every minute. You can trigger it based on another flow. So one of the, operations in the list was to run another flow. You can put data in, and it will return data out so you can modularize your your flows a little. And then finally, manual. And I think the easiest way to look at manual is probably just a quick trip to the docs. The manual flow trigger, you pick a set of collections and it adds this button over here to the sidebar. So this sidebar, it requires you to check 1 or more you 1 or more items and hits and hit the button, or you can do it from within an individual record, an individual item. And it will send the IDs of those items into the flow as part of the trigger. That did take a little while there. You can additionally add this confirmation dialogue and collect per invocation values. So this could be useful for things like sending an email. Right? So you type in a message, you hit go, you've maybe picked some users or send a text message with Twilio, press a button, and off it goes. So they're the 5 ways to trigger, to trigger your flow. But we're just using the webhook so I can just run it just by hitting up and enter here. Mhmm. Let's, see what that big object look like. We have the summary, which is nice. It's a nice length for an hour's worth of footage. Can you talk to us about the rest? That's the summary, but we've got more. Yeah. So the topics, it's it's got a a lot of predetermined topics that the model's capable of picking up. You also have the option to pass custom topics. So if you have a topic that's kind of nuanced, very unusual, you can add that in as well. And that's gonna figure out, like, okay. Whereabouts in, you know, this transcript, right, based on, the text in in this case. And whereabouts was it talking about WikiLeaks or fake off or scammer or spyware? 
And and that's really useful because now you have the ability to actually jump to that position. Right? So you could imagine if you wanted to find, you know, the area that was talking about, WikiLeaks, you could just click a button, and it would jump you to that segment in in the actual transcript. Yeah. Exactly. You could build out a search, You're not searching just the raw transcripts. You're you're searching for topics because that's more realistic to people's usage. That's really cool. This again is quite long. So let's find our way to the to the example I I have written up here. So, yes, we get the topics on a per segment basis. You get the intents. Let's talk about intents. And can we talk a bit about how intents are different to topics? Because I'm a little fuzzy on it. Yeah. So so topics can can be all sorts of things. You're probably gonna have, you know, say, 10 x topics versus intents. An intent is really like if if I'm making a phone call and I want to cancel my plan, you know, or update my address, right, that might be one thing. But I may go off on a tangent and start talking about my holiday in Spain and do you know what I mean? And that that could be a topic, but the intent of the call really was to, you know, achieve something. And the same can be said about, you know, a video, a podcast. And so, yeah, I'm I'm interested about the intents that actually brought back for that podcast. Let's take a look. Might have to yeah. I might have to do the mother of all scrolls here. You could try a control f me. Yeah. I could. Yeah. You think I'd know that using a computer every day. What do we call it? Intent. Thank you for that. Wow. Okay. Yes. So Explore Samsung Smart TV features. That's funny. Though they were talking about because I just listened to this the other day. Basically, Samsung TVs have this feature built in where you could put it in, like, low low power mode where, like, it looks off, but it's not. And so the yeah. 
If you push mute 182 and then power the TV appears to be off, but it isn't. And then if you basically run spyware on it and then put it in that mode, no one knows. So instead of needing to plant bugs, you could actually just use the Samsung Smart TV, which will record to the TV, and then you just go by and retrieve it later. And you can see how useful this intent is. Right? Like, straight away, it got us to something, you know, very interesting. Right? Yeah. Yeah. So there there's definitely gonna be intense. Yeah. Yeah. That's where discuss Anum's features. That is what happened. Interesting. Okay. It might make a little less sense here, but in a call center context in particular Yeah. So with sentiment, and and it's pretty cool. I don't know if you're Yeah. So with sentiment, and and it's pretty cool. I don't I don't know if you have the playground up. There's a good visualization of the sentiment in there. Yes. Yeah. So if you scroll up and then you see just at the very top Per month. Okay. Next to summary. Yeah. Sentiment. So so you can track the sentiment over time. Right? Because we're giving you, like, you know, sentiment, you know, at each sentence or utterance. And if you scroll down, you can see as the sentiment changes, you know, it goes to see negative negative negative. So that kinda gives you an idea of, you know, what's happening throughout, you know, the show or or the phone call. Got it. Got it. But you might only need to know the average. So I think if memory serves me right, there is also I think it is literally called average. Mhmm. Yeah. An average sentiment as well. Yeah. And and the average is gonna tend towards neutral. Right? Because, you know, the vast majority of of text is is kind of neutral. Right? It's only it's only gonna be parts of the call go negative. So, like, if if you see there's I don't know if you can search for positive, see how many results you get. And so I'll say 10 and then negative. 69. Yeah. And then neutral. 81. Okay. 
So so so it looks like it it was kind of 5050 on the on the neutral and negative. Yeah. Just enough to bring it kind of back to that new Yeah. Yeah. Yeah. Well, it's just gonna average the sentiment scores, which are between minus 1 and 1, I'm guessing, given that this is minus 0.0.15. Mhmm. Okay. So we now have a summary, and now it's time to go ahead. And, and that summary was held in the output of that. I think it's called summary dot text. Summary was an object. And now it's time to use, the text to speech, APIs. And to do that, we are going to build an extension, which I'm really excited about. Now for those watching, this isn't intended to be a play along, so I'm gonna go a little bit faster than I would running a hands on workshop because this is gonna be available tomorrow on director's TV. I'm also gonna turn this into a blog post sometime in the next couple of weeks so you can follow step by step in written form if that's more your thing. So, we're gonna go into our extensions folder here and MPX create direct us extent extend. This always gives me extension Extension. Direct. Yeah. Sure. Let wait for the latest version, please. And here are all of the extension types. You can create custom panels for direct us insights, the dashboard builder, custom interfaces, which are form inputs for the editor, but we are going to create an operation for flows. And I will call this Deepgram TTS, text to speech. I'll just write it in JavaScript and auto install dependencies. And given the speed of other things that have happened on my system during this session, I think we'll just be waiting a hot minute for that. But what we're gonna do here is we're gonna set up this operation, and we are going to use Deepgram's JavaScript SDK, which makes interacting with Aura, the text to speech service, Just a lot just a lot easier. So while that's scaffolding, let's, oh, it did it. It did it. So I next time next time we wait, we'll take a look at Aura. 
So we're gonna jump we're gonna jump in here. Right. Let's take a look inside of our new Deepgram TTS extension at the code. There are 2 files that matter. The first is app dotjs, and this describes all of the configuration. So this, says what is shown on the card here on this kind of overview and what options are presented here and then fed into the into the back end. So the API key and the text and stuff like that. The, App. Js, yeah, will also do things like what icon is shown here, what text and description, stuff like that. And then there is the API JS which runs server side and actually executes executes the, you know, the the operation, then it will be here where we install and use the SDK. So let's that you can that you can build the UI through that code and do all back end process. Lot of other ideas kinda come into mind now that I see it. Yeah. And the on a lot with breakfast in multiple ways. You know? Yesterday, we built out actually, not for those watching on demand. Sorry for kind of crossing streams. So you probably, you you may have already seen this. If we take a look at Directus and just take a very quick look at yesterday's workshop, another thing you may not consider is I'm just mute that. You may not have considered is, you know, we have this dashboard builder, and you'll be thinking, oh, okay. You know, it's all out displaying displaying insights. You know? That could be useful or whatever. But what? Look at that, look at that quality there. I'll click back over here in a minute. Maybe just here. Here. But this panel, you pick a user from a drop down and hit call, and it would use the Twilio voice SDK to actually do a two way phone call from your browser to the target to the user's phone number. So, yeah, really, really flexible. You can very much build a lot in it. Anyway, right. So we're gonna create a custom operation. So first thing we're gonna do here is we are going to change the ID. 
The ID has to be unique across all operations in your project. So it's quite typical that, you know, people will prefix the name of their extension with their author name. I'm just gonna call this one Deepgram TTS because I doubt there will be another one called that. And that has to be the same in both files. So also here, the ID. We'll call this 1 Deepgram TTS. What are we gonna do for icon? We will use record voice over. I think it's what the one that I've used in the past for, Deepgram. And then for description, generate text to speech. Well, we don't need to save too much time insights. Just a a little visual thing. Now what we need to do is we're gonna pop in some text. So actually, I think we'll just leave that as it is, but we are also going to pick the model. So let's actually take this moment to pause. Could you talk to us about the models in Aura? Yeah. In the playground that you you should be able to access, and we we literally just added it the other day. So on the top at the right hand side the very top right hand. Oh, text to speech. I didn't see that then. Oh, perfect. Yes. Yeah. So you can type in any text you want here, and it will generate it. Yeah. And you can just hit play on one of the voices. Angus at the very top, actually, is my voice. So, yeah, if you ever wanna Actually, I listened to it yesterday. That's so funny. So we have 2 here. You know what? Let's just for sake of argument, we'll just pick the top 2, Angus and Arcus. But they each have this model name, Aura Angus e n, and AURA ARCUS en. So we're going to provide a way to do a drop down and just pick between them. And in theory, you would populate as many as you wish, or you would take away the choice and just pick 1 and not provide this option. But we can do that. So we have this text box in the option. Let's, go and create a new one. So we will do field. This is what you name it. So I'll name it model. Right? That's like the key that we saw. We're gonna give it a visual name. 
So we'll capitalize that. That is ultimately just going to store a string. And then we get to provide some information in here. First thing we'll do is just the the width will make it full, which just means I'll go under it. So you can make them half, but whatever. But the more important thing here is the interface or the form input where you can create custom interfaces as we spoke about earlier. And the one we want is called select drop down like so. This interface has some options. As you would probably expect, it's the choices. And each choice has a text, and that has a value. And, like I said, we will do 2. So the text for the first one is Angus, and we can see here, Aura Angus e n. Is that what it is? Yeah. Aura Angus e n, and Arcus was the second one. Aura, Arcus, e n. Nice. Now the only other thing we'll do here is we'll just show it on that card. This is optional. This is just this is just, you know, UI further to a to a degree, but we will and sorry. It's over here in the in the overview. So we'll also bring in the model, and we will show that on the card as well. You'll see what that does in just a moment. And what would be the default if that wasn't populated? Or is it just always It would just be an empty card. It would just be an Oh, it would be an empty card. Just like this. If the if the model wasn't selected. Good question. I think it might default to the first. Did I? You could probably set a default or handle the default over on the server side. If not selected, pick this. I think kinda similar approach to most drop downs that you could build. So let's let's run this. Let's go npm run dev. And that's going to build our extension, watch for changes, and rebuild it whenever there's a change. Over in our first terminal here, we see here extensions reloaded. If I hit save, it will rebuild the extension. Directors will see that I've rebuilt the that I've that the that the extension has been rebuilt. Will it? 
You might need to make a change rather than Yeah. That's what I was that's what I was waiting on. Interesting. I might just quickly restart it and see if it needed just a one time restart. And if that continues, then whatever will might just have to kick it up the bum. So that's rerunning now. So I'll just save that. There you go. Extensions reloaded. Okay. Just needed one quick one quick kick up the bum. So let's, let's see what happens now. So we will add to this new extension on the end. There it is. Deepgram TTS. There's the icon we pick. That's the text. That's the title. We pass in the text, which we know is annual annualize dot summary Mhmm. Dot x, I think. So analyze dot summary.text. Sure. And we pick the model, and there they are. So we'll pick your voice. That's quite funny. I didn't know. I didn't know that. That's pretty cool. And we'll hit save. So and you see there the model is shown on the front, and that's the text input that we put in. So we hit save. When this, operation runs, the API side will run. So the first thing we'll do is we will go ahead and, so go ahead and just pull the model in as well. So that'll just be the the e n, you know, Angus or aura Angus e n. Let's let's do this. I'm really excited, actually. Right. We are gonna use the Deepgram SDK. So npm install at deepgram/sdk. Good. Good. Good. And we'll go ahead and, initialize this. It's funny when I was a developer advocate at Deepgram, I did this all the time. So import, create, client from not that, from Deepgram SDK, and then you create an instance, Deepgram equals create client, and the API key have to go in here. Yeah. Obviously, I Eric, why did I even bother hitting save? We need to pass in the API key here. We We don't really wanna hard code it in our extension. So instead, what we're gonna do is we're gonna add it here to our Docker compose file, which will bring it into the environment variables. So mostly because I've already forgotten it, let's grab that key again. 
Let's pop it in here. We'll call it deep oh, Deepgram API key. There it is. We do need to restart our Docker container whenever we update the Docker Compose file because it just reads that once it load. And straightaway in here, process dotm.deepgramapi key. One moment. These are fine. These are not errors. These are little warnings, not a problem. It's just, some of some of the way that the, yeah, some of the the build of the Deepgram SDK, but it's not a bother at all. They are just warnings. And now it's time to actually build the hands, build out the the handler. So, what happens? We press the button, it goes in. Now what we're gonna want to do here is ultimately we want to save a file to our director's project. And we expose a bunch of services to your project, which you can use to interact directly with these kind of directors primitives. Now, the first thing we wanna do here is we wanna go ahead and just add in here a second variable called context. And inside of here, con, const. There we go. Const. We want services and we also want get schema, which we'll need to, initialize the service. Services is a list of all the services. The user service, the item service, the permission service, the role service. We only care about the files service. So we'll just pull that out just just to make it easier. And then we'll go ahead and we will initialize a files service. A new file service. And in here, you have to pass in the schema of your project, and that thankfully is just returned in get schema. And I did just catch in the little tool tip there that that needs to be, awaited, and therefore, this needs to be async. Not there. That's an object. So that's us creating the file service. That means we now have an interface with which we can create a file in just a moment. Next, we're gonna go ahead and use the Deepgram SDK to generate a a stream of audio, and this was lovely by the way. 
I was speaking to to Luke, one of the DX engineers at at Deepgram recently about this. And the fact that this SDK uses the native interface makes this next bit really, really nice. So what we wanna do here is create I'll call it response for now, I guess. Or e g response maybe. Deepgram response. What we can do here is just use the initialized client here with our environment variable dot speak dot request. First first argument here is the source, so we can just pass in the text. So you would yeah. This is how you do it, but a shorthand because the name of the of the, property and the value is the same, you can just shorthand it there. And secondly, any options that we want to use. I might just create options as its own just to keep it really clear. I might just do this above here and then feed it in. So what do we wanna do here? I think all we really wanna do is we wanna pick the model. The same thing, we can use the shorthand. So that'll be, Angus. And we're going to tell it what file format we want it to return. If memory serves me right, you can return quite a lot of audio formats from Deepgram. Right? Yeah. We we support quite a few different formats. Yeah. If you wanna play it back, typically m p 3 or WAV, if you wanna stream it, like, to Twilio and things like that, you'd probably do, like, raw audio linear 16 or Yeah. That's cool. We'll do this because in just a moment, we are gonna need to know the file format. So I want to explicitly ask it for a files format. So we know with confidence it's gonna be an MP 3. Then finally oh, hang on. Let's pass this in. DG options. There we go. And finally is the file stream. I might call it dgstream, just again to be very explicit. Now gonna await response dot get stream Stream. That's it. That is a DG response. That is now just a live stream of audio, which is fantastic. Because it is a file stream, we can pass it straight into the file service to upload it. 
Now before we continue, we are also going to need a file name. Right? We we need to tell direct us when we create a file, what we want the file name to be at least as when you download it. And I think what we might wanna do is actually collect that from the user upfront. Like, hey. What filename do you want this to be? So let's go through the motions here of adding a new item here and just a new text box. Right? And we'll call this one file name, I guess. Field name type. I think that's all we need. File name. I And you need to change the name. Right? Nope. Oh, yeah. Yeah. Yeah. Sure. But I mean, that's that's that will stop us getting confused, but strictly speaking, you you don't need to. So we have the file name. Great. We're not gonna bother showing it on the overview, so it can just stay here. And then over on the API side, we just wanna pull that pull that in here. File name. Yeah. Cool. And just to make this consistent, I might just move this, and we can call this, like, request or something like that. Just, again, kind of handle these the same way. So now we have the file name that we'll pass in. Let's save this. Let's, just refresh this. Make sure all of that works. And let the white screen stay for just a moment longer than I would have liked it to. Look, there's a problem here, which tells me something went wrong. But I don't know what. Look. They've all gone a bit they've all gone a bit funny, which means I have caused a problem. Love that for me. Might just zoom out one step for the sake of scanning a larger surface of code file name. I could have broken at any of those points. We haven't refreshed this in a while. Thought there. And these are, let let me scan this because they were just, these are just warnings there. Oh, no. No. No. That's still the warning. Interesting. Interesting. Let's look over here again. We passed in the the create client. Let's just save ourselves just a moment of effort and just figure out if it's in here, first of all. 
It was. Okay. That at least helped somewhat. Let's. Was it maybe the fact that you I've called it request? It's request meant no. That's fine. Okay. I mean, this is a top tier debugging. This. Okay. It's something down here. Okay. I mean, okay. WAVs. Yeah. So there's nothing wrong. Yeah. Let me just make sure the extension reloads. Fine. No bloody problem. Nothing happened here. Right. Let's open this one up, and now there's a file name. Right? So I suppose we can put in a dynamic value here and call it and, and call it latest, which was the latest episode dot I think we called it title. I'll save it, and then I'll just look back. Latest. Yeah. We called it title. That'll be the title. That's a dynamic value, which is nice. So that gets passed in as a file name. We get the stream, and, all we need to do now is upload it to directives. And the way we do that is let's do the same thing again. We'll call it, directives options. Just so again be really explicit. We need 3 things, I think, are mandatory here or it won't work. The first thing is the file name downloads. Whenever you download the file from director status, the file name that it will have, We've already established, so that's gonna be file name. Oh, dotmp 3, I suppose. I'm not sure if that's needed, but I'm gonna do it anyway. It needs a MIME type. We just call it type here. We already know that's an MP 3. I know the standard format for MIME types is this, audio slash. And again, that's why we specifically requested a file format. It doesn't really matter which one it was and where is it going. This matters because you can connect more than one asset storage to Directus. By default, it will use local storage, which is just this up which is just this kind of link to this uploads folder, but you can connect it to an s 3 bucket, and Azure Blob storage and and many more. So we're just telling it, hey. This is where we want storage. Should that should that meme type be MPEG rather than MP 3? No. 
I think it's this. I think it's I think if if I caught my mind back, I think that's right. Don't know. We'll find out in a couple of minutes. So that'll be, that'll be our first point of debugging. And then the final thing we'll do, although it is optional, is we will actually set the title of the new of the new file. So this is the, like, visual labeled title where the other one is what happens when you download it. That isn't strictly needed, but we'll we'll do that. Then what we'll do is we'll call it directors file, I guess. The new file is we will use our file service. We will upload 1, and the first value is going to be a stream. Unfortunately, Deepgram just returns a stream, so we can dump that in there. And the second one is the directors options. What is returned from upload 1 is the new primary key of the new file that's being created. So we will just return the rectus file. That's the whole thing. That's the whole operation. Before I good before I get all excited and describe everything that's happened, let's make sure it works because it it might not. Right. Nothing seems to have gone wrong here. We have a file name set, so let's trigger this one more time, and I am expecting to have to wait now because we are doing all of these steps back to back. I'm expecting to have to wait maybe 30 ish, 40 ish seconds. But we'll see. It's an hour of audio. So while that's happening, let's recap what's happening. We're going and grabbing the RSS feed and converting it to JSON with this custom extension you can download for free in the marketplace. We just, you know, traverse that that big objects that that's returned to just get the latest item. We didn't end up doing anything with the date, actually. Now I think about it or the description. We transcribe it using the AI transcription endpoint and then run text intelligence. Both of these are Deepgram to receive the summary. And then this custom built extension we just built does text to speech. 
You pass in some text, You pick a model from the drop down. We just pick the first two out the list, Arcus and Angus, and you give it the file name for the original file for the new file. And as you see here, what was returned was a string, which means there wasn't an error, which means there is our 42 second summary. Let me listen to your voice. I don't think this will come through. It it's you. It's so you. That's so funny. So do people do people at Deepgram just get proposition to, like, get their voices up? 6 of the 12 voices are Deepgrammers, and then the 6 are voice actors. It's really funny. But there's your little summary. There's your summary. That's sounds freaking cool. So let's talk about the code, and then we'll talk about what more you could do with this. Right? So we grab the data that was actually, you know, provided by the operation. We grab the file service and the get schema function, and we initialize a new file service that allows us to interact directly with the director's files collection. These three lines is all that's needed, and strictly speaking, you know, this object could go in here. So two lines is all that's needed to request a text to speech, a text to speech operation, I suppose, from Deepgram. That's all that's needed and it returns a stream of data. Then we, you know, configure all of our options for our new file we're about to create, and we upload the file by just providing the stream directly, providing the options, and that returns the primary key, which we return. That's the whole thing. Do you have any questions, in the chat? Who have been remarkably quiet, by the way. I hope you've enjoyed. Damien, do you have any questions, thoughts? Yeah. I'm I'm just amazed at how easy that was. Like, less than 30 times code, and it's all hooked up and it works. And a lot a lot of that code is verbose code as well. Right? Like it's Absolutely. Trying to be expanded. So, absolutely really easy. 
And, yeah, being able to pass that stream straight into the file was very useful. It was very, very useful indeed. So what more could you do with this? Well, the obvious kind of first step is that load more data is returned from Deepgram. So you can do more with that. You know, you why we could save the description directly to the file if we wanted. We can provide you know, we can maybe tag it with topics. We could do whatever whatever we want here. It's completely up to us. You could also run further automations either as part of the same flow or a separate flow. It's all good that, you know, a new podcast has been transcribed, but do we know? Maybe we send an alert. We send, you know, an email or a notification to the user, which if we take a look here, there is a send email operation right here. So you could tell them that there's a new summary and maybe directly link them to the director's files m p 3, Because everything in directors, if we take a look at this new file here, it has this ID, and you can just go to local host 8055/assets/that. And there is our m p 3. So you could link them directly to it if you fancied. Another thing is that, you know, this was a slightly conceited example in that we have to manually run it. But you could run a cron. You could use a cron here. You could use a cron, grab the feed and say, hey. Has there been anything new in the last 24 hours since I last ran? Okay. Now go and transcribe the latest episode. You know? So you could run this on a schedule and make it like a daily roll up of new shows, new episodes that you could listen to. One other idea is, like, obviously, this was audio to begin with, so we we kinda compressed it, and we create a summary that became audio. But maybe there is, like, you know, a cool blog that you follow, but you may not have the chance to read the blog, but you'd like to listen to it. Right? You know, maybe in the car. 
So you could take a blog, turn it into a, like, you know, an audiobook, very easily, or you could even, you know, summarize it. Right? And and play it out. I had a pretty interesting idea of, like, a a real time radio station that's basically, you know, tailored to exactly what you like. Right? So you could have a, you know, maybe archive research papers being fed in, and then it's giving you kinda the updates in real time. Oh, pretend I didn't see that. I remember I'm not sure if it will still be live. Yeah. Here it is. This is a post I wrote. The date's wrong because I I worked here at that point. But what this did, it used my JSON. It literally literally live transcribed a radio station. I could edit in BBC Radio 4. Mhmm. And it would live transcribe it, which was super cool. Super, super cool. Yeah. So now you can even do the reverse. Yeah. Yeah. Yeah. That's pretty, pretty cool. There's so much scope for this, you know, based on more complex triggers, you know, more logic in the middle. Like I said, you know, this could be a cron instead of a trigger. So many options. But I think that is just about our time, and we have 2 more minutes. So, yeah, thank you so much for indulging me and and and and getting involved in this and sharing your insights. I learned actually quite a lot, during that. Especially the smart format being part of the model itself. Fascinating. Not not what I thought. Yeah. Thanks for having me. This this is super interesting, and I'm kind of amazed that you're able to build all this from scratch, you know, in in the length of time that we're chatting here. And, yeah, it really just shows what's possible with Directus. So I might be I might be building a few little, thing that flows with it myself. That's how we get you. That's how we get you. And you can run it locally. Right? And it's the full fat thing. You know, it's not like a a less good version. Like, it is the full thing. It's what we host. 
I will say that I need to add it to this, I think it's still in a PR, actually. I don't think we merged it in yet. But the RSS to JSON operation, I will show it because it is also really, like, light. I just didn't wanna have to do it now because it kinda wasn't the point. We're taking the URL as you saw, and all we do here is that all the code for this operation is here. That's it. That's the whole thing, the whole operation. We import a library called XML parser. We go off and get the RSS feed. And assuming everything was good, we just pass it, the attribute name prefix underscore, and then we returned we returned the past data. So that whole operation, that first one was the code. We could have built it live. I just didn't think it was gonna be that interesting. Thank you so much folks in the chat, for your kind words. I'm glad we made your life easier. I like lots of claps. Yes. There are lots of use cases, both for Directus and Deepgram and the 2 together, and I completely echo Jonathan's sentiment. Welcome to the director's community. We're very happy to have you. Great. And with that, we are at time. So have a wonderful rest of your week, everyone. Have a wonderful rest of your, week, Damien. And tomorrow, just a reminder that there is one more event this week week and then there's this community networking social. It is using the one and only platform I have ever done networking on that doesn't absolutely suck. So if you're interested in meeting other people who are interested or use or involved in Directus in some way, shape, or form, drop by. It's at if you go to leapweek.dev, it will be, localized to your time zone. But here in Berlin, in Central European, it is at 4 PM. So, yeah, hopefully, we'll see you at that tomorrow. Damien, anything else you wanna share just before we hit end? No. Thanks very much everybody for joining. And, yeah, really interesting possibilities. This is open all. Excellent. Right. With that, have a good rest of your day, nerds. 
Bye for now. Bye bye.","af6d8b32-20dd-4215-aa51-218331552265",[172,173],"6651b07b-b7d0-4ad7-aa06-82596fd2b3ab","1df7e6e1-df7b-4f62-b2e1-1c19f3794829",[],{"id":133,"number":134,"show":122,"year":135,"episodes":176},[137,138,139,140],{"id":139,"slug":178,"vimeo_id":179,"description":180,"tile":181,"length":182,"resources":8,"people":8,"episode_number":183,"published":184,"title":185,"video_transcript_html":186,"video_transcript_text":187,"content":8,"seo":188,"status":130,"episode_people":189,"recommendations":192,"season":193},"setting-up-ab-testing-posthog","1060881589","Join us and our friends at PostHog to learn all about building A/B testing infrastructure in your CMS with their killer A/B testing functionality.","68305008-77bb-46a0-9ce0-4e94b8667fea",65,3,"2025-02-27","Building A/B Testing in Your CMS: A Deep Dive with Directus & PostHog","\u003Cp>Speaker 0: And we are live. Alright. Welcome. Welcome. Welcome.\u003C/p>\u003Cp>Super excited to kick off this webinar. It's been a long time in the making for me. I have been knee deep in AB testing over the last couple weeks, so super excited. We are going to be covering how to build AB testing inside your CMS with Posthog and Directus. As everybody trickles in, if you are in the chat, let us know where you are from.\u003C/p>\u003Cp>Hop in the chat. Let us know. Awesome. I am Brian Gillespie from Directus. I see a few of you in the chat already know me.\u003C/p>\u003Cp>We also have Yurai from Posthog. Yurai, nice to have you.\u003C/p>\u003Cp>Speaker 1: Hey, everybody. Nice to meet you. I'm Yorai. I live in Amsterdam, Netherlands, and I've been at Posthog for about a year and a half. And for the last year or so, I've been, working hard, on our AB testing tool, which I'm hoping to, demo you a bit today.\u003C/p>\u003Cp>Speaker 0: Yes. I could I I'm excited because I'm gonna learn a bit on this one as well. 
Obviously, I've done a lot of the technical implementation for not just our own use case at Directus, but, for this amazing bonus that we got for everyone at the conclusion of this. But, I, you know, I I haven't messed around with all of the the the, like, the metrics and, like, the testing and it just like, all of the config inside post hoc is is tremendously powerful. So I looking to, I guess, forward to seeing what, you know, how that is cooked up on your end.\u003C/p>\u003Cp>Alright. Let's, let's cover the agenda, and then we'll kinda give a brief overview of, both direct us and post hoc just because I I saw some questions in the sign up of the people that weren't familiar with either one. So this is obviously the awkward introduction phase. Hi. Okay.\u003C/p>\u003Cp>I see everybody. Canada, Tampa, Florida, Texas. Amazing. Nashville. Next, we're going to kick it over to Yurai where they'll cover he'll cover, basically, a demo of post hoc and how you set up experiments, how you run AB testing, what's a feature flag, what's not, what you should be thinking about as you're testing.\u003C/p>\u003Cp>And then we're going to, basically do a live jam sesh of how to connect Directus and Posthog using this, the starter kit that we've created. And we'll open this up for q and a at the end of this. And I'll show you guys how to get this amazing bonus with, the working source code, fingers crossed. Right? So you don't have to invest the time and headache that I have invested over the last couple weeks.\u003C/p>\u003Cp>But with that, Yirai, maybe you want to talk a little bit about post hoc for those who are unfamiliar with the tool.\u003C/p>\u003Cp>Speaker 1: Sure. I guess it'll be easier if I share my screen right away. Yeah. Go for it. Let me do that.\u003C/p>\u003Cp>Alright. Well, so post hoc, started as, as a product analytics platform, but we've really evolved into kind of, like, an all in one solution which allows you to build great products. 
So, besides product analytics, we have today more tools such as session replay, feature flags, surveys, you know, data warehouse, and, of course, experiments, which we'll talk about today. So experiments, basically allow you to, test, like, variations on your website, test different changes, and see if those changes lead to some kind of improvements in the behavior of your users, which is then, of course, visible in the metrics that you are tracking. So the way it works is that you you use post hoc's product or feature called feature flags.\u003C/p>\u003Cp>And feature flags basically assign different variations of your website to your users. And usually, by default, you will be testing two variations, on your website. The variations will be normally called control and test. And let's say user a will get the variation control, user b gets the variation test. They will both see something different on your website, and then, you know, PulseHawk will track the behavior of your users on the website by capturing events.\u003C/p>\u003Cp>And then we aggregate those events on our site, and we basically calculate results for you, which tell you whether a particular variation is better than some other variation. So like I said, every experiment is is backed by a feature flag, but, actually, you don't really need to know, much about feature flags at all. The feature flag will be created for you when you set up an experiment. So, like, all you have to do is basically create the experiment, and I believe Brian will later show you how to do that via direct us.\u003C/p>\u003Cp>Speaker 0: We will. And,\u003C/p>\u003Cp>Speaker 1: basically, it all kind of, like, happens under under the hood. And, basically, all you need to then do to analyze your experiment is to just let it go to your experiments that impose HOG, and you will be able to analyze your results there. So I'm going to open an example experiment to show you what a results analysis might look like. 
So over here, I have a running experiment open. You can see that it was started, like, two months ago, which will be, like, a pretty long running experiment, but that's just because, like, it's it's some test data.\u003C/p>\u003Cp>But, basically, the the core of each experiment is the metrics that you are tracking. Right? It's it's these metrics that tell you, like, what what exact changes in behavior your your changes in in your content or in your experience are producing. And in this particular experiment, I'm tracking, six different metrics, three primary and three secondary. And the difference between the primary and secondary is just that it's just like the way of of organizing your metrics.\u003C/p>\u003Cp>So the primary metrics are something which actually inform whether your experiment is successful or not. And secondary metrics are kind of like guardrails. So it's it's something that you don't really want to regress. It's it's maybe not like a like a metric directly tied to your experiment, but it could be anything from, let's say, like, a session length or or any kind of, like, in interaction maybe indirectly linked to your experiment, but you still may want to track that to make sure that, that you don't get some some other part of your of your product kind of, like, regressed.\u003C/p>\u003Cp>Speaker 0: Cool. That's a I I like that for sure. Hey. That's that's one of the confusing things for me. It was, like, what goes in primary, what goes in secondary?\u003C/p>\u003Cp>Just, so it sounds more of, like, secondary would be like, hey. This is a great guard against unwanted side effects. Like, hey. Exactly. Increased conversion, but, you know, like, the time on the page or or, like, session time or, bounce rate or something rose versus, the actual event that we wanted.\u003C/p>\u003Cp>Speaker 1: That's exactly right. Yeah. So it's just like a way of organizing things. 
And but other than that, there is really, like, no difference under the hood between, like, a primary metric or a secondary metric. Basically, always, like, at a at a very low level, what we do at Boss Hog is capturing events, and, a metric is like a way of counting those events in a in a certain way.\u003C/p>\u003Cp>So let's have a look at at one of these metrics. So let's let's take a look at the first one. I'm going to click at this on this edit button, and here is my metric definition. So this is a funnel metric which measures, the conversion rate between two events. So the first conversion so so the first event here is called sign up started.\u003C/p>\u003Cp>The second event is sign up completed. And what this funnel metric measures is the conversion between these two events. So you can see that I have 3,000 persons who did the first event, but only 815 persons who, triggered the second event. And so the the difference between these two events is basically your conversion rate, which in this case is twenty seven percent. And what you actually see here so this is like the the metric definition form.\u003C/p>\u003Cp>This is not your experiment result. This is just kinda like a preview, which shows you well, is the data actually there, right, in the in the system? Like, are actually people sending these events? And that kind of tells you that, okay. Like, this is, like, a valid metric to experiment on.\u003C/p>\u003Cp>Like, our instrumentation is set up properly so we can actually go ahead and and save that metric. So once I save the metric and, well, once the experiment is running, I will start seeing my results once sort of, like, a minimum criteria are met, which is that, you know, you need to have some sort some number of events that have been ingested. You need to have events for both control and test variants. And once all of these are met, you will start seeing results. 
And the way we present results is kind of like an industry standard way to present results of, you know, AB experiments, which is that we show you this chart, which is called a delta chart.\u003C/p>\u003Cp>And what you see here is that for each variant, you will see this bar. And this bar is basically a credible interval. What it shows you is the the actual or let me actually start start start, like this. So so each each bar shows you, the actual difference between that given variant and the control variant. Right?\u003C/p>\u003Cp>The the black bar in the middle is the delta between the variant and and the and the control variant. So you can see that in case of the test one variant, we actually have a regression here. So the control is at 0%, and the test one variant is kind of like minus 13%. So that's bad. Right?\u003C/p>\u003Cp>Like, that's a that's a regression. We have worse conversion rate for the test one variant compared to the control variant. Now for the test two variant, we actually see an improvement. So the delta here is plus 6.92 compared to control, and that's why this bar is in is in green because it's actually gonna be improvement. So that's what the the the black vertical bar tells you.\u003C/p>\u003Cp>And now now onto the edges onto the edges of the actual bar. So this is like a credible interval. So what this bar tells you is actually the uncertainty that you have because, in any kind of statistical testing, there is some sort of uncertainty. And this like, the the the outer boundaries of these, credible intervals tells you, what kind of range in the actual results you may expect. And this basically tells you that, this credible interval, goes from minus 3% to plus 70%.\u003C/p>\u003Cp>And that means that in ninety five percent of the cases, because this is a so called 95% credible interval, you can expect the true value to lie between, like, this range. 
So there is still some sort of small probability that there will be a regression, for the test two variant even though, it's kind of like there's a high probability that it will be, some some sort of improvement. The narrower a credible interval is, the higher certainty you have. Right? Because, it's kind of like a tighter range of values where the actual value may lie.\u003C/p>\u003Cp>The wider it is, then it's kind of like more more uncertainty. And oftentimes, as you as you collect more data and you, like, keep refreshing results, you can you can kind of, like, observe the variance getting narrower and narrower every day as you kind of gather more data and get gather more more certainty. So that's what these credible intervals bars tell you. It's calculated separately for each of the metrics. And, at post hoc, we use a so called Bayesian statistical methodology, and the two main outputs of the methodology is the credible interval itself, which tells you the the uncertainty of the result.\u003C/p>\u003Cp>And kind of like the main output is is what we call win probability. So in this case, for this variant, there is an almost 83% probability that this test two variant is actually better than control. And then we show you, kind of like the the significance banner over here. And at post hoc, the the criterion that we use to tell you whether you should roll out a variant or shouldn't roll out a variant is that the win probability needs to be higher than 90% for the best variant. And in this case, you can see that it's actually less than 90%.\u003C/p>\u003Cp>And that's why for this particular metric, we declare it as not significant because it's less than 90%, which is what this tool tip also tells you.\u003C/p>\u003Cp>Speaker 0: Okay. So now you've just answered, like, my own specific like, this is the biggest question I've had on, like, the experiments that we've ran of, like, what's how do you measure the significance? You know? 
Because we we've ran several tests, and, I'll get into, like, the specific results here in just a bit of one of our tests. But, like, some of the tests have been like, we saw, like, a it what looked like a positive improvement, but it was marked not significant.\u003C/p>\u003Cp>So it's like, like, it you know, can we be confident in that result or not?\u003C/p>\u003Cp>Speaker 1: Right. Yeah. Yeah. That's a good point. And, we actually keep, improving this UI, and we want to actually, like, make it clearer as to, like, what all of these numbers mean and when you can expect significant.\u003C/p>\u003Cp>And it's like why something is significant, why something isn't significant. Just, like, make make all of this kind of decision making clear. So that's that's definitely a a valid point.\u003C/p>\u003Cp>Speaker 0: I I I'll just stick you in the UI explaining it, man. Take it like you flawlessly. You did I was\u003C/p>\u003Cp>Speaker 1: gonna ask that again. I I didn't hear that. I I\u003C/p>\u003Cp>Speaker 0: was gonna say, just stick a video of you inside the UI because you, you you did flawlessly.\u003C/p>\u003Cp>Speaker 1: Oh, nice. Yeah. We might actually do that. That's a that's a great Yeah. Cool.\u003C/p>\u003Cp>Am I still sharing? Because I think the, Oh, we lost the screen share. Yeah. Oh, okay. I'll reshare.\u003C/p>\u003Cp>Okay. And then, so just like continuing to the second metric, it's it's exactly the same principle. So each metric is is evaluated in exactly the same way. I mean, the the like, on our back end, there are some differences as to how, different metric types are evaluated. So for example, the second metrics the second metric is a different metric type.\u003C/p>\u003Cp>Right? So, like, in in the first case, we had a we were measuring funnel conversion. In the second metric, we are actually just measuring the role click count. And, of course, there are some statistical differences as to how this should be evaluated, like, on the back end. 
And we make sure that, like, we do this properly.\u003C/p>\u003Cp>But for you as a user, there's really no difference. You basically just look at, the movement of these bars relative to the control variant, and you look at the the win probability. And then the banner will will tell you whether a particular metric is significant or not. You can also dive deeper into any particular metrics. So if you click on details, you will see the actual counts for, for each variant.\u003C/p>\u003Cp>You can see that the test variant is strongest. Right? So it makes sense that it has the highest count over here. And this is like a cumulative chart, so the the the counts actually stay the same after we we stopped collecting the data for this experiment. Nice.\u003C/p>\u003Cp>Yeah. You can also see things like exposures for each variant, their means, the delta, things like that. There's also like a like a small cool feature, which is that you can actually view recordings for any particular variant. So the the the power of the POSIX platform is really that we offer multiple products, and they are kind of, like, interlinked. So you can can actually, like, click on on this particular variant and see the recordings, like, of those persons that that that actually, like, sold that variant.\u003C/p>\u003Cp>And I don't see any recordings here because I'm on my local instance and just, like, using all the data so you can know from actual users. But in Yeah. On actual dashboard, you would actually see recordings, of users where you could actually see how they how they interact with your with your website.\u003C/p>\u003Cp>Speaker 0: Yeah. And and, like, having that all in one has been, like, a significant help for us at Directus. You know, I would it's not like for us, it's it's not stack overflow. It's it's like stack overload. 
Like, I, you know, I the thought of adding, like, six more tools to your tech stack for your website, is just like a a mess.\u003C/p>\u003Cp>It's it's like a pain for us. I I don't wanna do it. So and, like, when we integrated post hoc, like, the the analytics for us was, like, one of the one of the first things that we got a lot of value out of, and then we we started diving into the AB testing. You know, as we we kinda shift gears, like, do you have any best practices, your eye on, like like, what to test? You know, obviously, like, you've built this thing.\u003C/p>\u003Cp>You probably worked closely with with some clients at Posthog. Like, what are people testing? Do you have any best practices to share?\u003C/p>\u003Cp>Speaker 1: Sure. So I would say if you if you are just starting out with experiments, start with something very small. So things like, you know, small changes to your landing page. That will really allow you to, just, like, kind of, like, get, like, how how the whole things works. And, maybe also kind of, like, circumvent some of the gotchas, like, like, why why while you are still starting out.\u003C/p>\u003Cp>Like, there are, like, several things that you should be aware of if you are implementing AB testing. I'm not sure if if, like I would say, like, maybe there can, like, be on the scope of this of this webinar, but we have, like, section with some troubleshooting and FAQs and, some best practices, when when implementing experiments. To summarize very quickly, I would say the like, one important thing is to make sure that your tracking is set up correctly. So, like, in your code, whenever a user performs a given action, you do actually capture that action so that Pulsar receives that event because, obviously, if we don't receive the correct events, we cannot, provide correct analysis. 
Now in terms of some in terms of, like, some likely more actionable AB testing advice, I would say start with small changes.\u003C/p>\u003Cp>Also make sure that, you are testing perhaps only, like, one or two changes at once. Because if you change, like, too many things, let's say, on your on your on your landing page, and then the test is showing, you know, significant outcome, like, you don't really know which which one of those a changes that you've made is actually leading to the improvement. Whereas, if you just like there's, like, small incremental changes, then you would be able to to tell that, kind of, like, more reliably. Another kind of, like, important technical detail is that you should probably use a reverse proxy on your post hoc setup to make sure that the, like, ad blockers are not, like, blocking capturing of the events, which is, like, a common issue, that can be really easily, circumvented with this. We also have, like, proper documentation, on this.\u003C/p>\u003Cp>Like, in general, if if you have, basically, for for any questions, you can use our search functionality in our documentation, for example, for the reverse proxy. That will explain to you exactly what you you should do to to set up also correctly to to be able to circumvent, ad blockers. Another useful tip is to learn how to actually estimate, your sample size properly. So one thing I I haven't explained yet is that we have this data collection section over here. And what this allows you to do is to is to answer the question how long you should run your experiment for.\u003C/p>\u003Cp>And, so I'm actually going to show you how this works. So if I click on edit over here, I have this slider which says minimum detectable effect. And this basically says, well, what kind of change in my metric am I trying to measure? 
And there's, like, a trade off to be made here because, if you are the the way the way sample sizes and experimentation work is that the larger the change you are trying to measure, the smaller the sample size you need. It may sound kind of counterintuitive.\u003C/p>\u003Cp>Speaker 0: It it definitely. Definitely. Because I've seen this, and I'm like, like, hey. Why why do we need less people for this?\u003C/p>\u003Cp>Speaker 1: Yeah. And, actually, right now, we are actually completely rebuilding this component to to do, like, a better job, explaining all this. But, basically, what this means is that if there is a huge change in your metric, you don't really need, like, a sensitive test for it. Right? You you don't need, like, a, like, a huge sample size because, if there is, like, a huge effect, that effect will already be apparent in, like, a relatively small sample size.\u003C/p>\u003Cp>But if you are trying to measure something much smaller, like, let's say, I'm I'm just going to move this slider from 10% to 2%, I need, like, a much more sensitive test, which means, like, a much higher sample size. Right? So, like, I I basically need, much bigger sample size to be able to reliably say, this 2% change is not just due some sort of chance. It's actually due to the actual change in the in the underlying behavior. But I definitely need, like, a larger sample size for this.\u003C/p>\u003Cp>So Right. The consideration some some some key considerations here is that are that, first of all, what is the sample size you can actually get? Right? If you if you are a startup and you're just, like, getting your first users, it might be actually difficult to get a sample size of 10,000 persons. 
So in that case, you are basically restricted to much smaller sample sizes.\u003C/p>\u003Cp>And\u003C/p>\u003Cp>Speaker 0: Right.\u003C/p>\u003Cp>Speaker 1: That actually means that your tests would actually would probably have to target, you know, just like large changes.\u003C/p>\u003Cp>Speaker 0: You wanna go big or go home at that point.\u003C/p>\u003Cp>Speaker 1: Exactly. Yeah. But as soon as you have some larger user base, you can start going after, you know, incremental small changes, that perhaps produce, smaller effects on your metrics. But you can you can run, like, many of such experiments and, you know, even incremental changes or or of one or 2% over time. They they really add up to a lot.\u003C/p>\u003Cp>So, yeah, there's, like, a trade off to be made here, when it comes to this, so it's it's it's good to be aware of that.\u003C/p>\u003Cp>Speaker 0: Yeah. Well, awesome. Yeah. Thanks for the best practices, man. Thank you.\u003C/p>\u003Cp>Like, yeah, I'm I'm learning just as much on this as as the audience, so I appreciate that. We'll jump into, you know, kind of, like, our own experience a little bit, and then I'll, kinda run through the steps of of integrating post hoc and direct us and and, again, show you guys the the source code. We'll we'll dive into it. We won't write code. That didn't work out well for me the last time I, I did one of these live sessions.\u003C/p>\u003Cp>But, as far as our own, like, experience with posthog, you know, recently, we rolled out a brand new version of our home page. And I'm gonna can we see this? Oh, yeah. There we go. So this was a big change for us.\u003C/p>\u003Cp>And, you know, this one, if I shrink it down, it's probably a little better. But this was a this new homepage was a big shift for us. And one of the things that we wanted to do was we wanted to test first to make sure that, number one, the messaging was wasn't causing a a decrease in conversions. 
Number two, you know, we wanted to make sure that that this was performing better than our our old home page. You know, we've got this interactive carousel component that basically links into, what we call our directus pizza demo, which is just a a live working instance of directus.\u003C/p>\u003Cp>So folks can hop in and and poke around inside one of the templates. Before we we shifted all that traffic, right, we wanted to make sure that this was actually worthwhile. So our own results that we saw got a fancy slide up here somewhere. Boom. Yeah.\u003C/p>\u003Cp>So, the conversions were were relatively the same. And, again, that goes back to kinda your eyes point about, like, what metrics are, you know, how do we measure significant change? But some of the big results that we saw was, like, a 30% decrease in bounce rate on the site, which is huge. And, obviously, that correlates with a, like, a larger session time, most likely because people are getting in the demo of Directus, at least that's our hypothesis, and and actually poking around, which is which is what we want. And and I know you guys at Posthog, you're I, you guys are are kind of following that same methodology of, like, you know, let's let's skip all of the, fluffy marketing stuff and actually get you into the product, so we can actually dive in and and learn.\u003C/p>\u003Cp>Alright. So let's actually dive into a this integration. And I put together just, like, this a really nice visual for how we've been doing AB testing with post hoc at Directus. And that is kind of the the concept behind behind this setup. We've done tests at at two levels, and I call it the block level, which is basically testing within the same page, which is, you know, hey.\u003C/p>\u003Cp>We wanna test a different headline on the homepage, or we wanna test, a different pricing component or a a different pricing tier. So that would be like a block level test. 
And then, I I was calling this a page level test, which is basically testing between different pages. You could call it a split test. You guys are calling it redirect testing inside the documentation dry.\u003C/p>\u003Cp>But it basically like, we we take a URL. We want to redirect some percentage of the traffic. You know, usually, if it's just two variants you're testing, you you probably split fifty fifty. But but that's the way that we've been doing testing at Directus. And now I'm going to show you how this all comes together.\u003C/p>\u003Cp>So, this is post hoc, and we laid a special theme, special post hoc theme on top of a direct us instance just for this webinar. But this is our CMS starter template with a little bit of magic sprinkled into it. So, if we take a look at what I call the checklist where did that guy go? Supposed to have a production person here, Matt. Not calling you out, but I'm calling you out.\u003C/p>\u003Cp>The post hoc checklist, can we see that live? Do I have to stop screen sharing to see that? Yeah. There we go. Alright.\u003C/p>\u003Cp>So this is the AB testing checklist, as far as integrating with posthog. I'm gonna show you how to create a a project in posthog. We're going to dive into, like, creating a personal API key to power this little automation that we've got. We're gonna walk through the directest data model. We'll adjust some permissions.\u003C/p>\u003Cp>I'll show you the flow that's involved, and we'll talk through, like, this Next. Js front end, and how that is integrated. Alright. So let's get back to the screen share, and we'll do this together. What I'm going to do, and this is a little crazy to do.\u003C/p>\u003Cp>We are, like, maxed out as far as projects. So I'm gonna delete this test project. This is, sketchy on a demo on a webinar, but, that's what we're gonna do. Alright. So I'm in post all the first thing we've gotta do.\u003C/p>\u003Cp>Right? We're we're gonna create a project. Just go through this. 
This is gonna be the AB testing webinar project. Great.\u003C/p>\u003Cp>Alright. So we've got our project. I'm gonna need two things. I need the project ID. So I can find that up here in the URL, or, let me get my fancy mouse pointer going here.\u003C/p>\u003Cp>I can find that from these settings as well. So I'm gonna grab the project ID, and, yes, we will send the recording of this. I promise. Alright. So now in the Directus instance, which you are going to get total unrestricted access to you at the end of this, we've got some global set up.\u003C/p>\u003Cp>So global's inside Directus, are basically, just a what we call a singleton collection. So globals are are typically things like, social links or favicons, logos, stuff that you're gonna use across your entire site. So we're gonna add our project ID. And then the other thing that I'm gonna add, I I don't necessarily need this project API key for the directive side of things. You'll need this on the Next.\u003C/p>\u003Cp>Js integration. You'll wanna copy it to your clipboard, stick it into your text editor, so that you've got that. But what we're gonna do, we need to go into our personal settings. And the reason why is inside this Directus instance, there's an a nice little automation that will show. Alright.\u003C/p>\u003Cp>So I'm gonna log in, reauthenticate for security, and we're gonna look for a personal API key. So I'm just gonna create a new key. This is our a b testing webinar key. We want a specific project that's gonna be our AB testing webinar. Whenever you create keys, whether that's in post hoc or GitHub, please be very specific.\u003C/p>\u003Cp>So we're gonna do right access on experiments and feature flags, and I think this should be all we need. Am I am I correct in that assumption, your eye?\u003C/p>\u003Cp>Speaker 1: Looking good to me. Yeah.\u003C/p>\u003Cp>Speaker 0: Okay. Perfect. Alright. So I'm gonna grab this key. 
I'm gonna go in, and I'm gonna post that inside this direct us instance.\u003C/p>\u003Cp>Amazing. Magic. Right? So let's let's talk through the changes inside this Directus instance. Again, this is our simple CMS template.\u003C/p>\u003Cp>Like, if you go to Directus.io, you go to get started for free, you create a cloud account, You get logged in. You can get the starting point for this, just by clicking CMS, or you can also get it through our template CLI tool. We'll button up all these resources. But this already has what we call the many to any relationship. It's basically a dynamic page builder that is set up inside your CMS.\u003C/p>\u003Cp>So if I open up my live preview pane, we can see that this page is made up of blocks. Right? And this paradigm lends itself to that block test that I was talking about. So that is kind of the setup here. The, extra collections that we've added to this direct to census, which are very minimal, are are just two pieces.\u003C/p>\u003Cp>Right? We have added experiments and experiment variants. And the reason why we add those inside direct us, we need to be able to link the content inside the CMS to the post hoc experiment. And this is it gets back into the why we created this. So we want to empower our marketing team, our content editors, to run tests, right, without code, without bothering the developer, without it being blocked by the developer.\u003C/p>\u003Cp>Right? We want marketers or well, this is my personal mission. I want developers and marketers to get along well. And if you are waiting, on information from marketing to set up the actual code for an AB test, not great. Likewise, if they have to bug you every time they want to test a new variant, that's gonna frustrate you as well.\u003C/p>\u003Cp>So, what we do, we've created a a experiments collection. And inside that, pretty simple. We've got a name for the experiment. We've got a feature flag key that you'll see we actually need, inside post hub. 
We've got a a short description.\u003C/p>\u003Cp>We've added a type of test. You know, is it a block or is it a page level test? And then we have our variants. So the variants are a relationship to that experiment variants, and this is pretty simple as well. We've got a key for the variant.\u003C/p>\u003Cp>Each experiment has to have a control variant as your eye talked about. And if you're doing a page level experiment or a redirect test, you need to have a URL. So on the front end, running a test is as simple as this. Right? With those pieces put together, and I'm sure Matt is crossing his fingers behind the scenes right now.\u003C/p>\u003Cp>Let's do a block level test. Right? So I'm inside Directus. I want to test a new headline. New headline.\u003C/p>\u003Cp>Home page. There he is. I see I see him in the chat. New headline for homepage. Alright.\u003C/p>\u003Cp>I stole this placeholder copy directly from you guys, Uriah. We want to let's see if this new headline improves conversion. So, hey, this doesn't totally replace post hoc. This is just a a slick integration to work together with the two. So we're gonna pick the test type.\u003C/p>\u003Cp>This is gonna be our block level test. We wanna test within the same page. And we'll add the control, and we'll add, just like this new headline variant. Great. Okay.\u003C/p>\u003Cp>Now with that out of the way, we save. What happens behind the scenes? There is an automation. I love automation. Direct as flows is a great way to build these automations.\u003C/p>\u003Cp>This is what this automation looks like, and I'll walk you through it really quickly. So whenever I go to create an item inside experiments, we've run this series of operations. We grab our global settings, so that API key, that project ID, and then we format a payload for post hoc. We create a new experiment inside post hoc using their API. Did we lose audio?\u003C/p>\u003Cp>Can you hear me okay, Yuri?\u003C/p>\u003Cp>Speaker 1: I can hear you, Brian. 
Yes.\u003C/p>\u003Cp>Speaker 0: Okay. Okay. Alright. I just wanted to make sure. Hopefully, it'll all be in the recording as well.\u003C/p>\u003Cp>But, then we've got a another it just it little piece of JavaScript here that formats a feature flag payload, and that's helpful for our redirect test that we're doing. And then basically, we stuff all that into post dog. And at the end of this, we return a payload that gets saved inside Directus. So the effect that we've get is a experiment that gets created inside Posthog. We've got a experiment here inside Directus now that we can actually link to, a piece of content.\u003C/p>\u003Cp>So if I go into post dog, we go to experiments. Check installation, skip installation. Skip or no? Skip. I did not remember that part of the creating a project.\u003C/p>\u003Cp>Alright. So we could see this experiment here inside post hoc. Let's see. There it is. This is all set up.\u003C/p>\u003Cp>But now let's go and link this to a piece of content. Right. So we're gonna go back to our home page, and we've got our hero block. So this is the control block. I'm just gonna go down to the bottom.\u003C/p>\u003Cp>And, basically, we've got a a relationship from this block to our experiment, and then we're gonna pick the variant that it belongs to. Except something wasn't quite right. It wouldn't be a demo that I was doing if everything worked smoothly. Why isn't my variant showing up? Clear filters.\u003C/p>\u003Cp>Experiment. There is the alright. You got me. Let's clean this up just a bit. I've got tried to get fancy, and I've got, I don't know what level of fancy I got here.\u003C/p>\u003Cp>But, okay. We'll try this again. Now I'm gonna link this to our new homepage headline experiment. We're going to add this to our control, and now we're gonna add another headline. This is the new headline.\u003C/p>\u003Cp>Amazing. It's gonna look beautiful. 
We're gonna link it to that same experiment, except now I'm gonna link this to our new headline variant. So all I'm doing behind the scenes here, nothing fancy. I'm just linking this piece of content to the posthog experiment.\u003C/p>\u003Cp>On the Next. Js front end, we're making a call. We get this content, And, because we've got posthog integrated, we get something like this. If I hit refresh, right, I don't see two hero images or two hero blocks here. I just see one.\u003C/p>\u003Cp>Right? And that is because of the post hoc SDK that's set up that is handling all the magic, and you could probably understand why I don't want to do all that magic myself. Now let's see if I can actually trigger you're gonna have to show me, like, a a trick to, like, force some type of, visitor into a variant sometime, your eye.\u003C/p>\u003Cp>Speaker 1: Sure. We can do that.\u003C/p>\u003Cp>Speaker 0: Let's see. This should be put in. Yeah. It's it doesn't seem like I can actually trigger the not triggering the variant here for some reason, through this. But, this is how this is actually integrated.\u003C/p>\u003Cp>This is like a a block level test. Now, you know, if I swap\u003C/p>\u003Cp>Speaker 1: Can you, can you open your web tools? Maybe we can try to override a flag, for this particular page. Yeah. I I Oh, we don't have to do that. That was up to you.\u003C/p>\u003Cp>Speaker 0: Yeah. Yeah. Okay. So there we go. So if you know, now you could see if I swap the control Right?\u003C/p>\u003Cp>If I make the headline the control, we could see the difference here. And, basically, the post hoc integration is is pulling that all together. So that is, like, the setup inside Directus. Like, if I wanted to run a page, a a reader direct level test, let's say I wanted to have a a new pricing page. Right?\u003C/p>\u003Cp>If I go to pricing, we've got pricing to fit every budget here. Maybe I wanna change this. We have new pricing. So we'll just create a new page. 
Pricing to fit no one's budget, and we'll just raise the prices by quite a bit here.\u003C/p>\u003Cp>Amazing. Alright. So now I'm gonna hit save as copy in this template, and now I've got I've got two new pages or or, well, one new page, but that's our page. I'm gonna go in. And now if I wanna do a redirect level test, I have new pricing page.\u003C/p>\u003Cp>Pricing page. We'll do a redirect. So the control, here, we're gonna add this URL. So that'll just be slash pricing. And when you set up the control experiment or the control variant, again, that is the URL that you're testing.\u003C/p>\u003Cp>It's an important distinction to make. Next, we will add the new pricing page. Great. That's gonna be new pricing. And, again, our direct us flow automation, like, will will bring this home for us and basically create this experiment inside post dog.\u003C/p>\u003Cp>So we'll just hit refresh. We've got our new pricing experiment, and I can click in and and see the the variance here. And as Yurai showed, there's just a feature flag that backs these. Now what we're doing on the page level tests is we're using the post hog feature flag payloads, to avoid making a extra call to direct us for this information. So if we I think I can get a better view if I click edit here, maybe shrink this back a bit.\u003C/p>\u003Cp>You can kinda see what's going on here. We've got an experiment type. It's a page level test. We've got a control path, so that's our pricing. And then we have a path that we're going to redirect to.\u003C/p>\u003Cp>And what happens on the front end if we go to local host 3,000. Now if I try to navigate to pricing, I'm either going to get the control did I I guess we may have to dive into the actual tools here. We'll we'll work that out in a moment. Love giving these demos on the fly. Alright.\u003C/p>\u003Cp>What is next on the agenda? We'll just look at that really quickly. Alright. Our feature flag test. 
Alright.\u003C/p>\u003Cp>So we've got the Directus side of things. We've nailed that piece. And then let's take a look at, like, the Next. Js side. Right?\u003C/p>\u003Cp>We want to, like, walk through how this is actually set up and integrated. So a a couple important pieces that you need as far as, like, setting this up within Next. Js. Let's pull this up. Alright.\u003C/p>\u003Cp>Can everybody see this? Okay. Let me try to close close the terminal a bit here. I I can shrink the the size of the make make the font just a little bit bigger. Alright.\u003C/p>\u003Cp>So, again, once you download this repo, you know, feel free to to browse through it on GitHub. We'll, again, we'll we'll send you all of this. But let's let's start on the Directus side of things. Inside this Next. Js application, there are our fetchers.\u003C/p>\u003Cp>So we're just using these two. Close. Shrink that. Okay. There we go.\u003C/p>\u003Cp>Alright. So these fetchers are are basically just communicating using the Directus SDK. What we've got here and the only change that we've made from our standard Next. Js template is just making sure we grab the experiment data and the experiment variant that we've linked to a page block. So this all comes together on our, like, our page builder setup that we've got.\u003C/p>\u003Cp>And one of the other things that you'll have to do inside Directus, you can fetch that data, but you need to be able to add the data to your permissions. So you've got to make sure that your experiment variants and your experiments are enabled underneath your permissions to make this work inside Directus. That is just as far as getting into best practices, that's one of my Directus best practices. About 99% of my errors are because I didn't set permissions. Right?\u003C/p>\u003Cp>But we have to add that experiment variant there. And then inside what we call the page builder, there's a a bit of logic here that basically, filters out the blocks. 
So, this template is set up to run Next. Js server components, so we don't get this flash of content, whenever we enroll someone into a variant. But, basically, we're just checking to see, is this block attached to a variant in an experiment?\u003C/p>\u003Cp>If it is, we get the feature flag from the PostHog client, which we'll look at in a moment. And should we add this block? So if the feature flag is found and the block is the control variant, we'll add that control variant. If the feature flag is found and it's not the control variant, we'll make sure we add that to the, to that block. So, not a huge shift in in the logic as far as working with Directus, just simply matching those up.\u003C/p>\u003Cp>On the PostHog side of it, and this is all we it's just standard boilerplate from the PostHog documentation. You need to have a PostHog provider. So we just set this up using use client here because this provider is going to go into a shared, like, a layout inside this Next. Js application. One of the important bits, especially for, like, server side rendering is the bootstrapping.\u003C/p>\u003Cp>So, basically, we're getting all the feature flags on the server side from PostHog and making sure we pass that when we initialize the the PostHog JS client on the the client side. And, Yurai, do you have anything to to kinda add on that bootstrapping side? You know? I I know this was, like, one point that was, a a little bit of where I ran into some issues when I was implementing this.\u003C/p>\u003Cp>Speaker 1: No. Not really. I would just say that this this is really the preferred way how to how to get the feature flags to your client, for a couple of reasons. Because the the other alternative is to actually fetch the feature flags directly from the client, but there is always some delay there. So, you know, you you you may get some usage events being sent without correct feature flag information if you do it that way.\u003C/p>\u003Cp>Right? 
But if you if you bootstrap your flags, that means you always evaluate the flags on the server, which is actually faster because, the PostHog library actually evaluates, the flags there without having to actually go to the PostHog server. So it's actually faster. And once the the web content is served, you already get the feature flags kind of, like, basically already bootstrapped to it. So, this is is actually what what we always recommend, for our users to do.\u003C/p>\u003Cp>Speaker 0: Yeah. Yeah. Makes sense. Now how you do the bootstrapping, it depends on your specific application. The way that we chose to do it in this specific one.\u003C/p>\u003Cp>So we've got this PostHog provider. There's a there's a shared layout that I'll I'll show you. This is just how we we use this provider. So inside our root layout component, and this is using the the Next. Js app router setup, we are actually, like, sending this bootstrap data in a header from a Next JS middleware.\u003C/p>\u003Cp>So we get that here. We pass it to our provider, that sends it down through the client. But the middleware is an important piece. Now you you could do this via a, like, a server component and, like, if you're not doing the redirect testing, I found that that worked pretty well. But, as far as, like, the redirects, you probably wanna do this in the Next.\u003C/p>\u003Cp>Js middleware. Just this is the best way that I've found to do it. So what we've done inside the middleware, if we get to the actual function here. Right? We get the path name that you're sending, you know, what we're navigating to, and then we're basically getting a distinct ID.\u003C/p>\u003Cp>So this is just a helper that is somewhere, maybe in a where is that guy? Distinct ID. Yep. So PostHog stores a cookie. We will try to get the distinct ID for that visitor, that user via that cookie.\u003C/p>\u003Cp>If we can't find it, we're just gonna create one. Right? 
And then we will look for some cache data inside the cookie. So we've got, like, a a bootstrap cookie, where we're we're caching this data. But, basically, what we're doing to enhance performance and and make sure that you're not, like, delaying rendering every single time, we've got a flag route set up on the API side, which is somewhere.\u003C/p>\u003Cp>Posthog flags. So, basically, there's a Node. Js posthog client. We pass that distinct ID to it. We go get all the flags and the payloads, and then we're caching that for sixty seconds.\u003C/p>\u003Cp>So as the user navigates, this middleware gets triggered, and, you know, we're we bootstrap that data. And then we also use this to handle our redirect, at the page level. So we've got a check for redirect function, which basically looks at that flag data that we have here. So once we fetch all of those flags from post hoc, we're iterating through those and saying, okay. Are any of these redirects that we've set up matching this experiment?\u003C/p>\u003Cp>If so, then we we send them through. You know, there's a a little bit more fancy stuff behind the scenes, but I I know we're coming up on time with this. Is there I I'm trying to think if there's any other, like, important pieces that I wanted to cover before we turn everybody just totally loose on this thing. I don't think so. Let's see.\u003C/p>\u003Cp>Where's my checklist? Redirect. We've configured the provider. And I saw where is was it Jobchum? Yes.\u003C/p>\u003Cp>So the that was a good spot. I figured out why this is not working. Thanks to JobChomp. It the public post hoc API key is from my previous project. So that's where I told everybody to take this down, make sure you copy it, but I forgot to stick it into my EMV.\u003C/p>\u003Cp>And, yeah, that's why we were having issues. Always love it on the demos. That's that's always fun. Alright. That's it.\u003C/p>\u003Cp>Let's, we'll open it up for q and a. 
You're I, you know, while we're waiting on questions to come up, I just wanted to say thank you for for jumping on with us and, you know, at least teaching me how to get get more use out of out of the post hoc side of things.\u003C/p>\u003Cp>Speaker 1: Of course. Yeah. It's a it's a it's a pleasure. It's it's a very nice integration that you that you build there. And, we actually have our own kind of, like, no code experimentation tool.\u003C/p>\u003Cp>It's still very, like, very much in beta. But, we'll I'd love to get access\u003C/p>\u003Cp>Speaker 0: to that.\u003C/p>\u003Cp>Speaker 1: Yeah. Whoever whoever uses, like, just Bosak can actually, like, already try it out. It's it's, like, not nearly as powerful as as as, like, what direct Directus allows right now as in kind of, like, rearranging blocks and, like like, doing all that. It's basically just for, like, simple simple style changes. But, yeah, perhaps there is also something for us to to learn here.\u003C/p>\u003Cp>Speaker 0: Yeah. Yeah. You know, I like, hopefully, like, coming out of this, we'll have, like, a, like, a how to dev blog post to to put this together. But, you know, one of the things that I I just I struggle to find any, like, linking to CMS examples. So, now that we've got one, this is how an integration could work.\u003C/p>\u003Cp>Let's let's take some questions here from Steven. Do you have a guide on what kind of traffic numbers you need to do effective testing?\u003C/p>\u003Cp>Speaker 1: Yeah. So maybe I can answer that. So like I said, we we have this sample size calculator, which basically tells you exactly that. Now, that calculation is always tied to a particular metric. Right?\u003C/p>\u003Cp>So if you are tracking five metrics, but each of those metrics has kind of, like, different, like, usage numbers, as in, like, different number of persons that actually generate that metric. 
To be to be really statistically rigorous, you actually have to take the metric with, kinda like the smallest traffic and make sure that you actually get enough traffic for that metric. Right? Other than that, it's it's it's already like what I mentioned. The the the larger the change you are targeting, the smaller the sample size.\u003C/p>\u003Cp>But the smaller the change you are targeting means that, you kind of, like you need to have, like, a more sensitive test, and you need a you need a large larger sample size.\u003C/p>\u003Cp>Speaker 0: That makes sense. Alright. One other question I see from Stefan. How would you set up an AB test for global components across multiple pages like a header? Will it be, like, a new type of test that needs to be set up first?\u003C/p>\u003Cp>It it depends. Like, any good development oriented question, the answer is it depends. But, you know, if you've got the let me just pull up direct us real quick, and then we'll I'll give the bonus link in just a moment. Where's this at? Alright.\u003C/p>\u003Cp>So, you know, a basically, inside the direct us instance that that we've shown here, on the page block level, we we just added a relationship to the variance. And, you know, we've got the corresponding logic inside the Next. Js application that basically just says, hey. Post all, give me the variance and then assigns one of those. But you could add the same relationship to other pieces of the website if you wanted to, whether that was, you know, your navigation, like, your navigation items, within the setup.\u003C/p>\u003Cp>So this, this CMS setup has navigation already built into it. You know, you could potentially link it there. You know, you could do, like, a a hybrid approach inside the code where you, you know, you hard code some of these tests, which are, like, a lot of the examples in post hoc just for simplicity's sake are are there. 
In our own experience, it's like I I tryna I kind of, like, look at it through the lens of, like, hey. Is this something we're we're gonna do often?\u003C/p>\u003Cp>Like, do I wanna test header elements often? If so, you know, it might make sense to enable your content editors to be able to do that. If it's, you know, like, a one and done test, you might just, add it to the code and and move on. So, hopefully, that's helpful. Let me throw up the well, I'll just post it here in the chat, if I can.\u003C/p>\u003Cp>What's going on? The screen share is stuck. Why is this not working? Something's going on. Okay.\u003C/p>\u003Cp>I can't post the link here in the chat. Matt, if you're around, post this link in the chat for me. My screen is fouling up as it often does on these demos. We'll we'll definitely send this out in a newsletter after the webinar as well, but, there's a repo where you can get all the source code. If you have any questions, feel free to follow-up with us.\u003C/p>\u003Cp>On the Directus side of things, we are also offering a special little promo. And I can't get this to yeah. Hey. The screen share thing is just spinning for me. So that's where we're at with it.\u003C/p>\u003Cp>Uriah, thanks for joining, man. I really enjoyed this. You know, this has been a fun project, and I I appreciate your support and your help along the way.\u003C/p>\u003Cp>Speaker 1: Likewise.\u003C/p>\u003Cp>Speaker 0: Excellent. We'll have a recording out for everyone. And with that, thank you, and good night.\u003C/p>\u003Cp>Speaker 1: Thank you, everybody. Good luck. Bye bye.\u003C/p>","And we are live. Alright. Welcome. Welcome. Welcome. Super excited to kick off this webinar. It's been a long time in the making for me. I have been knee deep in AB testing over the last couple weeks, so super excited. We are going to be covering how to build AB testing inside your CMS with Posthog and Directus. As everybody trickles in, if you are in the chat, let us know where you are from. 
Hop in the chat. Let us know. Awesome. I am Brian Gillespie from Directus. I see a few of you in the chat already know me. We also have Yurai from Posthog. Yurai, nice to have you. Hey, everybody. Nice to meet you. I'm Yorai. I live in Amsterdam, Netherlands, and I've been at Posthog for about a year and a half. And for the last year or so, I've been, working hard, on our AB testing tool, which I'm hoping to, demo you a bit today. Yes. I could I I'm excited because I'm gonna learn a bit on this one as well. Obviously, I've done a lot of the technical implementation for not just our own use case at Directus, but, for this amazing bonus that we got for everyone at the conclusion of this. But, I, you know, I I haven't messed around with all of the the the, like, the metrics and, like, the testing and it just like, all of the config inside post hoc is is tremendously powerful. So I looking to, I guess, forward to seeing what, you know, how that is cooked up on your end. Alright. Let's, let's cover the agenda, and then we'll kinda give a brief overview of, both direct us and post hoc just because I I saw some questions in the sign up of the people that weren't familiar with either one. So this is obviously the awkward introduction phase. Hi. Okay. I see everybody. Canada, Tampa, Florida, Texas. Amazing. Nashville. Next, we're going to kick it over to Yurai where they'll cover he'll cover, basically, a demo of post hoc and how you set up experiments, how you run AB testing, what's a feature flag, what's not, what you should be thinking about as you're testing. And then we're going to, basically do a live jam sesh of how to connect Directus and Posthog using this, the starter kit that we've created. And we'll open this up for q and a at the end of this. And I'll show you guys how to get this amazing bonus with, the working source code, fingers crossed. Right? So you don't have to invest the time and headache that I have invested over the last couple weeks. 
But with that, Yirai, maybe you want to talk a little bit about post hoc for those who are unfamiliar with the tool. Sure. I guess it'll be easier if I share my screen right away. Yeah. Go for it. Let me do that. Alright. Well, so post hoc, started as, as a product analytics platform, but we've really evolved into kind of, like, an all in one solution which allows you to build great products. So, besides product analytics, we have today more tools such as session replay, feature flags, surveys, you know, data warehouse, and, of course, experiments, which we'll talk about today. So experiments, basically allow you to, test, like, variations on your website, test different changes, and see if those changes lead to some kind of improvements in the behavior of your users, which is then, of course, visible in the metrics that you are tracking. So the way it works is that you you use post hoc's product or feature called feature flags. And feature flags basically assign different variations of your website to your users. And usually, by default, you will be testing two variations, on your website. The variations will be normally called control and test. And let's say user a will get the variation control, user b gets the variation test. They will both see something different on your website, and then, you know, PulseHawk will track the behavior of your users on the website by capturing events. And then we aggregate those events on our site, and we basically calculate results for you, which tell you whether a particular variation is better than some other variation. So like I said, every experiment is is backed by a feature flag, but, actually, you don't really need to know, much about feature flags at all. The feature flag will be created for you when you set up an experiment. So, like, all you have to do is basically create the experiment, and I believe Brian will later show you how to do that via direct us. We will. 
And, basically, it all kind of, like, happens under under the hood. And, basically, all you need to then do to analyze your experiment is to just let it go to your experiments that impose HOG, and you will be able to analyze your results there. So I'm going to open an example experiment to show you what a results analysis might look like. So over here, I have a running experiment open. You can see that it was started, like, two months ago, which will be, like, a pretty long running experiment, but that's just because, like, it's it's some test data. But, basically, the the core of each experiment is the metrics that you are tracking. Right? It's it's these metrics that tell you, like, what what exact changes in behavior your your changes in in your content or in your experience are producing. And in this particular experiment, I'm tracking, six different metrics, three primary and three secondary. And the difference between the primary and secondary is just that it's just like the way of of organizing your metrics. So the primary metrics are something which actually inform whether your experiment is successful or not. And secondary metrics are kind of like guardrails. So it's it's something that you don't really want to regress. It's it's maybe not like a like a metric directly tied to your experiment, but it could be anything from, let's say, like, a session length or or any kind of, like, in interaction maybe indirectly linked to your experiment, but you still may want to track that to make sure that, that you don't get some some other part of your of your product kind of, like, regressed. Cool. That's a I I like that for sure. Hey. That's that's one of the confusing things for me. It was, like, what goes in primary, what goes in secondary? Just, so it sounds more of, like, secondary would be like, hey. This is a great guard against unwanted side effects. Like, hey. Exactly. 
Increased conversion, but, you know, like, the time on the page or or, like, session time or, bounce rate or something rose versus, the actual event that we wanted. That's exactly right. Yeah. So it's just like a way of organizing things. And but other than that, there is really, like, no difference under the hood between, like, a primary metric or a secondary metric. Basically, always, like, at a at a very low level, what we do at Boss Hog is capturing events, and, a metric is like a way of counting those events in a in a certain way. So let's have a look at at one of these metrics. So let's let's take a look at the first one. I'm going to click at this on this edit button, and here is my metric definition. So this is a funnel metric which measures, the conversion rate between two events. So the first conversion so so the first event here is called sign up started. The second event is sign up completed. And what this funnel metric measures is the conversion between these two events. So you can see that I have 3,000 persons who did the first event, but only 815 persons who, triggered the second event. And so the the difference between these two events is basically your conversion rate, which in this case is twenty seven percent. And what you actually see here so this is like the the metric definition form. This is not your experiment result. This is just kinda like a preview, which shows you well, is the data actually there, right, in the in the system? Like, are actually people sending these events? And that kind of tells you that, okay. Like, this is, like, a valid metric to experiment on. Like, our instrumentation is set up properly so we can actually go ahead and and save that metric. So once I save the metric and, well, once the experiment is running, I will start seeing my results once sort of, like, a minimum criteria are met, which is that, you know, you need to have some sort some number of events that have been ingested. 
You need to have events for both control and test variants. And once all of these are met, you will start seeing results. And the way we present results is kind of like an industry standard way to present results of, you know, AB experiments, which is that we show you this chart, which is called a delta chart. And what you see here is that for each variant, you will see this bar. And this bar is basically a credible interval. What it shows you is the the actual or let me actually start start start, like this. So so each each bar shows you, the actual difference between that given variant and the control variant. Right? The the black bar in the middle is the delta between the variant and and the and the control variant. So you can see that in case of the test one variant, we actually have a regression here. So the control is at 0%, and the test one variant is kind of like minus 13%. So that's bad. Right? Like, that's a that's a regression. We have worse conversion rate for the test one variant compared to the control variant. Now for the test two variant, we actually see an improvement. So the delta here is plus 6.92 compared to control, and that's why this bar is in is in green because it's actually gonna be improvement. So that's what the the the black vertical bar tells you. And now now onto the edges onto the edges of the actual bar. So this is like a credible interval. So what this bar tells you is actually the uncertainty that you have because, in any kind of statistical testing, there is some sort of uncertainty. And this like, the the the outer boundaries of these, credible intervals tells you, what kind of range in the actual results you may expect. And this basically tells you that, this credible interval, goes from minus 3% to plus 70%. And that means that in ninety five percent of the cases, because this is a so called 95% credible interval, you can expect the true value to lie between, like, this range. 
So there is still some sort of small probability that there will be a regression, for the test two variant even though, it's kind of like there's a high probability that it will be, some some sort of improvement. The narrower a credible interval is, the higher certainty you have. Right? Because, it's kind of like a tighter range of values where the actual value may lie. The wider it is, then it's kind of like more more uncertainty. And oftentimes, as you as you collect more data and you, like, keep refreshing results, you can you can kind of, like, observe the variance getting narrower and narrower every day as you kind of gather more data and get gather more more certainty. So that's what these credible intervals bars tell you. It's calculated separately for each of the metrics. And, at post hoc, we use a so called Bayesian statistical methodology, and the two main outputs of the methodology is the credible interval itself, which tells you the the uncertainty of the result. And kind of like the main output is is what we call win probability. So in this case, for this variant, there is an almost 83% probability that this test two variant is actually better than control. And then we show you, kind of like the the significance banner over here. And at post hoc, the the criterion that we use to tell you whether you should roll out a variant or shouldn't roll out a variant is that the win probability needs to be higher than 90% for the best variant. And in this case, you can see that it's actually less than 90%. And that's why for this particular metric, we declare it as not significant because it's less than 90%, which is what this tool tip also tells you. Okay. So now you've just answered, like, my own specific like, this is the biggest question I've had on, like, the experiments that we've ran of, like, what's how do you measure the significance? You know? 
Because we we've ran several tests, and, I'll get into, like, the specific results here in just a bit of one of our tests. But, like, some of the tests have been like, we saw, like, a it what looked like a positive improvement, but it was marked not significant. So it's like, like, it you know, can we be confident in that result or not? Right. Yeah. Yeah. That's a good point. And, we actually keep, improving this UI, and we want to actually, like, make it clearer as to, like, what all of these numbers mean and when you can expect significant. And it's like why something is significant, why something isn't significant. Just, like, make make all of this kind of decision making clear. So that's that's definitely a a valid point. I I I'll just stick you in the UI explaining it, man. Take it like you flawlessly. You did I was gonna ask that again. I I didn't hear that. I I was gonna say, just stick a video of you inside the UI because you, you you did flawlessly. Oh, nice. Yeah. We might actually do that. That's a that's a great Yeah. Cool. Am I still sharing? Because I think the, Oh, we lost the screen share. Yeah. Oh, okay. I'll reshare. Okay. And then, so just like continuing to the second metric, it's it's exactly the same principle. So each metric is is evaluated in exactly the same way. I mean, the the like, on our back end, there are some differences as to how, different metric types are evaluated. So for example, the second metrics the second metric is a different metric type. Right? So, like, in in the first case, we had a we were measuring funnel conversion. In the second metric, we are actually just measuring the role click count. And, of course, there are some statistical differences as to how this should be evaluated, like, on the back end. And we make sure that, like, we do this properly. But for you as a user, there's really no difference. 
You basically just look at, the movement of these bars relative to the control variant, and you look at the the win probability. And then the banner will will tell you whether a particular metric is significant or not. You can also dive deeper into any particular metrics. So if you click on details, you will see the actual counts for, for each variant. You can see that the test variant is strongest. Right? So it makes sense that it has the highest count over here. And this is like a cumulative chart, so the the the counts actually stay the same after we we stopped collecting the data for this experiment. Nice. Yeah. You can also see things like exposures for each variant, their means, the delta, things like that. There's also like a like a small cool feature, which is that you can actually view recordings for any particular variant. So the the the power of the POSIX platform is really that we offer multiple products, and they are kind of, like, interlinked. So you can can actually, like, click on on this particular variant and see the recordings, like, of those persons that that that actually, like, sold that variant. And I don't see any recordings here because I'm on my local instance and just, like, using all the data so you can know from actual users. But in Yeah. On actual dashboard, you would actually see recordings, of users where you could actually see how they how they interact with your with your website. Yeah. And and, like, having that all in one has been, like, a significant help for us at Directus. You know, I would it's not like for us, it's it's not stack overflow. It's it's like stack overload. Like, I, you know, I the thought of adding, like, six more tools to your tech stack for your website, is just like a a mess. It's it's like a pain for us. I I don't wanna do it. 
So and, like, when we integrated PostHog, like, the the analytics for us was, like, one of the one of the first things that we got a lot of value out of, and then we we started diving into the AB testing. You know, as we we kinda shift gears, like, do you have any best practices, your eye on, like like, what to test? You know, obviously, like, you've built this thing. You probably worked closely with with some clients at PostHog. Like, what are people testing? Do you have any best practices to share? Sure. So I would say if you if you are just starting out with experiments, start with something very small. So things like, you know, small changes to your landing page. That will really allow you to, just, like, kind of, like, get, like, how how the whole thing works. And, maybe also kind of, like, circumvent some of the gotchas, like, like, why why while you are still starting out. Like, there are, like, several things that you should be aware of if you are implementing AB testing. I'm not sure if if, like I would say, like, maybe there can, like, be on the scope of this of this webinar, but we have, like, section with some troubleshooting and FAQs and, some best practices, when when implementing experiments. To summarize very quickly, I would say the like, one important thing is to make sure that your tracking is set up correctly. So, like, in your code, whenever a user performs a given action, you do actually capture that action so that PostHog receives that event because, obviously, if we don't receive the correct events, we cannot, provide correct analysis. Now in terms of some in terms of, like, some likely more actionable AB testing advice, I would say start with small changes. Also make sure that, you are testing perhaps only, like, one or two changes at once. 
Because if you change, like, too many things, let's say, on your on your on your landing page, and then the test is showing, you know, significant outcome, like, you don't really know which which one of those a changes that you've made is actually leading to the improvement. Whereas, if you just like there's, like, small incremental changes, then you would be able to to tell that, kind of, like, more reliably. Another kind of, like, important technical detail is that you should probably use a reverse proxy on your PostHog setup to make sure that the, like, ad blockers are not, like, blocking capturing of the events, which is, like, a common issue, that can be really easily, circumvented with this. We also have, like, proper documentation, on this. Like, in general, if if you have, basically, for for any questions, you can use our search functionality in our documentation, for example, for the reverse proxy. That will explain to you exactly what you you should do to to set up PostHog correctly to to be able to circumvent, ad blockers. Another useful tip is to learn how to actually estimate, your sample size properly. So one thing I I haven't explained yet is that we have this data collection section over here. And what this allows you to do is to is to answer the question how long you should run your experiment for. And, so I'm actually going to show you how this works. So if I click on edit over here, I have this slider which says minimum detectable effect. And this basically says, well, what kind of change in my metric am I trying to measure? And there's, like, a trade off to be made here because, if you are the the way the way sample sizes and experimentation work is that the larger the change you are trying to measure, the smaller the sample size you need. It may sound kind of counterintuitive. It it definitely. Definitely. Because I've seen this, and I'm like, like, hey. Why why do we need less people for this? Yeah. 
And, actually, right now, we are actually completely rebuilding this component to to do, like, a better job, explaining all this. But, basically, what this means is that if there is a huge change in your metric, you don't really need, like, a sensitive test for it. Right? You you don't need, like, a, like, a huge sample size because, if there is, like, a huge effect, that effect will already be apparent in, like, a relatively small sample size. But if you are trying to measure something much smaller, like, let's say, I'm I'm just going to move this slider from 10% to 2%, I need, like, a much more sensitive test, which means, like, a much higher sample size. Right? So, like, I I basically need, much bigger sample size to be able to reliably say, this 2% change is not just due some sort of chance. It's actually due to the actual change in the in the underlying behavior. But I definitely need, like, a larger sample size for this. So Right. The consideration some some some key considerations here is that are that, first of all, what is the sample size you can actually get? Right? If you if you are a startup and you're just, like, getting your first users, it might be actually difficult to get a sample size of 10,000 persons. So in that case, you are basically restricted to much smaller sample sizes. And Right. That actually means that your tests would actually would probably have to target, you know, just like large changes. You wanna go big or go home at that point. Exactly. Yeah. But as soon as you have some larger user base, you can start going after, you know, incremental small changes, that perhaps produce, smaller effects on your metrics. But you can you can run, like, many of such experiments and, you know, even incremental changes or or of one or 2% over time. They they really add up to a lot. So, yeah, there's, like, a trade off to be made here, when it comes to this, so it's it's it's good to be aware of that. Yeah. Well, awesome. Yeah. 
Thanks for the best practices, man. Thank you. Like, yeah, I'm I'm learning just as much on this as as the audience, so I appreciate that. We'll jump into, you know, kind of, like, our own experience a little bit, and then I'll, kinda run through the steps of of integrating post hoc and direct us and and, again, show you guys the the source code. We'll we'll dive into it. We won't write code. That didn't work out well for me the last time I, I did one of these live sessions. But, as far as our own, like, experience with posthog, you know, recently, we rolled out a brand new version of our home page. And I'm gonna can we see this? Oh, yeah. There we go. So this was a big change for us. And, you know, this one, if I shrink it down, it's probably a little better. But this was a this new homepage was a big shift for us. And one of the things that we wanted to do was we wanted to test first to make sure that, number one, the messaging was wasn't causing a a decrease in conversions. Number two, you know, we wanted to make sure that that this was performing better than our our old home page. You know, we've got this interactive carousel component that basically links into, what we call our directus pizza demo, which is just a a live working instance of directus. So folks can hop in and and poke around inside one of the templates. Before we we shifted all that traffic, right, we wanted to make sure that this was actually worthwhile. So our own results that we saw got a fancy slide up here somewhere. Boom. Yeah. So, the conversions were were relatively the same. And, again, that goes back to kinda your eyes point about, like, what metrics are, you know, how do we measure significant change? But some of the big results that we saw was, like, a 30% decrease in bounce rate on the site, which is huge. 
And, obviously, that correlates with a, like, a larger session time, most likely because people are getting in the demo of Directus, at least that's our hypothesis, and and actually poking around, which is which is what we want. And and I know you guys at Posthog, you're I, you guys are are kind of following that same methodology of, like, you know, let's let's skip all of the, fluffy marketing stuff and actually get you into the product, so we can actually dive in and and learn. Alright. So let's actually dive into a this integration. And I put together just, like, this a really nice visual for how we've been doing AB testing with post hoc at Directus. And that is kind of the the concept behind behind this setup. We've done tests at at two levels, and I call it the block level, which is basically testing within the same page, which is, you know, hey. We wanna test a different headline on the homepage, or we wanna test, a different pricing component or a a different pricing tier. So that would be like a block level test. And then, I I was calling this a page level test, which is basically testing between different pages. You could call it a split test. You guys are calling it redirect testing inside the documentation dry. But it basically like, we we take a URL. We want to redirect some percentage of the traffic. You know, usually, if it's just two variants you're testing, you you probably split fifty fifty. But but that's the way that we've been doing testing at Directus. And now I'm going to show you how this all comes together. So, this is post hoc, and we laid a special theme, special post hoc theme on top of a direct us instance just for this webinar. But this is our CMS starter template with a little bit of magic sprinkled into it. So, if we take a look at what I call the checklist where did that guy go? Supposed to have a production person here, Matt. Not calling you out, but I'm calling you out. The post hoc checklist, can we see that live? 
Do I have to stop screen sharing to see that? Yeah. There we go. Alright. So this is the AB testing checklist, as far as integrating with posthog. I'm gonna show you how to create a a project in posthog. We're going to dive into, like, creating a personal API key to power this little automation that we've got. We're gonna walk through the directest data model. We'll adjust some permissions. I'll show you the flow that's involved, and we'll talk through, like, this Next. Js front end, and how that is integrated. Alright. So let's get back to the screen share, and we'll do this together. What I'm going to do, and this is a little crazy to do. We are, like, maxed out as far as projects. So I'm gonna delete this test project. This is, sketchy on a demo on a webinar, but, that's what we're gonna do. Alright. So I'm in post all the first thing we've gotta do. Right? We're we're gonna create a project. Just go through this. This is gonna be the AB testing webinar project. Great. Alright. So we've got our project. I'm gonna need two things. I need the project ID. So I can find that up here in the URL, or, let me get my fancy mouse pointer going here. I can find that from these settings as well. So I'm gonna grab the project ID, and, yes, we will send the recording of this. I promise. Alright. So now in the Directus instance, which you are going to get total unrestricted access to you at the end of this, we've got some global set up. So global's inside Directus, are basically, just a what we call a singleton collection. So globals are are typically things like, social links or favicons, logos, stuff that you're gonna use across your entire site. So we're gonna add our project ID. And then the other thing that I'm gonna add, I I don't necessarily need this project API key for the directive side of things. You'll need this on the Next. Js integration. You'll wanna copy it to your clipboard, stick it into your text editor, so that you've got that. 
But what we're gonna do, we need to go into our personal settings. And the reason why is inside this Directus instance, there's an a nice little automation that will show. Alright. So I'm gonna log in, reauthenticate for security, and we're gonna look for a personal API key. So I'm just gonna create a new key. This is our a b testing webinar key. We want a specific project that's gonna be our AB testing webinar. Whenever you create keys, whether that's in PostHog or GitHub, please be very specific. So we're gonna do write access on experiments and feature flags, and I think this should be all we need. Am I am I correct in that assumption, your eye? Looking good to me. Yeah. Okay. Perfect. Alright. So I'm gonna grab this key. I'm gonna go in, and I'm gonna post that inside this Directus instance. Amazing. Magic. Right? So let's let's talk through the changes inside this Directus instance. Again, this is our simple CMS template. Like, if you go to Directus.io, you go to get started for free, you create a cloud account, You get logged in. You can get the starting point for this, just by clicking CMS, or you can also get it through our template CLI tool. We'll button up all these resources. But this already has what we call the many to any relationship. It's basically a dynamic page builder that is set up inside your CMS. So if I open up my live preview pane, we can see that this page is made up of blocks. Right? And this paradigm lends itself to that block test that I was talking about. So that is kind of the setup here. The, extra collections that we've added to this Directus instance, which are very minimal, are are just two pieces. Right? We have added experiments and experiment variants. And the reason why we add those inside Directus, we need to be able to link the content inside the CMS to the PostHog experiment. And this is it gets back into the why we created this. 
So we want to empower our marketing team, our content editors, to run tests, right, without code, without bothering the developer, without it being blocked by the developer. Right? We want marketers or well, this is my personal mission. I want developers and marketers to get along well. And if you are waiting, on information from marketing to set up the actual code for an AB test, not great. Likewise, if they have to bug you every time they want to test a new variant, that's gonna frustrate you as well. So, what we do, we've created a a experiments collection. And inside that, pretty simple. We've got a name for the experiment. We've got a feature flag key that you'll see we actually need, inside post hub. We've got a a short description. We've added a type of test. You know, is it a block or is it a page level test? And then we have our variants. So the variants are a relationship to that experiment variants, and this is pretty simple as well. We've got a key for the variant. Each experiment has to have a control variant as your eye talked about. And if you're doing a page level experiment or a redirect test, you need to have a URL. So on the front end, running a test is as simple as this. Right? With those pieces put together, and I'm sure Matt is crossing his fingers behind the scenes right now. Let's do a block level test. Right? So I'm inside Directus. I want to test a new headline. New headline. Home page. There he is. I see I see him in the chat. New headline for homepage. Alright. I stole this placeholder copy directly from you guys, Uriah. We want to let's see if this new headline improves conversion. So, hey, this doesn't totally replace post hoc. This is just a a slick integration to work together with the two. So we're gonna pick the test type. This is gonna be our block level test. We wanna test within the same page. And we'll add the control, and we'll add, just like this new headline variant. Great. Okay. Now with that out of the way, we save. 
What happens behind the scenes? There is an automation. I love automation. Direct as flows is a great way to build these automations. This is what this automation looks like, and I'll walk you through it really quickly. So whenever I go to create an item inside experiments, we've run this series of operations. We grab our global settings, so that API key, that project ID, and then we format a payload for post hoc. We create a new experiment inside post hoc using their API. Did we lose audio? Can you hear me okay, Yuri? I can hear you, Brian. Yes. Okay. Okay. Alright. I just wanted to make sure. Hopefully, it'll all be in the recording as well. But, then we've got a another it just it little piece of JavaScript here that formats a feature flag payload, and that's helpful for our redirect test that we're doing. And then basically, we stuff all that into post dog. And at the end of this, we return a payload that gets saved inside Directus. So the effect that we've get is a experiment that gets created inside Posthog. We've got a experiment here inside Directus now that we can actually link to, a piece of content. So if I go into post dog, we go to experiments. Check installation, skip installation. Skip or no? Skip. I did not remember that part of the creating a project. Alright. So we could see this experiment here inside post hoc. Let's see. There it is. This is all set up. But now let's go and link this to a piece of content. Right. So we're gonna go back to our home page, and we've got our hero block. So this is the control block. I'm just gonna go down to the bottom. And, basically, we've got a a relationship from this block to our experiment, and then we're gonna pick the variant that it belongs to. Except something wasn't quite right. It wouldn't be a demo that I was doing if everything worked smoothly. Why isn't my variant showing up? Clear filters. Experiment. There is the alright. You got me. Let's clean this up just a bit. 
I've got tried to get fancy, and I've got, I don't know what level of fancy I got here. But, okay. We'll try this again. Now I'm gonna link this to our new homepage headline experiment. We're going to add this to our control, and now we're gonna add another headline. This is the new headline. Amazing. It's gonna look beautiful. We're gonna link it to that same experiment, except now I'm gonna link this to our new headline variant. So all I'm doing behind the scenes here, nothing fancy. I'm just linking this piece of content to the posthog experiment. On the Next. Js front end, we're making a call. We get this content, And, because we've got posthog integrated, we get something like this. If I hit refresh, right, I don't see two hero images or two hero blocks here. I just see one. Right? And that is because of the post hoc SDK that's set up that is handling all the magic, and you could probably understand why I don't want to do all that magic myself. Now let's see if I can actually trigger you're gonna have to show me, like, a a trick to, like, force some type of, visitor into a variant sometime, your eye. Sure. We can do that. Let's see. This should be put in. Yeah. It's it doesn't seem like I can actually trigger the not triggering the variant here for some reason, through this. But, this is how this is actually integrated. This is like a a block level test. Now, you know, if I swap Can you, can you open your web tools? Maybe we can try to override a flag, for this particular page. Yeah. I I Oh, we don't have to do that. That was up to you. Yeah. Yeah. Okay. So there we go. So if you know, now you could see if I swap the control Right? If I make the headline the control, we could see the difference here. And, basically, the post hoc integration is is pulling that all together. So that is, like, the setup inside Directus. Like, if I wanted to run a page, a a reader direct level test, let's say I wanted to have a a new pricing page. Right? 
If I go to pricing, we've got pricing to fit every budget here. Maybe I wanna change this. We have new pricing. So we'll just create a new page. Pricing to fit no one's budget, and we'll just raise the prices by quite a bit here. Amazing. Alright. So now I'm gonna hit save as copy in this template, and now I've got I've got two new pages or or, well, one new page, but that's our page. I'm gonna go in. And now if I wanna do a redirect level test, I have new pricing page. Pricing page. We'll do a redirect. So the control, here, we're gonna add this URL. So that'll just be slash pricing. And when you set up the control experiment or the control variant, again, that is the URL that you're testing. It's an important distinction to make. Next, we will add the new pricing page. Great. That's gonna be new pricing. And, again, our direct us flow automation, like, will will bring this home for us and basically create this experiment inside post dog. So we'll just hit refresh. We've got our new pricing experiment, and I can click in and and see the the variance here. And as Yurai showed, there's just a feature flag that backs these. Now what we're doing on the page level tests is we're using the post hog feature flag payloads, to avoid making a extra call to direct us for this information. So if we I think I can get a better view if I click edit here, maybe shrink this back a bit. You can kinda see what's going on here. We've got an experiment type. It's a page level test. We've got a control path, so that's our pricing. And then we have a path that we're going to redirect to. And what happens on the front end if we go to local host 3,000. Now if I try to navigate to pricing, I'm either going to get the control did I I guess we may have to dive into the actual tools here. We'll we'll work that out in a moment. Love giving these demos on the fly. Alright. What is next on the agenda? We'll just look at that really quickly. Alright. Our feature flag test. Alright. 
So we've got the director side of things. We've nailed that piece. And then let's take a look at, like, the Next. Js side. Right? We want to, like, walk through how this is actually set up and integrated. So a a couple important pieces that you need as far as, like, setting this up within Next. Js. Let's pull this up. Alright. Can everybody see this? Okay. Let me try to close close the terminal a bit here. I I can shrink the the size of the make make the font just a little bit bigger. Alright. So, again, once you download this repo, you know, feel free to to browse through it on GitHub. We'll, again, we'll we'll send you all of this. But let's let's start on the direct side of things. Inside this Next. Js application, there are our fetchers. So we're just using these two. Close. Shrink that. Okay. There we go. Alright. So these fetchers are are basically just communicating using the Directus SDK. What we've got here and the only change that we've made from our standard Next. Js template is just making sure we grab the experiment data and the experiment variant that we've linked to a page block. So this all comes together on our, like, our page builder setup that we've got. And one of the other things that you'll have to do inside Directus, you can fetch that data, but you need to be able to add the data to your permissions. So you've got to make sure that your experiment variance and your experiments are enabled underneath your permissions to make this work inside Directus. That is just as far as getting into best practices, that's one of my Directus best practices. At 99% of my errors are because I didn't set permissions. Right? But we have to add that experiment variant there. And then inside what we call the page builder, there's a a bit of logic here that basically, filters out the blocks. So, this template is set up to run Next. Js server components, so we don't get this flash of content, whenever we enroll someone into a variant. 
But, basically, we're just checking to see, is this block attached to a variant in an experiment? If it is, we get the feature flag from the post halt client, which we'll look at in a moment. And should we add this block? So if the feature flag is found and the block is the control variant, we'll add that control variant. If the feature flag is found and it's not the control variant, we'll make sure we add that to the, to that block. So, not a huge shift in in the logic as far as working with Directus, just simply matching those up. On the post hoc side of it, and this is all we it's just standard boilerplate from the post hoc documentation. You need to have a post hoc provider. So we just set this up using use client here because this provider is going to go into a shared, like, a layout inside this Next. Js application. One of the important bits, especially for, like, server side rendering is the bootstrapping. So, basically, we're getting all the feature flags on the server side from post hoc and making sure we pass that when we initialize the the post hoc JS client on the the client side. And, Yurai, do you have anything to to kinda add on that bootstrapping side? You know? I I know this was, like, one point that was, a a little bit of where I ran into some issues when I was implementing this. No. Not really. I would just say that this this is really the preferred way how to how to get a feature flex to your client, for a couple of reasons. Because the the other alternative is to actually fetch the feature flags directly from the client, but there is always some delay there. So, you know, you you you may get some usage events being sent without correct future flag information if you do it that way. Right? But if you if you bootstrap your flags, that means you always evaluate the flags on the server, which is actually faster because, the POSOC library actually evaluates, the flags there without having to actually go to the POSOC server. So it's actually faster. 
And once the the web content is served, you already get the feature flags kind of, like, basically already bootstrapped to it. So, this is is actually what what we always recommend, for our users to do. Yeah. Yeah. Makes sense. Now how you do the bootstrapping, it depends on your specific application. The way that we chose to do it in this specific one. So we've got this post hoc provider. There's a there's a shared layout that I'll I'll show you. This is just how we we use this provider. So inside our root layout component, and this is using the the Next. Js app router setup, we are actually, like, sending this bootstrap data in a header from a Next JS middleware. So we get that here. We pass it to our provider, that sends it down through the client. But the middleware is an important piece. Now you you could do this via a, like, a server component and, like, if you're not doing the redirect testing, I found that that worked pretty well. But, as far as, like, the redirects, you probably wanna do this in the Next. Js middleware. Just this is the best way that I've found to do it. So what we've done inside the middleware, if we get to the actual function here. Right? We get the path name that you're sending, you know, what we're navigating to, and then we're basically getting a distinct ID. So this is just a helper that is somewhere, maybe in a where is that guy? Distinct ID. Yep. So a post log gives stores a cookie. We will try to get the distinct ID for that visitor, that user via that cookie. If we can't find it, we're just gonna create one. Right? And then we will look for some cache data inside the cookie. So we've got, like, a a bootstrap cookie, where we're we're caching this data. But, basically, what we're doing to enhance performance and and make sure that you're not, like, delaying rendering every single time, we've got a flag route set up on the API side, which is somewhere. Posthog flags. So, basically, there's a Node. Js posthog client. 
We pass that distinct ID to it. We go get all the flags and the payloads, and then we're caching that for sixty seconds. So as the user navigates, this middleware gets triggered, and, you know, we're we bootstrap that data. And then we also use this to handle our redirect, at the page level. So we've got a check for redirect function, which basically looks at that flag data that we have here. So once we fetch all of those flags from post hoc, we're iterating through those and saying, okay. Are any of these redirects that we've set up matching this experiment? If so, then we we send them through. You know, there's a a little bit more fancy stuff behind the scenes, but I I know we're coming up on time with this. Is there I I'm trying to think if there's any other, like, important pieces that I wanted to cover before we turn everybody just totally loose on this thing. I don't think so. Let's see. Where's my checklist? Redirect. We've configured the provider. And I saw where is was it Jobchum? Yes. So the that was a good spot. I figured out why this is not working. Thanks to JobChomp. It the public post hoc API key is from my previous project. So that's where I told everybody to take this down, make sure you copy it, but I forgot to stick it into my EMV. And, yeah, that's why we were having issues. Always love it on the demos. That's that's always fun. Alright. That's it. Let's, we'll open it up for q and a. You're I, you know, while we're waiting on questions to come up, I just wanted to say thank you for for jumping on with us and, you know, at least teaching me how to get get more use out of out of the post hoc side of things. Of course. Yeah. It's a it's a it's a pleasure. It's it's a very nice integration that you that you build there. And, we actually have our own kind of, like, no code experimentation tool. It's still very, like, very much in beta. But, we'll I'd love to get access to that. Yeah. 
Whoever whoever uses, like, just Bosak can actually, like, already try it out. It's it's, like, not nearly as powerful as as as, like, what direct Directus allows right now as in kind of, like, rearranging blocks and, like like, doing all that. It's basically just for, like, simple simple style changes. But, yeah, perhaps there is also something for us to to learn here. Yeah. Yeah. You know, I like, hopefully, like, coming out of this, we'll have, like, a, like, a how to dev blog post to to put this together. But, you know, one of the things that I I just I struggle to find any, like, linking to CMS examples. So, now that we've got one, this is how an integration could work. Let's let's take some questions here from Steven. Do you have a guide on what kind of traffic numbers you need to do effective testing? Yeah. So maybe I can answer that. So like I said, we we have this sample size calculator, which basically tells you exactly that. Now, that calculation is always tied to a particular metric. Right? So if you are tracking five metrics, but each of those metrics has kind of, like, different, like, usage numbers, as in, like, different number of persons that actually generate that metric. To be to be really statistically rigorous, you actually have to take the metric with, kinda like the smallest traffic and make sure that you actually get enough traffic for that metric. Right? Other than that, it's it's it's already like what I mentioned. The the the larger the change you are targeting, the smaller the sample size. But the smaller the change you are targeting means that, you kind of, like you need to have, like, a more sensitive test, and you need a you need a large larger sample size. That makes sense. Alright. One other question I see from Stefan. How would you set up an AB test for global components across multiple pages like a header? Will it be, like, a new type of test that needs to be set up first? It it depends. 
Like, any good development oriented question, the answer is it depends. But, you know, if you've got the let me just pull up direct us real quick, and then we'll I'll give the bonus link in just a moment. Where's this at? Alright. So, you know, a basically, inside the direct us instance that that we've shown here, on the page block level, we we just added a relationship to the variance. And, you know, we've got the corresponding logic inside the Next. Js application that basically just says, hey. Post all, give me the variance and then assigns one of those. But you could add the same relationship to other pieces of the website if you wanted to, whether that was, you know, your navigation, like, your navigation items, within the setup. So this, this CMS setup has navigation already built into it. You know, you could potentially link it there. You know, you could do, like, a a hybrid approach inside the code where you, you know, you hard code some of these tests, which are, like, a lot of the examples in post hoc just for simplicity's sake are are there. In our own experience, it's like I I tryna I kind of, like, look at it through the lens of, like, hey. Is this something we're we're gonna do often? Like, do I wanna test header elements often? If so, you know, it might make sense to enable your content editors to be able to do that. If it's, you know, like, a one and done test, you might just, add it to the code and and move on. So, hopefully, that's helpful. Let me throw up the well, I'll just post it here in the chat, if I can. What's going on? The screen share is stuck. Why is this not working? Something's going on. Okay. I can't post the link here in the chat. Matt, if you're around, post this link in the chat for me. My screen is fouling up as it often does on these demos. We'll we'll definitely send this out in a newsletter after the webinar as well, but, there's a repo where you can get all the source code. 
If you have any questions, feel free to follow-up with us. On the Directus side of things, we are also offering a special little promo. And I can't get this to yeah. Hey. The screen share thing is just spinning for me. So that's where we're at with it. Uriah, thanks for joining, man. I really enjoyed this. You know, this has been a fun project, and I I appreciate your support and your help along the way. Likewise. Excellent. We'll have a recording out for everyone. And with that, thank you, and good night. Thank you, everybody. Good luck. Bye bye.","70c8471c-7ad1-49ad-bcde-86e4370fc40b",[190,191],"f885efad-ff0f-404d-a475-fc8f0319742d","be9d26d5-180f-4a33-8907-1353e1a713e1",[],{"id":133,"number":134,"show":122,"year":135,"episodes":194},[137,138,139,140],{"id":140,"slug":196,"vimeo_id":197,"description":198,"tile":199,"length":8,"resources":8,"people":8,"episode_number":200,"published":201,"title":202,"video_transcript_html":203,"video_transcript_text":204,"content":8,"seo":205,"status":130,"episode_people":206,"recommendations":209,"season":210},"advanced-content-workflows-inngest","1067367779","Join us and our friends at Inngest to learn all about building advanced content workflows and orchestrating automated localization for content in different languages. ","f0228f3a-bc39-4151-b6fd-55594af0f637",4,"2025-03-20","Build Advanced Content Workflows: A Deep Dive with Directus + Inngest","\u003Cp>Speaker 0: With Directus and Inngest. I'm your host on the Directus side, Bryant Gillespie, developer advocate, growth engineer, professional hack and slash developer over here at Directus. Super excited for today. Got a special guest that I'll introduce in just a moment. We should have a good time.\u003C/p>\n\u003Cp>We kinda reworked the slide deck a bit last minute, as I tend to do. But before we dive into introductions, let's just take a look at what's on the agenda. Alright. So we'll go through some super awkward introductions in just a moment. 
We'll deep dive into what is ingest, what powers ingest behind the scenes, why you would wanna use ingest, then we'll get into why Directus and ingest are a great pair together.\u003C/p>\n\u003Cp>And then we're gonna run through this advanced content workflow after we've, walked through this integration of invincible automatic localization, for your content, which is always a beast. So with that, I want to kick over to mister Dan. Dan, how are you, sir?\u003C/p>\n\u003Cp>Speaker 1: Great. Great, Brian. Thanks for thanks for having me. Stoked about stoked about doing this. Thanks everyone for joining as well.\u003C/p>\n\u003Cp>My name is Dan, and I'm the CTO and cofounder at a company called Jest. And we'll, we'll get into a little bit more about what that is and, what we're doing today and, why you've joined. But, yeah, just thanks for joining, and I'm stoked about the stuff that Brian, Brian kicked off today.\u003C/p>\n\u003Cp>Speaker 0: Yeah. Yeah. And, Abel, like, for for some context and backstory, like, I first found ingest, we were working on a project we were calling Event OS. And, Direct has made it a breeze to set this up, get some APIs to work with inside a Nuxt application, but it's not dissimilar from the platform you use to register for this event. One of the the main challenges was, like, how do we send notifications and, like, how do we set up reminders an hour before an event and still have that be robust, like, cancellable in case we move the event, you know, and all sorts of things that we're gonna cover today.\u003C/p>\n\u003Cp>So that was my introduction to ingest, and frankly, I was kinda blown away. I I don't think if Matt is here in the chat, I was like, man, we gotta get these guys on a partner webinar, and voila, here we are. So with that, we'll just drag the little slide deck over. And what's up next is what is ingest? 
So I'll kick it over to you, Dan, and, you know, let you dive into ingest a little bit deeper for those who are unfamiliar.\u003C/p>\n\u003Cp>Speaker 1: Great. Thanks, Brian. Appreciate it. So I'm gonna give you a little high level overview today, and we're a technical audience, so have a little bit more of a a technical variant on slides today. So, if anyone needs things a little bit bigger, tell me to bump it up in the chat.\u003C/p>\n\u003Cp>I'll try to respond if there's anything. So yeah. So, basically, what is Ingest? Okay. What you can basically think about it as the at the high level is that Ingest is a workflow orchestration platform that enables you to run reliable asynchronous jobs anywhere.\u003C/p>\n\u003Cp>We're gonna get come back to that in just a just a minute or two. So why would you use ingest for this this task, this need? What ingest allows you to do is to define these step functions. So you're creating workflows directly in your existing code base, use our SDKs, and any logic that you're used to. There's no DAGs or really obscure complex DSLs to learn to build these, powerful functions using regular code.\u003C/p>\n\u003Cp>So they're also reliable by default. So everything that you run with ingest automatically is retries. I think we've probably a lot of people here have built systems where, things go wrong. An API goes down for a few minutes, there's a blip, there's a networking thing, there's a race condition that happens in your database, and all simple retry a few seconds later would solve your problem. So ingest does that by default, and it also includes, something that is called durable execution.\u003C/p>\n\u003Cp>You may have heard this term, and if you haven't, you can think of this as your code is designed to be fault tolerant and survive catastrophic failovers, basically. So I won't get too deep into that topic today, but there's plenty of resources that we can send you after if you are curious. 
The last thing is that we've built in queuing and flow control right into ingest so that you can go to production confidently. What that means is that there are concurrency management built into the system that you don't need to have run a a worker or a queue or anything like that. You just define it in your code very simply.\u003C/p>\n\u003Cp>You also have controls for multi tenant support, which is another advanced topic we won't get into yet today. But you can just kind of anything that you might have built into some sort of, asynchronous system that you might need is there at your fingertips with easy configuration like throttling, rate limiting, delaying the start of jobs, debouncing, batch processing, dynamically prioritize prioritizing certain jobs, as well as running work in parallel or fanning out to multiple jobs. So that's at the high level, but realistically, okay, what can I build with ingest? So let's just you know, at the highest level in general thing is ingest is a general purpose tool, and you can run any background task. Something simple, just one function, one operation to something that is that is very complex and has to orchestrate between different process.\u003C/p>\n\u003Cp>So you also have the control in your hands. You can control the flow of the logic with if statements, not DAGs, and you can kinda build whatever you want. So I'll give you a little taste of, you know, with this audience, which is technical, and I'm sure a lot of y'all are interested or already users of Directus. You know, what might work in or make sense in your in your situation or what you might be building? 
So one of the first things that's very hot, sure a lot of you are dabbling or building already, pushing these things to production is AI workflows.\u003C/p>\n\u003Cp>So you can handle the complexity of things like flaky LOM API calls that might fail, things like chaining, building agent loops, calling tools if you're getting that advanced, or do things like human in the loop flows. You know, you're you're proving the work that that, LLM did for you. So this might be a research AI workflow. Go out and get all this data and pull it back and store it somewhere, or you might be creating content. That kinda leads me into content processing, which is where you might be pre or post processing some content.\u003C/p>\n\u003Cp>It might be a blog post or could be static assets that are in your system like images or videos. You might be doing things like translating, resizing, transcribing more to those assets. You might need to kind of create a complex pipeline. So one example here is that you you might be taking data, from your, your Directus system and you might be piping that into maybe a React SVG library and generating visual assets on the fly for your content and then uploading it to where you're storing, your assets. You know, another thing there, like, kind of beyond content is is user journey automation.\u003C/p>\n\u003Cp>Brian said at the top something about scheduling emails with events and and and and that and and whatnot. Within just, sorry, Brian.\u003C/p>\n\u003Cp>Speaker 0: No. No. That was a huge one for me, man. That was, that was, like, my introduction to it, and I think that was, like, one of the one of the first case studies or something I ran across in the docs of, like, hey. Just something as simple as let me sign a user up and then wait a day and send them something Yeah.\u003C/p>\n\u003Cp>Incredibly convoluted in other systems.\u003C/p>\n\u003Cp>Speaker 1: Yeah. And and you know what? 
You might have this thing where you could some systems might allow you to schedule an email in the future, or you might have a cron based system. But what if you could just, you know, prepare something, do some operation maybe like you're sending somebody you know, you're sending someone an invite, and then that invite email goes out. And then twenty four hours before the event, you might wanna schedule and say, let's render this email.\u003C/p>\n\u003Cp>Might be using, say, React email, which is awesome, and, pulling in data from different sources and sending that person a personalized email twenty four hours ahead. Or you might wanna customize, hey. This user is in this time stamp. So you might wanna, like, have more control sorry, time, time stamp, time zone. So you might wanna have, like, a time aware thing.\u003C/p>\n\u003Cp>So you could use something like another complex tool or, whatever is within an event platform that you're doing or something like Intercom or HubSpot, but that can be either not very controllable. You can't bring in custom logic and code. It's very static. They're crazy expensive, for those purposes. So you kinda do whatever you want here.\u003C/p>\n\u003Cp>Build flows that are scheduling delays, Slack notification, even like things like provisioning resources or provisioning access. So, that's a fun kinda use case. You can kinda build anything triggered with these vents. I'll go through a couple of these other ones because these are kinda secondary. But, you know, if you're integrating your system with a third party webhook, maybe something like integrating Stripe payments somehow into in into your system, or maybe you're using a system like Mux, and you're uploading your video assets to Mux, but you need to do something in your Directus CMS after those videos are uploaded, you can use a webhook.\u003C/p>\n\u003Cp>And you can those events can go to ingest and trigger your functions to run your Directus instance. 
So you can kind of build more advanced integrations with more control that you might need. And the last thing that happens in a lot of systems is there's always data from one source to another to another place. So you might have just data import jobs. Kind of think about it like you're building ETL jobs or just pipelines or synchronization, but you don't, you know, you you don't need anything like a complex data science tool or, something that a data engineer might use like an airflow or something like that.\u003C/p>\n\u003Cp>So you can build kind of anything in this, but I hope these are kind of ideas that might work with, what you're already doing with Directus. So I'll go from there, and I'll, I'll get to that question in a little bit. We have some time at the end as well. And, I'll go and just show, like, a little bit of functions. You this again is a technical audience, so let's just show some code about how you build an Ingest function and how it works.\u003C/p>\n\u003Cp>So I'll run through this. Basically, what you're doing here is you're creating the Ingest function. It's pretty simple. You define it in your existing code base along with your directus functions, your directus logic and whatnot, your whole system. You can do things like define queuing or flow control right at the top of the function very easily.\u003C/p>\n\u003Cp>So if, for example, you just wanted to run this job, at most 30 times in sixty seconds and you wanted to max out at three retries or 15 retries, you can control these things very easily and the ingest system will just do it automatically for you. You don't need to battle queues and infrastructure for that. You also declaratively define the event that will trigger your function. So in this case, when a blog post is drafted, run this code. 
And then bay very simply, you define a function right here, which is a handler that has all your code.\u003C/p>\n\u003Cp>If you're thinking about what I talked about before which is workflows, steps, and whatnot, when you're defining ingest functions, all you need to do is learn a couple building blocks which are steps. The main one is step dot run. Basically, you can encapsulate some sort of business logic, some sort of side effect, and you can perform an operation in there. Each time you run code within one of these, it's automatically retried if it fails as per your retry policy. And if this does succeed, if there's any failures later in the function, it won't be retried.\u003C/p>\n\u003Cp>We cache those results and save them. So it makes your function a little durable if you're familiar with that term or fault tolerant, you know, that's the the a la the, durable execution side of things. So, basically what you're doing is you're defining steps and multiple steps to build out your workflow. You can use normal if logic. You can dynamically define steps on the fly.\u003C/p>\n\u003Cp>You do not need to predefine them, which is really great. And that's the building blocks. But what's interesting is that there's more power to these these these basic primitives of steps that you have. So here's one example that might tie back to what we were just talking about, which are that you can pause workflows and wait for approval. So you can or wait for other data to happen, other events.\u003C/p>\n\u003Cp>So you could build things that run, process some, you know, some data, and then wait for maybe an approval or maybe something else in your system to be updated, like a draft or publish status or something, and you can resume your function. So ingest your code will not be running. Ingest basically holds on to the state of your function. 
And then when it gets that event, it resumes and restarts your function from where it stopped, doing some different kind of magic and whatnot. So then again, you can use that approval to dynamically, you know, traverse and and run different steps.\u003C/p>\n\u003Cp>You can also do things that are a little bit, you know, kind of outside of this this this thing that are that are kind of extending the power, which are making reliable calls to LLMs by offloading those AI calls to ingest servers. You can also do things like what Brian talked about, which are basically use other types of steps like sleeps or delays. So you can delay to an exact time and then resume the code then, or you can delay to an arbitrary number of days or hours or weeks, and then your code resumes. So these are kinda just the the basic building blocks. There's a couple more that I didn't get into.\u003C/p>\n\u003Cp>But if you can learn these couple primitives, you can build a lot of complex logic that also is very reliable outside of box. So, you know, going from here, I'll just take a step back and look at the architecture. And, Brian, how are we doing on time right now?\u003C/p>\n\u003Cp>Speaker 0: We are we're no. We're golden.\u003C/p>\n\u003Cp>Speaker 1: Can you see? Okay.\u003C/p>\n\u003Cp>Speaker 0: Yeah. Yeah. No. I just yeah. No.\u003C/p>\n\u003Cp>We're beautiful. No worries. I I I did just wanna say, like, the hey, like, the the background jobs and and, like, some of the the queuing stuff was what got me in the door. But, like, the the durable execution piece, especially when you're working with, like, the LLM stuff, after you've you've used, like, one of these big models where you could just literally watch credits evaporate when a function fails is, has been a, like, a nice piece for my workflow. So, kudos there.\u003C/p>\n\u003Cp>Speaker 1: Yeah. Thank you. Thank you. Appreciate that. 
And, yes, to the to the, to the audience, yes.\u003C/p>\n\u003Cp>Using, step dot a I dot infer, if you're running in serverless because with something like Vercel, it can offload it to our servers. So your function stops running. It's not consuming any any compute and it offloads. So that was an easy one to jump in there. Thanks for that question.\u003C/p>\n\u003Cp>Yeah. I'll jump into the architecture because I wanna show you how it works with Directus and how the system is. So basically, at at the high level, this is a little, you know, you don't need to know, like, know everything in detail. I'll try to zoom and see if I can pinch in a little bit. There we go.\u003C/p>\n\u003Cp>So the ingest system comprises a bunch of things that you might typically build or need to to execute this and do this yourself. So and there's much more beyond that. But the fundamental thing is there's an event API that receives data from anywhere, could be from your direct assistance or the webhook, And there's, something that consumes those events and basically, creates new jobs and enqueues them. It stores the state of those jobs. It stores the history of those jobs in case there's a problem or anything or you wanna observe that later, which we'll get to, then it executes those jobs.\u003C/p>\n\u003Cp>And what happens is the ingest knows, okay, I'm running this function and it toasted on your direct instance. Your code is sitting in your direct instance, and it invokes it via HTTP. So it securely just calls it when it needs to, invokes the minimum number of work, and then returns to ingest. And then if it needs to resume or continue, it will make other calls as necessary. So if you sleep, it goes back to ingest, then it calls your server again later.\u003C/p>\n\u003Cp>So this is the basic kind of principle of how all the systems work together and how there's a queue built into the system that is basically pushing work to your system. 
And that's why it's really key to have the flow controls. You know, you don't want to you wanna set something like throttling because you don't wanna overwhelm and knock down your system or your database in case you're processing, say, a huge backlog of 10,000 or 10,000,000, jobs or tasks that you need to do. So this is really key and also just shows that the code that you write is hosted on your, your machine. It's where you have it.\u003C/p>\n\u003Cp>You keep it along with your existing, direct as code base, which is really great. So I'll, I'll show you a little bit about, like, why do we have a database, what is this dashboard UI. So when you're running these long jobs that are happening behind the scenes, they're often hard to observe. I'm sure that some of you have worked in systems where you're parsing just logs on CloudWatch or something like that. You're trying to figure out what's the status of this job.\u003C/p>\n\u003Cp>Okay. Now we need to chuck it into a database so we can observe this. And you're trying to piece together what happened and where things failed. So we get to that one question in one second. So what we have is we have a dashboard that gives you visibility into the system.\u003C/p>\n\u003Cp>So this is the ingest dashboard and ingest cloud where you can see the the output or status of all the different workflows and functions that you've run-in your system, and then you can expand these to see all the different steps and what might have failed, what have might retried, the outputs of those steps, some metadata that you might pull through like, say, tokens in the model that you might have used to make an AI call or the time stamp when something started or ended. You also can look at inputs, say, okay. What piece of content triggered this? You can debug what actually happened and understand, did this work? 
You can easily cancel and retry these things or retry them in bulk.\u003C/p>\n\u003Cp>There's a lot of operations you could do, but this is the core of, like, this observability that you have into the system. So in also at the, like, the highest level, you can see broad system level metrics, like what functions have failed. Is my import data pipeline failing at a high rate? What what's happening? I need to look into this.\u003C/p>\n\u003Cp>So it gives you this kind of overall picture of how the system is go is going where it's it might be doing things always in the background. It's it aren't always automatically visible. And you can run many functions and have all different types of, functions and workflows like generating video transcripts or creating chat completions or daily digest. You you just get the single pane of glass of, like, understanding what's running in my system. And so the point of concurrent runs that, was asked in the, in the, in the chat, what happens is basically, like, ingest, can can execute multiple things at a given time.\u003C/p>\n\u003Cp>Depending on your plan and your configuration, you can limit how much concurrency you want. So you can control I only wanna execute 1,000 things at a time, or you might have multiple servers that you've horizontally scaled to handle more load. You can control individually what that is. You can also with the multi tenant controls can can control how many concurrent jobs a given user gets. So you can limit it like that in case you're building that type of system.\u003C/p>\n\u003Cp>But, ingest handles that because it it it does the it actually processes the the work off the queue internally. It can you basically declaratively say, this is the concurrent number of like, amount of work that I wanna run. So that's a that's a great question though. So, you know, basically, to summarize, like, this part, there's why are we here? We're talking about in ingest and direct us.\u003C/p>\n\u003Cp>Right? 
So tie back a couple things that I that I went over. It's the same code base that you're already running. It's the same server. You get to take right.\u003C/p>\n\u003Cp>Use the ingest SDK to define the functions right in the same code base with everything else that you're using with Directus, and there's no additional compute necessary. And, and, Brian, you got some? No.\u003C/p>\n\u003Cp>Speaker 0: No. I was just jumping back in as we transition.\u003C/p>\n\u003Cp>Speaker 1: Yeah. I'll, I'll I'll, Brian's gonna go in a lot deeper about this, which I'm which I'm stoked for all y'all to see because he's built some really cool things. And and some of the things again, this is fault tolerance, the async workflows, you know, so you get the confidence, automatic retries, and control over these processes. And remember that steps encapsulate these retryable logic. You can bring existing logic, import new libraries, whatever the heck you want, write any workflow.\u003C/p>\n\u003Cp>And one thing that Brian will go deep on this, I think, is super cool about how it works together is Directus, hooks are super cool because it kinda provides this nice link between the direct to system and ingest functions, but I won't spoil anything there. I'll, I'll pass over to Brian to kinda take it from here. But, and then we can go to if there's any other questions, we'd probably go to the the QA afterwards so we can get through some of these things.\u003C/p>\n\u003Cp>Speaker 0: Some of\u003C/p>\n\u003Cp>Speaker 1: the questions might be answered.\u003C/p>\n\u003Cp>Speaker 0: Yeah. Definitely. So I pop the questions into the chat. We'll get to them if we can. 
I'll take the screen back, and we'll briefly cover what is Directus.\u003C/p>\n\u003Cp>For those, who are joining, who are not familiar, Directus is I I like to call it LEGO for developers where, like, everything that you need to build a digital experience, whether it's an app, a website, automatic content translation, which we're gonna do today, it it's all there for you. And you're just stacking these bricks together a lot like, what Dan was mentioning with ingest of, you know, these primitives that you have, and you could use them to build really powerful stuff. So Directus will give you intuitive UI, which we'll look at in a moment, a nice, like, admin CMS layer. You get instant rest and GraphQL APIs on top of any SQL database, and this is all self hostable. You can also run-in our cloud as well.\u003C/p>\n\u003Cp>And just a little tiny, fine print there of any, should be like most SQL databases. Right? Alright. So why Directus? Why ingest?\u003C/p>\n\u003Cp>Why together? A couple things. Like, Dan already mentioned, you know, the hooks that we have in Directus make this super easy to communicate with ingest. But more than that, ingest gives you that durable execution. To say being able to build these powerful workflows in code is tremendous.\u003C/p>\n\u003Cp>So the first question that, I was asked when I was putting this webinar together, and and this is from our team was, hey. Why why ingest when we already have automations inside Directus? So if you're still learning Directus, we have an automations module called Directus flows, and it is allows you to define a low code, no code automations. You know, when something happens, do this. 
These flows are great for short lived automations.\u003C/p>\n\u003Cp>Some of the things that that Dan showed in the previous slide where we have the syntax of, like, the step function, Directus doesn't have the ability inside a flow to wait for thirty days and send an email or to, wait for another event inside of a flow, without some complicated logic tying it all together. So flows are great for short lived automations or automations that that don't have super complex logic, and ingest is a great complement to flows. And, you know, in the future, I could even see us having an extension to trigger ingest, functions inside a flow. But, as we get into flows versus hooks versus extensions, as as Dan mentioned, Directus flows underneath the hood once you pull off the the mask here, the the Scooby Doo, I kinda mean, is just using Directus hooks. So, Directus hooks, if we pull up my browser, which is always dangerous since I'm using Arc, Directus hooks are just code that fires whenever certain events occur, and you've got a lot of different hooks at your disposal.\u003C/p>\n\u003Cp>This is using custom extensions, and we'll kinda cover that in just a minute. But at the core, hooks are allowing you to say, hey. When this event happens at Directus, do this. And, we'll we're leveraging these hooks extensively in this integration. So there's filter hooks, which happen before the event is emitted.\u003C/p>\n\u003Cp>So if I wanna run some code to either modify the payload before an item gets created or potentially stop that from occurring, I could do that via a filter hook. And then the ones that we're leveraging today in this workflow are gonna be our action hooks. So when after an item gets created, we wanna do something. In this case, it's going to be sending those events to ingest. Alright.\u003C/p>\n\u003Cp>Any questions on hooks before we kinda dive into this? 
We've also got, like, an endpoint that we're gonna use, and I I just wanna take a moment to touch on one of the most important pieces of Directus is this extensibility. So if you go into our documentation, which is a a great starting point after this webinar, take a look at the extensions overview. You could customize every piece of Directus. So you could customize the interfaces, like how the data is displayed inside the studio.\u003C/p>\n\u003Cp>But the parts that we're leveraging today are API extensions. The hooks, endpoints, operations are within flows, and, of course, bundles are how we put all these together and distribute them. Alright. So how do we make Directus and Ingest play nice? And this was the part that, I did the heavy lifting on with some help from Dan and his team.\u003C/p>\n\u003Cp>You're still blown away by how easy it is to write ingest functions. It really just kinda gets out of the way. And and once you guys go through this process and set this up and and don't worry. Wait. There's more type of moment here.\u003C/p>\n\u003Cp>We're we're gonna give you the whole code base as the bonus after this, so don't worry about taking notes or screenshots or anything like that. But, once you've got this set up, then you can build really incredible stuff, with the incredible pace, I would say. Alright, so I'm going to open up my code editor here and we're gonna take a look at the first piece of the puzzle which is our environment setup. And for Directus projects this almost always starts with a Docker composed file. So this is the pretty standard boilerplate Docker composed from, our documentation.\u003C/p>\n\u003Cp>I've I've done just a little bit of cleanup here and extracted some of these things out to environment variables versus, just inlining them. 
And a couple things that I want to note, when you spin up Directus, especially, like, using our standard Docker Compose, which we're using Postgres for the database, Directus will create this, this folder structure for you with the database, the extensions. This template is is just for applying the schema that we've got for this example, but, it will create all of that for you. And for a cleaner DX, I've basically just extracted this custom Directus extension out to the root in a queue folder. So this entire code base, I've I've gone through the trouble of trying to make comments, to to add a little bit of what I was thinking as we went through here.\u003C/p>\n\u003Cp>But all the the rest of this is pretty standard with the exception of a different port. I'm sure Dan can relate to this as you're developing. You've got, like, 35 instances of your product running on your local machine at one given time, so you gotta switch these ports over. The only addition here is the ingest dev server. So the ingest dev server, they've got a a docker image that you could pull.\u003C/p>\n\u003Cp>You can also run this via NPM too. Right? Dan, a node?\u003C/p>\n\u003Cp>Speaker 1: Yeah. Yeah. You can. You can install the binary via MPX or NPM.\u003C/p>\n\u003Cp>Speaker 0: Perfect. Perfect. Yeah. So, you know, we highly recommend Docker if you're working with Directus, but, you know, you could run this, ingest dev server using NPM if you prefer. But I just wanted to point this out like we've got it pointing to our direct assistance, and this is running on port eight two eight eight.\u003C/p>\n\u003Cp>The dev server is amazing for running locally. It's almost, identical to the ingest cloud dashboard, with a few exceptions. 
And, you know, one thing I do wanna call out, and I think Dan already iterated on it, is your ingest functions, unless you're doing, like, the the LLM infer that that Dan mentioned, these are all running on your same Directus instance whether you're using the dev server or ingest cloud. So I just wanted to call that piece out. Now for the environment that we've got set up here, you could see when you go to production and just has event signing keys or event keys and signing keys so you could keep, things very secure.\u003C/p>\n\u003Cp>For purposes here, we're just running, on the dev mode, keep it lightweight, make it easy to iterate. One thing to note, when we get into the translation workflow, we are using DeepL, so I've just got that environment variable. So when we are configuring a Directus extension, Directus has a extensions SDK that makes this process super easy. I just bang in this command into my terminal, n p x create direct us extension. Make sure you use the at latest tag to to pull that.\u003C/p>\n\u003Cp>And what you're going to initialize, I'll just show you kinda what this looks like, is a bundle extension. MPX Directus create extension at latest. This will ask you to install, and then we will go through and choose a bundle. And this is where my Internet just totally craps the bed apparently. Too many connections at once.\u003C/p>\n\u003Cp>Okay. Yep. So we scroll down, we hit a bundle, and a bundle is just a collection of these extensions. Now once you initialize that, it will look something like this. Where are we?\u003C/p>\n\u003Cp>So I've I've got this in our queue set up. You could see here we've got our Directus extension. It is a type of bundle, and then to add extensions to that bundle you run npm add or npm run add or p npm add, whichever package manager that you're using. So as far as the ingest setup here, let's dive into what that actually looks like. 
And, again, we're gonna give you access to all of this code.\u003C/p>\n\u003Cp>So at the high level here, we've got a function folder that stores all of our functions. We've got some hooks that we'll take a look at, and then we have this ingest client. So how do we set this up? How do we make sure it connects securely and plays nice with Directus? We are pulling in from the ingest package, and we're gonna skip this little middleware piece, but this create ingest client basically just returning a new ingest client that we can use to invoke these functions.\u003C/p>\n\u003Cp>Now the middleware piece here is rather important. Basically, all we're doing here is some dependency injection to use direct to services, like creating an item, updating an item, sending a notification to our user. So that's an important piece of the puzzle. There's a a couple things like a helper function in here to set the directest context so we can leverage that. And then we're just basically using a singleton pattern for this ingest client so that, you know, we're we're using the same client over and over.\u003C/p>\n\u003Cp>Setting this up and, you know, ingest needs to be able to serve those functions. So, in that we've got a endpoint that we define. So this is a custom direct us endpoint. You know, you can see some some standard, imports here. This is where we're actually going to import those functions.\u003C/p>\n\u003Cp>We'll show that in in just a few, but we're basically defining an endpoint. The ID here is gonna be the route that this gets served on. So, I'm using ingest. If I go to local host eight zero eight eight slash ingest, that's where my functions will be served from. And you can see here when we're defining an endpoint, we get the express router instance, and then we get the direct us context.\u003C/p>\n\u003Cp>And that is what we are calling here or or what we're passing when we set the directest context so that ingest has access to those directest services. 
Very important piece. If you're setting this up on your own, don't, don't forget that part of it. Last but not least, we'll basically just serve the ingest client here. You can see the syntax we're passing in the client that we've set up, and then we're giving it the functions to serve.\u003C/p>\n\u003Cp>Now the last piece of making these actually talk to each other is the direct us hooks. Right? So, we've got our hooks set up. The famous handler. I see.\u003C/p>\n\u003Cp>Not sure what you mean by that, Bazar, but, I we can unpack that in the q and a for sure. Alright. So hooks. Right? Again, we're just defining these actions that we want to run whenever a certain event occurs.\u003C/p>\n\u003Cp>And I was I was using I I won't say I was using this wrong, but Dan definitely gave me a level up, while we were prepping for this webinar. So, breaking these down. Right? We're using the action hooks inside Directus. So I only want to invoke a ingest function after a certain thing occurs, not not necessarily before.\u003C/p>\n\u003Cp>So if we break this down, whenever a post so we'll get into our data model in a moment, but whenever our post gets updated, we are going to send, ingest an event that our posts were updated and we're gonna pass the the event from Directus and this accountability object from our context, which is basically this is the user permissions and, like, the user ID and things like that. Is this user going to have access to actually update that post, after we do the translations that we take a look at. So\u003C/p>\n\u003Cp>Speaker 1: one\u003C/p>\n\u003Cp>Speaker 0: of the things that I I do wanna recommend as you go through this, mirror the event names inside your ingest functions from your direct us, events. So you could see this syntax here. Basically, I've just added direct us in case we've got some other service that we want to send events to ingest from. 
But here, I've just mirrored the syntax and the reason why, and Dan schooled me on this, is it makes it easier to debug and trace these things through your application, especially if you want to trigger multiple ingest functions on a post update or when a post gets created. Alright.\u003C/p>\n\u003Cp>So the last piece of our puzzle. Right? We've added hooks. We will create our ingest functions, and we'll take a look at that. We'll share this amazing slide deck at the end of this as well.\u003C/p>\n\u003Cp>But we've got a full guide if you've already got a direct us project and you just wanna walk through integrating this. We've got a full guide that we've written on our documentation for you to check out as well. Alright. So, Dan, I'm gonna bring you back. This was your diagram.\u003C/p>\n\u003Cp>You know, maybe maybe kinda touch on how this works a bit.\u003C/p>\n\u003Cp>Speaker 1: Yeah. Yeah. So I think, Brent really laid it out really well here in that. He just showed where with the extension and direct us hooks, that's where we're we're hooking to these actions and just broadcasting basically events over to ingest. So So those events, are basically declaring what happened within the direct to simple, system.\u003C/p>\n\u003Cp>And then our ingest functions, will declare when they run. Right? We we saw that before when I walked through some code. You declare when something happens, this is when I wanna run. So instead of directly invoking jobs, this allows you to decouple and also build things independently and do things like fan out, as Brian alluded to.\u003C/p>\n\u003Cp>So the ingest system basically understands what your function, your workflows, want to do when, and it basically routes them through back to to, to your server. So at the highest level, that's how we're stitching together the hooks with events ingest and then back to the back to the system to to call your code.\u003C/p>\n\u003Cp>Speaker 0: Beautiful. Beautiful. 
I I love the love the diagram, and it like, this was a a big reason why we shifted to this format to make it feel a little more interactive, a little more fun. So all that said. Right?\u003C/p>\n\u003Cp>Now to the main event of, like, hey. We we've got this set up. These two are talking together. How are we going to build this advanced workflow? What are we gonna build?\u003C/p>\n\u003Cp>So we're gonna show you guys content translations that are automatic and, of course, durable, and I will say totally unbreakable because I know I've written a lot of the code, but but very, very durable, and, like, an incredible workflow for, anybody on your content team that needs to handle translations. So if you've worked with content translations and other systems, this GIF probably holds true. It is so much fun, probably involving, like, a lot of spreadsheets, a lot of working back and forth with either different contractors or different team members. Directus makes that whole process a lot easier. We've got a beautiful interface for it, and, I think Matt on our team is gonna kill me for saying beautiful intuitive interface.\u003C/p>\n\u003Cp>But let's dive into why this why this sucks. Right? We're gonna do this manually as kind of the first step that usually involves those spreadsheets, then we can graduate to APIs. The one we're using today is DeepL, but, you know, Chat GPT and some of the other LLMs have gotten incredibly good at translations. Kind of a toss-up there between speed, DeepL, I've found is is really quick, and, it seems to be highly accurate.\u003C/p>\n\u003Cp>But as you get into that, especially when you're translating a lot of content, you run into, hey. This is gonna take a while. And as Dan alluded to, I've already ran into this, like, 35 times through the, building of this thing is, oops, we hit the rate limit of the APIs or one of those API calls fails. What happens? 
Right?\u003C/p>\n\u003Cp>Then we lose not only our data, so, you know, we just hope and pray that there's some logs that we can go back and get some of that data, from their side or, you know, just evaporates. So, this is what we seek to fix inside this workflow. And before I show you Directus and Ingest, I want to take a look at our our data model at a high level. Now, this is a simplified example, and I guarantee you when you go to production, you've got a lot of content that you're gonna translate that it probably has a more complex structure than this. But, I do wanna give you this, like, again, we're gonna give you the whole code base.\u003C/p>\n\u003Cp>You could take this and run with it, and so this starting point will make it easy for you to adapt to those, those more complex models. At the high level, we've got a post. The post has a title, a slug, and some content. Now, there's also a languages collection inside the Directus instance. Whenever you use our translations interface, we will create that for you if you don't have it already.\u003C/p>\n\u003Cp>The format is pretty standard. We have a code like in US, the name English direction, and then one of the additions I've added here is just a DeepL translation code because their API, doesn't necessarily follow the standard ISO codes. And then for each, post that we're gonna translate we have a relationship to a collection or a SQL table called post translations. So within that we have a pointer back to our language and then we have our title, slug, and content. So at a high level that's the data model we're working with.\u003C/p>\n\u003Cp>Let's kind of pull this up and and see what this actually looks like. Alright. Dan's gonna laugh at me here. I'm sure to mess up Arc here. Still haven't mastered the side by side in Arc.\u003C/p>\n\u003Cp>So over here on the left, I've got our, ingest dev server up and running. 
If I go in here, you could see I I don't have any runs yet, But over here on the right is our special themed version of Directus just for the ingest webinar. I really dig the the black and green vibes that you guys have on the website, Dan, by the way.\u003C/p>\n\u003Cp>Speaker 1: Same. Okay.\u003C/p>\n\u003Cp>Speaker 0: Nice. Alright. So, if we take a look at this, right, this is a beautiful gosh. I gotta stop saying beautiful, Matt. Intuitive interface for all of our content editors.\u003C/p>\n\u003Cp>We could see the default language up here at the top. So we're writing this in English. We're gonna pass that to the DeepL API. When we send this translation over, you know, I could set this and and write in whatever default language that I want, and we'll handle the translations. Directus gives you this beautiful side by side view for your translations, and we can see that all of these different languages in our system are fully translated.\u003C/p>\n\u003Cp>So, the flow that we're gonna set up, and I'll I'll walk through this in a minute, is we're gonna take any post anytime a a change happens or we create a new post, we're gonna fetch all the languages that we have that we wanna translate content for, and we're gonna go and actually translate that. Beautiful. Alright. So let's take a look at this invincible translation workflow. And usually I do this kinda in code, at the top of my document.\u003C/p>\n\u003Cp>I break this down. Since we did this format, I wanted to make it a little more visual, and I'll still walk you through the actual code for this. So at the start of this, what will happen and what I left off of this diagram is the user event. Right? User creates a new post, and then that kicks off the process here.\u003C/p>\n\u003Cp>So we're gonna send the post ID, we call it a key, to this workflow. 
And the first step is gonna be normalizing these event keys because the Directus, event emitter, like if we do post dot update we get an array of keys versus like post dot create we just get a string for the key. So we're gonna normalize those keys. We'll check and see if relevant fields have changed. So if nothing has changed on an update, doesn't make any sense to actually build these translations again.\u003C/p>\n\u003Cp>If something has changed, we're going to retrieve all the translations for that existing post, and that's an important piece. Again, you know, we don't wanna translate content if we don't have to, if it hasn't changed. This is gonna be using, Directus item service, so we'll dive into the code in just a moment. We have, we'll get all of our available languages via the direct Us item service again. We'll use both of those to build a list of translation items.\u003C/p>\n\u003Cp>So what do we actually need to translate? We'll fire those off using, the ingest step functions to the DeepL API and we're gonna do that in parallel and we'll also loop over those so we get that nice tracking and observability. We'll log any errors and then when we get that back we're going to upsert into the database and potentially notify the user. So that's the flow at a high level. Let's take a look at the actual code.\u003C/p>\n\u003Cp>And, Dan, if I gloss over something or I miss something on the ingest side, definitely call me out for it.\u003C/p>\n\u003Cp>Speaker 1: Will do.\u003C/p>\n\u003Cp>Speaker 0: Perfect. Alright. So, again, you can see here, this is, these are the two hooks that we're using to actually manage this workflow. So whenever an item gets updated, we send ingest, hey. We updated this, or we say, hey.\u003C/p>\n\u003Cp>We created this. And then our function looks like this. And, this is not the shortest function I've ever written, but definitely not the longest. I'll say that. 
So, again, an outline of the translate workflow, what I found is super helpful for me is just to quickly outline the logic at the top of each one of these.\u003C/p>\n\u003Cp>And if we go through, we could see that we're importing that ingest client. We've got the DeepL API, just their node client that we're gonna use, and we've got some types that we're pulling in to, make the TypeScript compiler TypeScript gods happy. So, we've got some translatable fields here. Again, it just defining some constants. These are the only fields that we wanna translate.\u003C/p>\n\u003Cp>You could easily set this up to be dynamic, and just defining some of the the DeepL params that we're gonna use in our API call. So when we get into the meat of the ingest function here, you can see we've got an ID. We've got our name just describing what this is. And then, you can trigger an ingest function, on any number of events. So the standard syntax here is an object with an event property, but if you want to have multiple triggers for this function, you could just pass an array of objects.\u003C/p>\n\u003Cp>Great. So onto our handler function, we're getting the event and the step, which is standard syntax and Jest is giving us that. And then we're also pulling out this direct us context that we added through that middleware. So through that, we're gonna get our services, we're gonna get our schema, our EMV variables, which is gonna give us the DeepL API key. So the first step in this function is normalizing those keys.\u003C/p>\n\u003Cp>You could see all of that code here. What's notably missing is the step functions that we saw earlier. And originally, I had these wrapped, but, a a nice little tip that Dan gave me is if this, the code that you're running doesn't actually mutate external state or depend on external state, you don't necessarily have to wrap it in a in one of the ingest step functions. Next up, we'll get our payload from the event. 
We'll check that to see if we have any translatable content included.\u003C/p>\n\u003Cp>And if we don't have any translated translatable content, then we can just return early. Right? Next, we'll get our translator. So this is the DeepL client. We'll get our schema from Directus, and we're gonna init these item services.\u003C/p>\n\u003Cp>So this is how we talk to the database, on the Directus side of it. There's just a little helper function down at the bottom of this file that that makes that a little less verbose, and then we run into our first step function. So here, we're going to get the current post, and this is just a, a service call by the post service. So we're gonna read by the query. We're gonna look for the post with the ID that we were passed in the event and return not just the root level fields, but also the translations that are attached to that post.\u003C/p>\n\u003Cp>So if we look at that in, like, the Directus UI to give you an example, we're not only gonna fetch this information, we're gonna fetch all the individual translations. And that is one of my favorite features of Directus is, being able to fetch the data that I need in a single API call. So I could go deeper into this if I wanted to, you know, three, four, five different levels. Eventually, you'll reach a max where you you don't wanna go, but, depending on the data that you've got, using these asterisks as a wildcard is incredibly helpful for local development. Now, going further, again, we're just going to have a step function that, will cancel this whole thing.\u003C/p>\n\u003Cp>If we can't get the languages from Directus, we don't know what to translate. And this was a a last minute addition. And, Dan, could you talk about, like, the retry logic just a little bit?\u003C/p>\n\u003Cp>Speaker 1: Yeah. As Ingest handles, errors automatically and does retries, some errors you might anticipate and say, this is a non retryable setup. 
So if I'm missing the API key, might as well not retry it because it's just not gonna work. So in that sense, you, ingest allows, includes a custom function, basically, which allows ingest to say, you know what? Let's stop here and not retry anything else.\u003C/p>\n\u003Cp>So that's what Brian is using here where this is a nonrecoverable error kinda situation. So but, typically, you can throw errors to customers and whatnot, and, those will all be retried automatically. You can even catch them and handle them however you want as well.\u003C/p>\n\u003Cp>Speaker 0: Beautiful. Beautiful. Thank you, Diane. Alright. Let me try to find where we were at.\u003C/p>\n\u003Cp>Okay. So we've got our languages, then we move on to the next step, which is actually building our translation list. Excuse me, guys. Dealing with six not six kids, but sick kids here at the house. Always a struggle.\u003C/p>\n\u003Cp>So here in this step function, we're basically just building up a list of the translations that we want. Right? So we're looping through all of those, posts that we've got, making sure, you know, we've got the fields that we wanna translate, and then we're basically building an array for those things that we'll we'll pass to the next step as we scroll down, which will be actually translating all these items in parallel. So, here, we're using promise dot all to fire these all at once. And, again, I think hey.\u003C/p>\n\u003Cp>Dan, you mentioned this, like, it like, defining a unique ID for these was not not strictly necessary, but, maybe talk about that for a minute if you don't mind.\u003C/p>\n\u003Cp>Speaker 1: Yeah. Yeah. When you're executing in a loop, it's or in something like here where you're paralyzing with promise dot all. You don't need to. Ingest basically takes this and under like, understands, like, internally.\u003C/p>\n\u003Cp>You could check out though the SDK as well if you're curious about how it works. 
Basically, it takes the step ID and and and appends some sort of, iterator and creates a hash. So, automatically, it, make sure that if two steps have the same ID, it is it will, like, not they won't overwrite each other conflict. But for the sake of debugging, which I think is a great idea of what Brian's done here is you can dynamically set these keys, in your in your loop, which makes it easier for what Brian will show in just a minute, in the UI. Yeah.\u003C/p>\n\u003Cp>Great.\u003C/p>\n\u003Cp>Speaker 0: Alright. So we go through we send all these to DeepL. We get all of those things back, and then, the final two steps here, we're basically, again, using promises to do all of these upserts. So, when you're using these services in, like, inside the actual directest, like, API endpoints or hooks, there is this upsert, which, if anybody on the Directus team is listening, would love to have the upsert on the SDK. Just put in a nod for that one.\u003C/p>\n\u003Cp>But, this makes it super easy to, upstart content. Super simple. Like, hey. If this ID doesn't exist, we're gonna create the translation here. And then last but not least, we're we want to notify the user, hey.\u003C/p>\n\u003Cp>Your translations are done. So you can go check them out, because this is running in the background. Awesome. So that's the flow. Let's take a look at the UI.\u003C/p>\n\u003Cp>How does this work? Let's, what do we what are we gonna translate? Dan, do you have any thoughts?\u003C/p>\n\u003Cp>Speaker 1: I don't know. You hit someone good yesterday. It was just, like, hello from\u003C/p>\n\u003Cp>Speaker 0: Hello from Dan and Bryant. Alright. So we're going to throw this in. What do we have? This is an amazing webinar.\u003C/p>\n\u003Cp>This is not me. This is somebody in the chat. We're not saying we're amazing. But alright. 
Here we go.\u003C/p>\n\u003Cp>So now what will happen as soon as I save this, over on the left, we should see the event being fired to ingest, unless I have done something totally wrong. And and just to prove that I'm not pulling any switcheroo on you guys, we don't see any translations there. So immediately after I've created that new post, now we could see, the ingest dev server and I get all this observability, all these steps that ran within the actual flow. So we get all of those steps broken down and you can see here we've got the individual steps within that loop that we use those specific IDs for. And as I go through here, you could see the output for each one of these.\u003C/p>\n\u003Cp>So, I don't speak Russian. Not sure if you do, Dan, or not. No. So there we go. So we could see here's the actual content that's that's getting translated.\u003C/p>\n\u003Cp>There's the slug. There's the title, etcetera, in all the different languages that we have set up. And this it's like seeing this together was, like, when it really hooked for me of, like, okay. Great. All this is running in the background.\u003C/p>\n\u003Cp>I get all the observability. So, like, when something inevitably screws up, which for me is often, if you catch any of the hundred apps hundred hours episodes. But and, like, having all this at your fingertips is incredibly powerful as a developer. And being a a developer that has all this and is able to build a flow like this that will do all the translations for your team automatically, turns you into a hero, %. So that is the flow.\u003C/p>\n\u003Cp>Dan, any anything to add before we kinda jump into queue?\u003C/p>\n\u003Cp>Speaker 1: Yeah. And what's really nice is I think just, you set it up earlier with Docker Compose, but with everything running on your your machine, you can work and iterate quickly on this flow and not have to worry about, like, conflicting with, you know, bumping into shared resources. 
Like, if you're using something like SQS or something like that on Amazon, you need to provision those things. It becomes a little bit of a nightmare. And what also is nice here is, like, you know, Brian's code works perfectly as we see.\u003C/p>\n\u003Cp>It's all green. But if there was an error, you'll be able to see that span go red. And what's nice about the dev server flow is that it saves the input of your function. So if Brian were to go back to his code base, fix that bug, save it, the dev server would basically you know, the his direct to server would would refresh, reload, and you could click rerun in this dev server, and it would just rerun the function again. So if he, you know, if you hit rerun live, it should just it should just work.\u003C/p>\n\u003Cp>And so this gives you, like, this fast feedback loop. So you're, like, in this kind of hot reload situation where I'm working on my I'm tweaking I'm tweaking. So instead of you having to, like, go to the right, manually click a bunch of buttons in the in the Directus UI, you can have a fast feedback loop. So, like, do it once in there, keep going, you know, tweak the output, look at things, tweak maybe prompts or different things that you might be using, to to create this. So at least, like, this allows you to kind of, hopefully move a lot faster when you're building these these things that can be complex.\u003C/p>\n\u003Cp>Speaker 0: But, yeah, I can't stress that enough. Like, the the speed at which you could iterate with the dev server and, direct us being able to, you know, go in and quickly model a feature and idea, and then also being able to, like, prepare that for scale using Ingest is, again, a great pair. Just, works really well together. Alright, guys. 
So if we move to our amazing slide deck, it is now time for Q and AO.\u003C/p>\n\u003Cp>If you guys have any questions in the chat that we wanna take a look at, Dan, do you spot any that we need to\u003C/p>\n\u003Cp>Speaker 1: I did see a couple questions I could talk to. The first thing I could do was a couple questions on self hosting for Jess side because we we know we can, self host direct us. Right? And I'm sure a lot of people do that as well.\u003C/p>\n\u003Cp>Speaker 0: Yeah. Self hosting is incredibly popular for Directus.\u003C/p>\n\u003Cp>Speaker 1: So I'm sure I'm sure there's a lot of people in the audience that are very curious about self hosting ingest as well. And you can self host ingest. The code that, Brian showed that runs in the DevServer, that binary is the exact same binary that you could self host. You can also you can run it in a very lightweight version or you can offload. Ingest has queuing and state history involved like that it that is backed by.\u003C/p>\n\u003Cp>So when it's running locally, it just runs in memory because it's low volume and it's very simple. But when you self host it and you deploy it into your own cloud or wherever you want, you can hook it up to an existing, instance of Postgres or you can also, plug it out and and connect it to a dedicated Redis, maybe running in another container or something like that. It can handle a little bit more scale and handle, like, you know, restarts of your ingest system. So, you know, you can self host. There are certain things that aren't in self host yet, like, some observable observability and metrics.\u003C/p>\n\u003Cp>A lot of those systems were built in ingest cloud. We're gonna be, you know, kind of bringing some of those things down to, to open source as well. But now, you know, all the key features, all the throttling, flow control, defining functions, are all are all there. So you can run that wherever you want and and and self host.\u003C/p>\n\u003Cp>Speaker 0: Amazing. 
Got it. So I on the directed side of it, of course, like, you could self host, we've got a BSL license, which basically, a free to run for anybody under under $5,000,000 in total finances or revenue. So So if you have questions on the license, definitely reach out to our team about that. You can do that through the website.\u003C/p>\n\u003Cp>What other questions do we have? I think there was one that I saw that, was super helpful. I like, comparing I like, when I first came into Ingest, I'd heard of Temporal, worked with it a bit. I can't find this one in the chat now, but, like, how do you guys stack up against temporal, Dan?\u003C/p>\n\u003Cp>Speaker 1: Yeah. Yeah. We that's a I think it's a great question. You know, especially with the term durable execution, temporal's, you know, in its essence, describes itself as a durable execution engine. So it is dead focused on a, what we believe is just like the durable execution of the function.\u003C/p>\n\u003Cp>Actually, the logic. Right? Is when something fails, it does checkpoints and it and it retries. So in that sense, there there is similarities. Right?\u003C/p>\n\u003Cp>But we consider that durable execution is just like a means to an end. Right? It is it is a feature. It is not the whole platform. So what ingest really, layers on is a couple things.\u003C/p>\n\u003Cp>We have an event based approach as Brian showed with the hooks, so you can fan out and you can replay and do more things, have a little bit more flexibility with events. And if you're someone who likes events, like, I'm sure that that resonates. And if you haven't, give it a try. And all the flow control and advanced queuing is one of the things that is unique to ingest and, is is in this self self hosted open source version as well is all this reliable flow control. 
So when people are building these systems, often you don't just wanna execute a job and run it to completion.\u003C/p>\n\u003Cp>You need to manage how fast it's processing, this job, you know, how many times per minute you might run something. Maybe I wanna delay, I wanna debounce something, I wanna rate limit this job, run things in batch processing. Maybe just dynamically say, when there's 20 posts that have been published, let's execute this batch instead of saying, you know, let's just push these 20 items in one big blob. So there's a a a lot of differences in that sense of, like, what we've built around. And I think one of the things that also is true with the ingest SDK is it doesn't mess with the runtime.\u003C/p>\n\u003Cp>If you have used temporal, you in the TypeScript or JavaScript, you know, SDK, temporal does something where they kind of, like, wrap your logic and certain things like random doesn't work. So there's some gotchas that's like, I don't know what's going on with this runtime or it's going to wrap some of your things and you might not it might just not be native code, but we've fundamentally chosen to build ingest, say, anything that you're using works. It's very easy to look in the source and see where things are running. It's very it aims to be a very thin layer, so there's no weird kind of, like, gotchas and things that I need to know. And, generally, like, you know, ingest is very dynamic and defining steps and everything is very fluid.\u003C/p>\n\u003Cp>So there aren't, there's a lot of friction to, like, the rigidity that you might find with other solutions. So I those are just a few things. There's many more, that you could check out, like, on our site and whatnot, but or ping me afterwards if you if you if you're curious.\u003C/p>\n\u003Cp>Speaker 0: Yeah. 
And I'm not sure that you could say this, but, like, the the syntax that that I found, like, writing the same sort of thing in in jest is dramatically, easier, and it just jives with the way that my brain works versus, like, some of the verbosity and, like, just how temporal structures things. So, do we have any other questions? I I guess that's gonna sting a bit if we have temporal onto one of the partner webinars as well. But\u003C/p>\n\u003Cp>Speaker 1: I, what is called? I'll mention one someone asked a question about, retries. What happens when something hits hits max retries? The function will be declared as failed. And what, ingest also allows you to do is basically say, you know, if there's a complete outage, say, DeepL's down for twenty four hours or or an hour, all your functions are failing and all the retries are exhausted, you can use ingest to say, select, you know, between these two time stamps, anything that failed, replay them so you can do bulk retrace, retries, we call replays of, of those functions.\u003C/p>\n\u003Cp>So you can recover from systems because we persist all the inputs, you know, if that's helpful.\u003C/p>\n\u003Cp>Speaker 0: Definitely. And then, I think probably the last question before we wrap up is, can you run webhooks in the ingest server, the dev server?\u003C/p>\n\u003Cp>Speaker 1: You, you can. You'll have to configure your webhooks in in in in I guess it depends on, like, how you're how you're running things. So, Ingest Cloud, it has webhooks and and transforms. And locally, you just need to write a little logic to just, like, wrap that transform to simulate what, what is happening. We'll be bringing in in the future a synchronization, some thing with Cloud to make that a little bit more easy and bring them into the DevServer.\u003C/p>\n\u003Cp>But fundamentally, the webhooks are just the same API endpoint that ingest dot send is using, that Brian showed in the in those, in those direct hooks. 
So it's just sending JSON payloads, and that's what, what webhooks primarily are. So, it's, it's pretty easy to to utilize it and build around. There's also a few different docs on our website about that if you're curious about webhooks.\u003C/p>\n\u003Cp>Speaker 0: Yeah. Awesome. I don't I don't see any other questions from the team. We're a little bit over, Dan, but I I I man, I appreciate you coming on. Like, before we kinda get into the awkward outro, I do want to just reiterate, like, for anybody still here, if you've registered for this, we'll hit you with an email with all the links to the repo, this amazing slide deck that we put together.\u003C/p>\n\u003Cp>So don't worry about that. That'll be coming in the next, couple hours, so just be patient. But wrapping this thing up, we we hope this was helpful for you guys. Please send us your feedback. We really enjoy doing these webinars, showcasing other tools and, things that that help you build faster.\u003C/p>\n\u003Cp>And, Dan, thank you for joining. Really appreciate the, you know, the collaborative effort on this thing. Learned a ton about it. And, for everybody else who like, next steps for you guys, what what does that look like? You know, you wanna sign off a little bit?\u003C/p>\n\u003Cp>Speaker 1: Yeah. Yeah. Thanks for thanks for having me, and, thanks for doing all the leg work. You wrote all the code. You built all of it.\u003C/p>\n\u003Cp>So it's, it was pretty awesome to see this, and I think it's a great example. I think if you really wanna figure it out and you want some more, there'll be the follow-up email. Brian also has a really great tutorial that he put together, which has a lot of detail. It's pretty incredible, but definitely check that out. 
I'm sure that'll be in the follow-up email.\u003C/p>\n\u003Cp>And, then also, I'd say, like, if you just wanna tinker a little bit, go with ingest, go check out, one of the quick starts on our docs and just go tinker a little bit with the dev server, build some different things with some dummy code, and then kind of, that's easy way to just kinda get started, and then you can kind of dive in for the in-depth stuff with Directus using, using Brian's tutorial that he wrote. So thanks for everybody joining. And if you ever have any questions, we, we do have a Discord community. You can find the link on our site, or you can always reach out to us, contact us anytime with ingest dot com. That's 2, by the way.\u003C/p>\n\u003Cp>Just always remember that one.\u003C/p>\n\u003Cp>Speaker 0: Two n's. Yeah. Perfect. Alright, Dan. Thank you.\u003C/p>\n\u003Cp>Thanks for the audience. That's a wrap. We'll see you all.\u003C/p>\n\u003Cp>Speaker 1: Thank you.\u003C/p>","With Directus and Inngest. I'm your host on the Directus side, Brian Gillespie, developer advocate, growth engineer, professional hack and slash developer over here at Directus. Super excited for today. Got a special guest that I'll introduce in just a moment. We should have a good time. We kinda reworked the slide deck a bit last minute, as I tend to do. But before we dive into introductions, let's just take a look at what's on the agenda. Alright. So we'll go through some super awkward introductions in just a moment. We'll deep dive into what is ingest, what powers ingest behind the scenes, why you would wanna use ingest, then we'll get into why Directus and ingest are a great pair together. And then we're gonna run through this advanced content workflow after we've, walked through this integration of invincible automatic localization, for your content, which is always a beast. So with that, I want to kick over to mister Dan. Dan, how are you, sir? Great. Great, Brian. Thanks for thanks for having me. 
Stoked about stoked about doing this. Thanks everyone for joining as well. My name is Dan, and I'm the CTO and cofounder at a company called Jest. And we'll, we'll get into a little bit more about what that is and, what we're doing today and, why you've joined. But, yeah, just thanks for joining, and I'm stoked about the stuff that Brian, Brian kicked off today. Yeah. Yeah. And, Abel, like, for for some context and backstory, like, I first found ingest, we were working on a project we were calling Event OS. And, Direct has made it a breeze to set this up, get some APIs to work with inside a Nuxt application, but it's not dissimilar from the platform you use to register for this event. One of the the main challenges was, like, how do we send notifications and, like, how do we set up reminders an hour before an event and still have that be robust, like, cancellable in case we move the event, you know, and all sorts of things that we're gonna cover today. So that was my introduction to ingest, and frankly, I was kinda blown away. I I don't think if Matt is here in the chat, I was like, man, we gotta get these guys on a partner webinar, and voila, here we are. So with that, we'll just drag the little slide deck over. And what's up next is what is ingest? So I'll kick it over to you, Dan, and, you know, let you dive into ingest a little bit deeper for those who are unfamiliar. Great. Thanks, Brian. Appreciate it. So I'm gonna give you a little high level overview today, and we're a technical audience, so have a little bit more of a a technical variant on slides today. So, if anyone needs things a little bit bigger, tell me to bump it up in the chat. I'll try to respond if there's anything. So yeah. So, basically, what is Ingest? Okay. What you can basically think about it as the at the high level is that Ingest is a workflow orchestration platform that enables you to run reliable asynchronous jobs anywhere. 
We're gonna get come back to that in just a just a minute or two. So why would you use ingest for this this task, this need? What ingest allows you to do is to define these step functions. So you're creating workflows directly in your existing code base, use our SDKs, and any logic that you're used to. There's no DAGs or really obscure complex DSLs to learn to build these, powerful functions using regular code. So they're also reliable by default. So everything that you run with ingest automatically is retries. I think we've probably a lot of people here have built systems where, things go wrong. An API goes down for a few minutes, there's a blip, there's a networking thing, there's a race condition that happens in your database, and all simple retry a few seconds later would solve your problem. So ingest does that by default, and it also includes, something that is called durable execution. You may have heard this term, and if you haven't, you can think of this as your code is designed to be fault tolerant and survive catastrophic failovers, basically. So I won't get too deep into that topic today, but there's plenty of resources that we can send you after if you are curious. The last thing is that we've built in queuing and flow control right into ingest so that you can go to production confidently. What that means is that there are concurrency management built into the system that you don't need to have run a a worker or a queue or anything like that. You just define it in your code very simply. You also have controls for multi tenant support, which is another advanced topic we won't get into yet today. 
But you can just kind of anything that you might have built into some sort of, asynchronous system that you might need is there at your fingertips with easy configuration like throttling, rate limiting, delaying the start of jobs, debouncing, batch processing, dynamically prioritize prioritizing certain jobs, as well as running work in parallel or fanning out to multiple jobs. So that's at the high level, but realistically, okay, what can I build with ingest? So let's just you know, at the highest level in general thing is ingest is a general purpose tool, and you can run any background task. Something simple, just one function, one operation to something that is that is very complex and has to orchestrate between different process. So you also have the control in your hands. You can control the flow of the logic with if statements, not DAGs, and you can kinda build whatever you want. So I'll give you a little taste of, you know, with this audience, which is technical, and I'm sure a lot of y'all are interested or already users of Directus. You know, what might work in or make sense in your in your situation or what you might be building? So one of the first things that's very hot, sure a lot of you are dabbling or building already, pushing these things to production is AI workflows. So you can handle the complexity of things like flaky LOM API calls that might fail, things like chaining, building agent loops, calling tools if you're getting that advanced, or do things like human in the loop flows. You know, you're you're proving the work that that, LLM did for you. So this might be a research AI workflow. Go out and get all this data and pull it back and store it somewhere, or you might be creating content. That kinda leads me into content processing, which is where you might be pre or post processing some content. It might be a blog post or could be static assets that are in your system like images or videos. 
You might be doing things like translating, resizing, transcribing more to those assets. You might need to kind of create a complex pipeline. So one example here is that you you might be taking data, from your, your Directus system and you might be piping that into maybe a React SVG library and generating visual assets on the fly for your content and then uploading it to where you're storing, your assets. You know, another thing there, like, kind of beyond content is is user journey automation. Brian said at the top something about scheduling emails with events and and and and that and and whatnot. Within just, sorry, Brian. No. No. That was a huge one for me, man. That was, that was, like, my introduction to it, and I think that was, like, one of the one of the first case studies or something I ran across in the docs of, like, hey. Just something as simple as let me sign a user up and then wait a day and send them something Yeah. Incredibly convoluted in other systems. Yeah. And and you know what? You might have this thing where you could some systems might allow you to schedule an email in the future, or you might have a cron based system. But what if you could just, you know, prepare something, do some operation maybe like you're sending somebody you know, you're sending someone an invite, and then that invite email goes out. And then twenty four hours before the event, you might wanna schedule and say, let's render this email. Might be using, say, React email, which is awesome, and, pulling in data from different sources and sending that person a personalized email twenty four hours ahead. Or you might wanna customize, hey. This user is in this time stamp. So you might wanna, like, have more control sorry, time, time stamp, time zone. So you might wanna have, like, a time aware thing. 
So you could use something like another complex tool or, whatever is within an event platform that you're doing or something like Intercom or HubSpot, but that can be either not very controllable. You can't bring in custom logic and code. It's very static. They're crazy expensive, for those purposes. So you kinda do whatever you want here. Build flows that are scheduling delays, Slack notification, even like things like provisioning resources or provisioning access. So, that's a fun kinda use case. You can kinda build anything triggered with these vents. I'll go through a couple of these other ones because these are kinda secondary. But, you know, if you're integrating your system with a third party webhook, maybe something like integrating Stripe payments somehow into in into your system, or maybe you're using a system like Mux, and you're uploading your video assets to Mux, but you need to do something in your Directus CMS after those videos are uploaded, you can use a webhook. And you can those events can go to ingest and trigger your functions to run your Directus instance. So you can kind of build more advanced integrations with more control that you might need. And the last thing that happens in a lot of systems is there's always data from one source to another to another place. So you might have just data import jobs. Kind of think about it like you're building ETL jobs or just pipelines or synchronization, but you don't, you know, you you don't need anything like a complex data science tool or, something that a data engineer might use like an airflow or something like that. So you can build kind of anything in this, but I hope these are kind of ideas that might work with, what you're already doing with Directus. So I'll go from there, and I'll, I'll get to that question in a little bit. We have some time at the end as well. And, I'll go and just show, like, a little bit of functions. 
You this again is a technical audience, so let's just show some code about how you build an Ingest function and how it works. So I'll run through this. Basically, what you're doing here is you're creating the Ingest function. It's pretty simple. You define it in your existing code base along with your directus functions, your directus logic and whatnot, your whole system. You can do things like define queuing or flow control right at the top of the function very easily. So if, for example, you just wanted to run this job, at most 30 times in sixty seconds and you wanted to max out at three retries or 15 retries, you can control these things very easily and the ingest system will just do it automatically for you. You don't need to battle queues and infrastructure for that. You also declaratively define the event that will trigger your function. So in this case, when a blog post is drafted, run this code. And then bay very simply, you define a function right here, which is a handler that has all your code. If you're thinking about what I talked about before which is workflows, steps, and whatnot, when you're defining ingest functions, all you need to do is learn a couple building blocks which are steps. The main one is step dot run. Basically, you can encapsulate some sort of business logic, some sort of side effect, and you can perform an operation in there. Each time you run code within one of these, it's automatically retried if it fails as per your retry policy. And if this does succeed, if there's any failures later in the function, it won't be retried. We cache those results and save them. So it makes your function a little durable if you're familiar with that term or fault tolerant, you know, that's the the a la the, durable execution side of things. So, basically what you're doing is you're defining steps and multiple steps to build out your workflow. You can use normal if logic. You can dynamically define steps on the fly. 
You do not need to predefine them, which is really great. And that's the building blocks. But what's interesting is that there's more power to these these these basic primitives of steps that you have. So here's one example that might tie back to what we were just talking about, which are that you can pause workflows and wait for approval. So you can or wait for other data to happen, other events. So you could build things that run, process some, you know, some data, and then wait for maybe an approval or maybe something else in your system to be updated, like a draft or publish status or something, and you can resume your function. So ingest your code will not be running. Ingest basically holds on to the state of your function. And then when it gets that event, it resumes and restarts your function from where it stopped, doing some different kind of magic and whatnot. So then again, you can use that approval to dynamically, you know, traverse and and run different steps. You can also do things that are a little bit, you know, kind of outside of this this this thing that are that are kind of extending the power, which are making reliable calls to LLMs by offloading those AI calls to ingest servers. You can also do things like what Brian talked about, which are basically use other types of steps like sleeps or delays. So you can delay to an exact time and then resume the code then, or you can delay to an arbitrary number of days or hours or weeks, and then your code resumes. So these are kinda just the the basic building blocks. There's a couple more that I didn't get into. But if you can learn these couple primitives, you can build a lot of complex logic that also is very reliable outside of box. So, you know, going from here, I'll just take a step back and look at the architecture. And, Brian, how are we doing on time right now? We are we're no. We're golden. Can you see? Okay. Yeah. Yeah. No. I just yeah. No. We're beautiful. No worries. 
I I I did just wanna say, like, the hey, like, the the background jobs and and, like, some of the the queuing stuff was what got me in the door. But, like, the the durable execution piece, especially when you're working with, like, the LLM stuff, after you've you've used, like, one of these big models where you could just literally watch credits evaporate when a function fails is, has been a, like, a nice piece for my workflow. So, kudos there. Yeah. Thank you. Thank you. Appreciate that. And, yes, to the to the, to the audience, yes. Using, step dot a I dot infer, if you're running in serverless because with something like Vercel, it can offload it to our servers. So your function stops running. It's not consuming any any compute and it offloads. So that was an easy one to jump in there. Thanks for that question. Yeah. I'll jump into the architecture because I wanna show you how it works with Directus and how the system is. So basically, at at the high level, this is a little, you know, you don't need to know, like, know everything in detail. I'll try to zoom and see if I can pinch in a little bit. There we go. So the ingest system comprises a bunch of things that you might typically build or need to to execute this and do this yourself. So and there's much more beyond that. But the fundamental thing is there's an event API that receives data from anywhere, could be from your direct assistance or the webhook, And there's, something that consumes those events and basically, creates new jobs and enqueues them. It stores the state of those jobs. It stores the history of those jobs in case there's a problem or anything or you wanna observe that later, which we'll get to, then it executes those jobs. And what happens is the ingest knows, okay, I'm running this function and it toasted on your direct instance. Your code is sitting in your direct instance, and it invokes it via HTTP. 
So it securely just calls it when it needs to, invokes the minimum number of work, and then returns to ingest. And then if it needs to resume or continue, it will make other calls as necessary. So if you sleep, it goes back to ingest, then it calls your server again later. So this is the basic kind of principle of how all the systems work together and how there's a queue built into the system that is basically pushing work to your system. And that's why it's really key to have the flow controls. You know, you don't want to you wanna set something like throttling because you don't wanna overwhelm and knock down your system or your database in case you're processing, say, a huge backlog of 10,000 or 10,000,000, jobs or tasks that you need to do. So this is really key and also just shows that the code that you write is hosted on your, your machine. It's where you have it. You keep it along with your existing, direct as code base, which is really great. So I'll, I'll show you a little bit about, like, why do we have a database, what is this dashboard UI. So when you're running these long jobs that are happening behind the scenes, they're often hard to observe. I'm sure that some of you have worked in systems where you're parsing just logs on CloudWatch or something like that. You're trying to figure out what's the status of this job. Okay. Now we need to chuck it into a database so we can observe this. And you're trying to piece together what happened and where things failed. So we get to that one question in one second. So what we have is we have a dashboard that gives you visibility into the system. 
So this is the ingest dashboard and ingest cloud where you can see the the output or status of all the different workflows and functions that you've run-in your system, and then you can expand these to see all the different steps and what might have failed, what have might retried, the outputs of those steps, some metadata that you might pull through like, say, tokens in the model that you might have used to make an AI call or the time stamp when something started or ended. You also can look at inputs, say, okay. What piece of content triggered this? You can debug what actually happened and understand, did this work? You can easily cancel and retry these things or retry them in bulk. There's a lot of operations you could do, but this is the core of, like, this observability that you have into the system. So in also at the, like, the highest level, you can see broad system level metrics, like what functions have failed. Is my import data pipeline failing at a high rate? What what's happening? I need to look into this. So it gives you this kind of overall picture of how the system is go is going where it's it might be doing things always in the background. It's it aren't always automatically visible. And you can run many functions and have all different types of, functions and workflows like generating video transcripts or creating chat completions or daily digest. You you just get the single pane of glass of, like, understanding what's running in my system. And so the point of concurrent runs that, was asked in the, in the, in the chat, what happens is basically, like, ingest, can can execute multiple things at a given time. Depending on your plan and your configuration, you can limit how much concurrency you want. So you can control I only wanna execute 1,000 things at a time, or you might have multiple servers that you've horizontally scaled to handle more load. You can control individually what that is. 
You can also with the multi tenant controls can can control how many concurrent jobs a given user gets. So you can limit it like that in case you're building that type of system. But, ingest handles that because it it it does the it actually processes the the work off the queue internally. It can you basically declaratively say, this is the concurrent number of like, amount of work that I wanna run. So that's a that's a great question though. So, you know, basically, to summarize, like, this part, there's why are we here? We're talking about in ingest and direct us. Right? So tie back a couple things that I that I went over. It's the same code base that you're already running. It's the same server. You get to take right. Use the ingest SDK to define the functions right in the same code base with everything else that you're using with Directus, and there's no additional compute necessary. And, and, Brian, you got some? No. No. I was just jumping back in as we transition. Yeah. I'll, I'll I'll, Brian's gonna go in a lot deeper about this, which I'm which I'm stoked for all y'all to see because he's built some really cool things. And and some of the things again, this is fault tolerance, the async workflows, you know, so you get the confidence, automatic retries, and control over these processes. And remember that steps encapsulate these retryable logic. You can bring existing logic, import new libraries, whatever the heck you want, write any workflow. And one thing that Brian will go deep on this, I think, is super cool about how it works together is Directus, hooks are super cool because it kinda provides this nice link between the direct to system and ingest functions, but I won't spoil anything there. I'll, I'll pass over to Brian to kinda take it from here. But, and then we can go to if there's any other questions, we'd probably go to the the QA afterwards so we can get through some of these things. Some of the questions might be answered. Yeah. Definitely. 
So I pop the questions into the chat. We'll get to them if we can. I'll take the screen back, and we'll briefly cover what is Directus. For those, who are joining, who are not familiar, Directus is I I like to call it LEGO for developers where, like, everything that you need to build a digital experience, whether it's an app, a website, automatic content translation, which we're gonna do today, it it's all there for you. And you're just stacking these bricks together a lot like, what Dan was mentioning with ingest of, you know, these primitives that you have, and you could use them to build really powerful stuff. So Directus will give you intuitive UI, which we'll look at in a moment, a nice, like, admin CMS layer. You get instant rest and GraphQL APIs on top of any SQL database, and this is all self hostable. You can also run-in our cloud as well. And just a little tiny, fine print there of any, should be like most SQL databases. Right? Alright. So why Directus? Why ingest? Why together? A couple things. Like, Dan already mentioned, you know, the hooks that we have in Directus make this super easy to communicate with ingest. But more than that, ingest gives you that durable execution. To say being able to build these powerful workflows in code is tremendous. So the first question that, I was asked when I was putting this webinar together, and and this is from our team was, hey. Why why ingest when we already have automations inside Directus? So if you're still learning Directus, we have an automations module called Directus flows, and it is allows you to define a low code, no code automations. You know, when something happens, do this. These flows are great for short lived automations. 
Some of the things that that Dan showed in the previous slide where we have the syntax of, like, the step function, Directus doesn't have the ability inside a flow to wait for thirty days and send an email or to, wait for another event inside of a flow, without some complicated logic tying it all together. So flows are great for short lived automations or automations that that don't have super complex logic, and ingest is a great complement to flows. And, you know, in the future, I could even see us having an extension to trigger ingest, functions inside a flow. But, as we get into flows versus hooks versus extensions, as as Dan mentioned, Directus flows underneath the hood once you pull off the the mask here, the the Scooby Doo, I kinda mean, is just using Directus hooks. So, Directus hooks, if we pull up my browser, which is always dangerous since I'm using Arc, Directus hooks are just code that fires whenever certain events occur, and you've got a lot of different hooks at your disposal. This is using custom extensions, and we'll kinda cover that in just a minute. But at the core, hooks are allowing you to say, hey. When this event happens at Directus, do this. And, we'll we're leveraging these hooks extensively in this integration. So there's filter hooks, which happen before the event is emitted. So if I wanna run some code to either modify the payload before an item gets created or potentially stop that from occurring, I could do that via a filter hook. And then the ones that we're leveraging today in this workflow are gonna be our action hooks. So when after an item gets created, we wanna do something. In this case, it's going to be sending those events to ingest. Alright. Any questions on hooks before we kinda dive into this? We've also got, like, an endpoint that we're gonna use, and I I just wanna take a moment to touch on one of the most important pieces of Directus is this extensibility. 
So if you go into our documentation, which is a a great starting point after this webinar, take a look at the extensions overview. You could customize every piece of Directus. So you could customize the interfaces, like how the data is displayed inside the studio. But the parts that we're leveraging today are API extensions. The hooks, endpoints, operations are within flows, and, of course, bundles are how we put all these together and distribute them. Alright. So how do we make Directus and Ingest play nice? And this was the part that, I did the heavy lifting on with some help from Dan and his team. You're still blown away by how easy it is to write ingest functions. It really just kinda gets out of the way. And and once you guys go through this process and set this up and and don't worry. Wait. There's more type of moment here. We're we're gonna give you the whole code base as the bonus after this, so don't worry about taking notes or screenshots or anything like that. But, once you've got this set up, then you can build really incredible stuff, with the incredible pace, I would say. Alright, so I'm going to open up my code editor here and we're gonna take a look at the first piece of the puzzle which is our environment setup. And for Directus projects this almost always starts with a Docker composed file. So this is the pretty standard boilerplate Docker composed from, our documentation. I've I've done just a little bit of cleanup here and extracted some of these things out to environment variables versus, just inlining them. And a couple things that I want to note, when you spin up Directus, especially, like, using our standard Docker Compose, which we're using Postgres for the database, Directus will create this, this folder structure for you with the database, the extensions. This template is is just for applying the schema that we've got for this example, but, it will create all of that for you. 
And for a cleaner DX, I've basically just extracted this custom Directus extension out to the root in a queue folder. So this entire code base, I've I've gone through the trouble of trying to make comments, to to add a little bit of what I was thinking as we went through here. But all the the rest of this is pretty standard with the exception of a different port. I'm sure Dan can relate to this as you're developing. You've got, like, 35 instances of your product running on your local machine at one given time, so you gotta switch these ports over. The only addition here is the ingest dev server. So the ingest dev server, they've got a a docker image that you could pull. You can also run this via NPM too. Right? Dan, a node? Yeah. Yeah. You can. You can install the binary via MPX or NPM. Perfect. Perfect. Yeah. So, you know, we highly recommend Docker if you're working with Directus, but, you know, you could run this, ingest dev server using NPM if you prefer. But I just wanted to point this out like we've got it pointing to our direct assistance, and this is running on port eight two eight eight. The dev server is amazing for running locally. It's almost, identical to the ingest cloud dashboard, with a few exceptions. And, you know, one thing I do wanna call out, and I think Dan already iterated on it, is your ingest functions, unless you're doing, like, the the LLM infer that that Dan mentioned, these are all running on your same Directus instance whether you're using the dev server or ingest cloud. So I just wanted to call that piece out. Now for the environment that we've got set up here, you could see when you go to production and just has event signing keys or event keys and signing keys so you could keep, things very secure. For purposes here, we're just running, on the dev mode, keep it lightweight, make it easy to iterate. One thing to note, when we get into the translation workflow, we are using DeepL, so I've just got that environment variable. 
So when we are configuring a Directus extension, Directus has a extensions SDK that makes this process super easy. I just bang in this command into my terminal, n p x create direct us extension. Make sure you use the at latest tag to to pull that. And what you're going to initialize, I'll just show you kinda what this looks like, is a bundle extension. MPX Directus create extension at latest. This will ask you to install, and then we will go through and choose a bundle. And this is where my Internet just totally craps the bed apparently. Too many connections at once. Okay. Yep. So we scroll down, we hit a bundle, and a bundle is just a collection of these extensions. Now once you initialize that, it will look something like this. Where are we? So I've I've got this in our queue set up. You could see here we've got our Directus extension. It is a type of bundle, and then to add extensions to that bundle you run npm add or npm run add or p npm add, whichever package manager that you're using. So as far as the ingest setup here, let's dive into what that actually looks like. And, again, we're gonna give you access to all of this code. So at the high level here, we've got a function folder that stores all of our functions. We've got some hooks that we'll take a look at, and then we have this ingest client. So how do we set this up? How do we make sure it connects securely and plays nice with Directus? We are pulling in from the ingest package, and we're gonna skip this little middleware piece, but this create ingest client basically just returning a new ingest client that we can use to invoke these functions. Now the middleware piece here is rather important. Basically, all we're doing here is some dependency injection to use direct to services, like creating an item, updating an item, sending a notification to our user. So that's an important piece of the puzzle. 
There's a a couple things like a helper function in here to set the directest context so we can leverage that. And then we're just basically using a singleton pattern for this ingest client so that, you know, we're we're using the same client over and over. Setting this up and, you know, ingest needs to be able to serve those functions. So, in that we've got a endpoint that we define. So this is a custom direct us endpoint. You know, you can see some some standard, imports here. This is where we're actually going to import those functions. We'll show that in in just a few, but we're basically defining an endpoint. The ID here is gonna be the route that this gets served on. So, I'm using ingest. If I go to local host eight zero eight eight slash ingest, that's where my functions will be served from. And you can see here when we're defining an endpoint, we get the express router instance, and then we get the direct us context. And that is what we are calling here or or what we're passing when we set the directest context so that ingest has access to those directest services. Very important piece. If you're setting this up on your own, don't, don't forget that part of it. Last but not least, we'll basically just serve the ingest client here. You can see the syntax we're passing in the client that we've set up, and then we're giving it the functions to serve. Now the last piece of making these actually talk to each other is the direct us hooks. Right? So, we've got our hooks set up. The famous handler. I see. Not sure what you mean by that, Bazar, but, I we can unpack that in the q and a for sure. Alright. So hooks. Right? Again, we're just defining these actions that we want to run whenever a certain event occurs. And I was I was using I I won't say I was using this wrong, but Dan definitely gave me a level up, while we were prepping for this webinar. So, breaking these down. Right? We're using the action hooks inside Directus. 
So I only want to invoke a ingest function after a certain thing occurs, not not necessarily before. So if we break this down, whenever a post so we'll get into our data model in a moment, but whenever our post gets updated, we are going to send, ingest an event that our posts were updated and we're gonna pass the the event from Directus and this accountability object from our context, which is basically this is the user permissions and, like, the user ID and things like that. Is this user going to have access to actually update that post, after we do the translations that we take a look at. So one of the things that I I do wanna recommend as you go through this, mirror the event names inside your ingest functions from your direct us, events. So you could see this syntax here. Basically, I've just added direct us in case we've got some other service that we want to send events to ingest from. But here, I've just mirrored the syntax and the reason why, and Dan schooled me on this, is it makes it easier to debug and trace these things through your application, especially if you want to trigger multiple ingest functions on a post update or when a post gets created. Alright. So the last piece of our puzzle. Right? We've added hooks. We will create our ingest functions, and we'll take a look at that. We'll share this amazing slide deck at the end of this as well. But we've got a full guide if you've already got a direct us project and you just wanna walk through integrating this. We've got a full guide that we've written on our documentation for you to check out as well. Alright. So, Dan, I'm gonna bring you back. This was your diagram. You know, maybe maybe kinda touch on how this works a bit. Yeah. Yeah. So I think, Brent really laid it out really well here in that. He just showed where with the extension and direct us hooks, that's where we're we're hooking to these actions and just broadcasting basically events over to ingest. 
So So those events, are basically declaring what happened within the direct to simple, system. And then our ingest functions, will declare when they run. Right? We we saw that before when I walked through some code. You declare when something happens, this is when I wanna run. So instead of directly invoking jobs, this allows you to decouple and also build things independently and do things like fan out, as Brian alluded to. So the ingest system basically understands what your function, your workflows, want to do when, and it basically routes them through back to to, to your server. So at the highest level, that's how we're stitching together the hooks with events ingest and then back to the back to the system to to call your code. Beautiful. Beautiful. I I love the love the diagram, and it like, this was a a big reason why we shifted to this format to make it feel a little more interactive, a little more fun. So all that said. Right? Now to the main event of, like, hey. We we've got this set up. These two are talking together. How are we going to build this advanced workflow? What are we gonna build? So we're gonna show you guys content translations that are automatic and, of course, durable, and I will say totally unbreakable because I know I've written a lot of the code, but but very, very durable, and, like, an incredible workflow for, anybody on your content team that needs to handle translations. So if you've worked with content translations and other systems, this GIF probably holds true. It is so much fun, probably involving, like, a lot of spreadsheets, a lot of working back and forth with either different contractors or different team members. Directus makes that whole process a lot easier. We've got a beautiful interface for it, and, I think Matt on our team is gonna kill me for saying beautiful intuitive interface. But let's dive into why this why this sucks. Right? 
We're gonna do this manually as kind of the first step that usually involves those spreadsheets, then we can graduate to APIs. The one we're using today is DeepL, but, you know, Chat GPT and some of the other LLMs have gotten incredibly good at translations. Kind of a toss-up there between speed, DeepL, I've found is is really quick, and, it seems to be highly accurate. But as you get into that, especially when you're translating a lot of content, you run into, hey. This is gonna take a while. And as Dan alluded to, I've already ran into this, like, 35 times through the, building of this thing is, oops, we hit the rate limit of the APIs or one of those API calls fails. What happens? Right? Then we lose not only our data, so, you know, we just hope and pray that there's some logs that we can go back and get some of that data, from their side or, you know, just evaporates. So, this is what we seek to fix inside this workflow. And before I show you Directus and Ingest, I want to take a look at our our data model at a high level. Now, this is a simplified example, and I guarantee you when you go to production, you've got a lot of content that you're gonna translate that it probably has a more complex structure than this. But, I do wanna give you this, like, again, we're gonna give you the whole code base. You could take this and run with it, and so this starting point will make it easy for you to adapt to those, those more complex models. At the high level, we've got a post. The post has a title, a slug, and some content. Now, there's also a languages collection inside the Directus instance. Whenever you use our translations interface, we will create that for you if you don't have it already. The format is pretty standard. We have a code like in US, the name English direction, and then one of the additions I've added here is just a DeepL translation code because their API, doesn't necessarily follow the standard ISO codes. 
And then for each, post that we're gonna translate we have a relationship to a collection or a SQL table called post translations. So within that we have a pointer back to our language and then we have our title, slug, and content. So at a high level that's the data model we're working with. Let's kind of pull this up and and see what this actually looks like. Alright. Dan's gonna laugh at me here. I'm sure to mess up Arc here. Still haven't mastered the side by side in Arc. So over here on the left, I've got our, ingest dev server up and running. If I go in here, you could see I I don't have any runs yet, But over here on the right is our special themed version of Directus just for the ingest webinar. I really dig the the black and green vibes that you guys have on the website, Dan, by the way. Same. Okay. Nice. Alright. So, if we take a look at this, right, this is a beautiful gosh. I gotta stop saying beautiful, Matt. Intuitive interface for all of our content editors. We could see the default language up here at the top. So we're writing this in English. We're gonna pass that to the DeepL API. When we send this translation over, you know, I could set this and and write in whatever default language that I want, and we'll handle the translations. Directus gives you this beautiful side by side view for your translations, and we can see that all of these different languages in our system are fully translated. So, the flow that we're gonna set up, and I'll I'll walk through this in a minute, is we're gonna take any post anytime a a change happens or we create a new post, we're gonna fetch all the languages that we have that we wanna translate content for, and we're gonna go and actually translate that. Beautiful. Alright. So let's take a look at this invincible translation workflow. And usually I do this kinda in code, at the top of my document. I break this down. 
Since we did this format, I wanted to make it a little more visual, and I'll still walk you through the actual code for this. So at the start of this, what will happen and what I left off of this diagram is the user event. Right? User creates a new post, and then that kicks off the process here. So we're gonna send the post ID, we call it a key, to this workflow. And the first step is gonna be normalizing these event keys because the Directus, event emitter, like if we do post dot update we get an array of keys versus like post dot create we just get a string for the key. So we're gonna normalize those keys. We'll check and see if relevant fields have changed. So if nothing has changed on an update, doesn't make any sense to actually build these translations again. If something has changed, we're going to retrieve all the translations for that existing post, and that's an important piece. Again, you know, we don't wanna translate content if we don't have to, if it hasn't changed. This is gonna be using, Directus item service, so we'll dive into the code in just a moment. We have, we'll get all of our available languages via the direct Us item service again. We'll use both of those to build a list of translation items. So what do we actually need to translate? We'll fire those off using, the ingest step functions to the DeepL API and we're gonna do that in parallel and we'll also loop over those so we get that nice tracking and observability. We'll log any errors and then when we get that back we're going to upsert into the database and potentially notify the user. So that's the flow at a high level. Let's take a look at the actual code. And, Dan, if I gloss over something or I miss something on the ingest side, definitely call me out for it. Will do. Perfect. Alright. So, again, you can see here, this is, these are the two hooks that we're using to actually manage this workflow. So whenever an item gets updated, we send ingest, hey. We updated this, or we say, hey. 
We created this. And then our function looks like this. And, this is not the shortest function I've ever written, but definitely not the longest. I'll say that. So, again, an outline of the translate workflow, what I found is super helpful for me is just to quickly outline the logic at the top of each one of these. And if we go through, we could see that we're importing that ingest client. We've got the DeepL API, just their node client that we're gonna use, and we've got some types that we're pulling in to, make the TypeScript compiler TypeScript gods happy. So, we've got some translatable fields here. Again, it just defining some constants. These are the only fields that we wanna translate. You could easily set this up to be dynamic, and just defining some of the the DeepL params that we're gonna use in our API call. So when we get into the meat of the ingest function here, you can see we've got an ID. We've got our name just describing what this is. And then, you can trigger an ingest function, on any number of events. So the standard syntax here is an object with an event property, but if you want to have multiple triggers for this function, you could just pass an array of objects. Great. So onto our handler function, we're getting the event and the step, which is standard syntax Inngest is giving us that. And then we're also pulling out this direct us context that we added through that middleware. So through that, we're gonna get our services, we're gonna get our schema, our ENV variables, which is gonna give us the DeepL API key. So the first step in this function is normalizing those keys. You could see all of that code here. What's notably missing is the step functions that we saw earlier. And originally, I had these wrapped, but, a a nice little tip that Dan gave me is if this, the code that you're running doesn't actually mutate external state or depend on external state, you don't necessarily have to wrap it in a in one of the ingest step functions. 
Next up, we'll get our payload from the event. We'll check that to see if we have any translatable content included. And if we don't have any translated translatable content, then we can just return early. Right? Next, we'll get our translator. So this is the DeepL client. We'll get our schema from Directus, and we're gonna init these item services. So this is how we talk to the database, on the Directus side of it. There's just a little helper function down at the bottom of this file that that makes that a little less verbose, and then we run into our first step function. So here, we're going to get the current post, and this is just a, a service call by the post service. So we're gonna read by the query. We're gonna look for the post with the ID that we were passed in the event and return not just the root level fields, but also the translations that are attached to that post. So if we look at that in, like, the Directus UI to give you an example, we're not only gonna fetch this information, we're gonna fetch all the individual translations. And that is one of my favorite features of Directus is, being able to fetch the data that I need in a single API call. So I could go deeper into this if I wanted to, you know, three, four, five different levels. Eventually, you'll reach a max where you you don't wanna go, but, depending on the data that you've got, using these asterisks as a wildcard is incredibly helpful for local development. Now, going further, again, we're just going to have a step function that, will cancel this whole thing. If we can't get the languages from Directus, we don't know what to translate. And this was a a last minute addition. And, Dan, could you talk about, like, the retry logic just a little bit? Yeah. As Ingest handles, errors automatically and does retries, some errors you might anticipate and say, this is a non retryable setup. So if I'm missing the API key, might as well not retry it because it's just not gonna work. 
So in that sense, you, ingest allows, includes a custom function, basically, which allows ingest to say, you know what? Let's stop here and not retry anything else. So that's what Brian is using here where this is a nonrecoverable error kinda situation. So but, typically, you can throw errors to customers and whatnot, and, those will all be retried automatically. You can even catch them and handle them however you want as well. Beautiful. Beautiful. Thank you, Dan. Alright. Let me try to find where we were at. Okay. So we've got our languages, then we move on to the next step, which is actually building our translation list. Excuse me, guys. Dealing with six not six kids, but sick kids here at the house. Always a struggle. So here in this step function, we're basically just building up a list of the translations that we want. Right? So we're looping through all of those, posts that we've got, making sure, you know, we've got the fields that we wanna translate, and then we're basically building an array for those things that we'll we'll pass to the next step as we scroll down, which will be actually translating all these items in parallel. So, here, we're using promise dot all to fire these all at once. And, again, I think hey. Dan, you mentioned this, like, it like, defining a unique ID for these was not not strictly necessary, but, maybe talk about that for a minute if you don't mind. Yeah. Yeah. When you're executing in a loop, it's or in something like here where you're parallelizing with promise dot all. You don't need to. Ingest basically takes this and under like, understands, like, internally. You could check out though the SDK as well if you're curious about how it works. Basically, it takes the step ID and and and appends some sort of, iterator and creates a hash. So, automatically, it, make sure that if two steps have the same ID, it is it will, like, not they won't overwrite each other conflict. 
But for the sake of debugging, which I think is a great idea of what Brian's done here is you can dynamically set these keys, in your in your loop, which makes it easier for what Brian will show in just a minute, in the UI. Yeah. Great. Alright. So we go through we send all these to DeepL. We get all of those things back, and then, the final two steps here, we're basically, again, using promises to do all of these upserts. So, when you're using these services in, like, inside the actual Directus, like, API endpoints or hooks, there is this upsert, which, if anybody on the Directus team is listening, would love to have the upsert on the SDK. Just put in a nod for that one. But, this makes it super easy to, upsert content. Super simple. Like, hey. If this ID doesn't exist, we're gonna create the translation here. And then last but not least, we're we want to notify the user, hey. Your translations are done. So you can go check them out, because this is running in the background. Awesome. So that's the flow. Let's take a look at the UI. How does this work? Let's, what do we what are we gonna translate? Dan, do you have any thoughts? I don't know. You hit someone good yesterday. It was just, like, hello from Hello from Dan and Brian. Alright. So we're going to throw this in. What do we have? This is an amazing webinar. This is not me. This is somebody in the chat. We're not saying we're amazing. But alright. Here we go. So now what will happen as soon as I save this, over on the left, we should see the event being fired to ingest, unless I have done something totally wrong. And and just to prove that I'm not pulling any switcheroo on you guys, we don't see any translations there. So immediately after I've created that new post, now we could see, the ingest dev server and I get all this observability, all these steps that ran within the actual flow. 
So we get all of those steps broken down and you can see here we've got the individual steps within that loop that we use those specific IDs for. And as I go through here, you could see the output for each one of these. So, I don't speak Russian. Not sure if you do, Dan, or not. No. So there we go. So we could see here's the actual content that's that's getting translated. There's the slug. There's the title, etcetera, in all the different languages that we have set up. And this it's like seeing this together was, like, when it really hooked for me of, like, okay. Great. All this is running in the background. I get all the observability. So, like, when something inevitably screws up, which for me is often, if you catch any of the hundred apps hundred hours episodes. But and, like, having all this at your fingertips is incredibly powerful as a developer. And being a a developer that has all this and is able to build a flow like this that will do all the translations for your team automatically, turns you into a hero, %. So that is the flow. Dan, any anything to add before we kinda jump into queue? Yeah. And what's really nice is I think just, you set it up earlier with Docker Compose, but with everything running on your your machine, you can work and iterate quickly on this flow and not have to worry about, like, conflicting with, you know, bumping into shared resources. Like, if you're using something like SQS or something like that on Amazon, you need to provision those things. It becomes a little bit of a nightmare. And what also is nice here is, like, you know, Brian's code works perfectly as we see. It's all green. But if there was an error, you'll be able to see that span go red. And what's nice about the dev server flow is that it saves the input of your function. 
So if Brian were to go back to his code base, fix that bug, save it, the dev server would basically you know, the his direct to server would would refresh, reload, and you could click rerun in this dev server, and it would just rerun the function again. So if he, you know, if you hit rerun live, it should just it should just work. And so this gives you, like, this fast feedback loop. So you're, like, in this kind of hot reload situation where I'm working on my I'm tweaking I'm tweaking. So instead of you having to, like, go to the right, manually click a bunch of buttons in the in the Directus UI, you can have a fast feedback loop. So, like, do it once in there, keep going, you know, tweak the output, look at things, tweak maybe prompts or different things that you might be using, to to create this. So at least, like, this allows you to kind of, hopefully move a lot faster when you're building these these things that can be complex. But, yeah, I can't stress that enough. Like, the the speed at which you could iterate with the dev server and, direct us being able to, you know, go in and quickly model a feature and idea, and then also being able to, like, prepare that for scale using Ingest is, again, a great pair. Just, works really well together. Alright, guys. So if we move to our amazing slide deck, it is now time for Q and AO. If you guys have any questions in the chat that we wanna take a look at, Dan, do you spot any that we need to I did see a couple questions I could talk to. The first thing I could do was a couple questions on self hosting for Jess side because we we know we can, self host direct us. Right? And I'm sure a lot of people do that as well. Yeah. Self hosting is incredibly popular for Directus. So I'm sure I'm sure there's a lot of people in the audience that are very curious about self hosting ingest as well. And you can self host ingest. 
The code that, Brian showed that runs in the DevServer, that binary is the exact same binary that you could self host. You can also you can run it in a very lightweight version or you can offload. Ingest has queuing and state history involved like that it that is backed by. So when it's running locally, it just runs in memory because it's low volume and it's very simple. But when you self host it and you deploy it into your own cloud or wherever you want, you can hook it up to an existing, instance of Postgres or you can also, plug it out and and connect it to a dedicated Redis, maybe running in another container or something like that. It can handle a little bit more scale and handle, like, you know, restarts of your ingest system. So, you know, you can self host. There are certain things that aren't in self host yet, like, some observable observability and metrics. A lot of those systems were built in ingest cloud. We're gonna be, you know, kind of bringing some of those things down to, to open source as well. But now, you know, all the key features, all the throttling, flow control, defining functions, are all are all there. So you can run that wherever you want and and and self host. Amazing. Got it. So I on the directed side of it, of course, like, you could self host, we've got a BSL license, which basically, a free to run for anybody under under $5,000,000 in total finances or revenue. So So if you have questions on the license, definitely reach out to our team about that. You can do that through the website. What other questions do we have? I think there was one that I saw that, was super helpful. I like, comparing I like, when I first came into Ingest, I'd heard of Temporal, worked with it a bit. I can't find this one in the chat now, but, like, how do you guys stack up against temporal, Dan? Yeah. Yeah. We that's a I think it's a great question. 
You know, especially with the term durable execution, temporal's, you know, in its essence, describes itself as a durable execution engine. So it is dead focused on a, what we believe is just like the durable execution of the function. Actually, the logic. Right? Is when something fails, it does checkpoints and it and it retries. So in that sense, there there is similarities. Right? But we consider that durable execution is just like a means to an end. Right? It is it is a feature. It is not the whole platform. So what ingest really, layers on is a couple things. We have an event based approach as Brian showed with the hooks, so you can fan out and you can replay and do more things, have a little bit more flexibility with events. And if you're someone who likes events, like, I'm sure that that resonates. And if you haven't, give it a try. And all the flow control and advanced queuing is one of the things that is unique to ingest and, is is in this self self hosted open source version as well is all this reliable flow control. So when people are building these systems, often you don't just wanna execute a job and run it to completion. You need to manage how fast it's processing, this job, you know, how many times per minute you might run something. Maybe I wanna delay, I wanna debounce something, I wanna rate limit this job, run things in batch processing. Maybe just dynamically say, when there's 20 posts that have been published, let's execute this batch instead of saying, you know, let's just push these 20 items in one big blob. So there's a a a lot of differences in that sense of, like, what we've built around. And I think one of the things that also is true with the ingest SDK is it doesn't mess with the runtime. If you have used temporal, you in the TypeScript or JavaScript, you know, SDK, temporal does something where they kind of, like, wrap your logic and certain things like random doesn't work. 
So there's some gotchas that's like, I don't know what's going on with this runtime or it's going to wrap some of your things and you might not it might just not be native code, but we've fundamentally chosen to build ingest, say, anything that you're using works. It's very easy to look in the source and see where things are running. It's very it aims to be a very thin layer, so there's no weird kind of, like, gotchas and things that I need to know. And, generally, like, you know, ingest is very dynamic and defining steps and everything is very fluid. So there aren't, there's a lot of friction to, like, the rigidity that you might find with other solutions. So I those are just a few things. There's many more, that you could check out, like, on our site and whatnot, but or ping me afterwards if you if you if you're curious. Yeah. And I'm not sure that you could say this, but, like, the the syntax that that I found, like, writing the same sort of thing in in jest is dramatically, easier, and it just jives with the way that my brain works versus, like, some of the verbosity and, like, just how temporal structures things. So, do we have any other questions? I I guess that's gonna sting a bit if we have temporal onto one of the partner webinars as well. But I, what is called? I'll mention one someone asked a question about, retries. What happens when something hits hits max retries? The function will be declared as failed. And what, ingest also allows you to do is basically say, you know, if there's a complete outage, say, DeepL's down for twenty four hours or or an hour, all your functions are failing and all the retries are exhausted, you can use ingest to say, select, you know, between these two time stamps, anything that failed, replay them so you can do bulk retrace, retries, we call replays of, of those functions. So you can recover from systems because we persist all the inputs, you know, if that's helpful. Definitely. 
And then, I think probably the last question before we wrap up is, can you run webhooks in the ingest server, the dev server? You, you can. You'll have to configure your webhooks in in in in I guess it depends on, like, how you're how you're running things. So, Ingest Cloud, it has webhooks and and transforms. And locally, you just need to write a little logic to just, like, wrap that transform to simulate what, what is happening. We'll be bringing in in the future a synchronization, some thing with Cloud to make that a little bit more easy and bring them into the DevServer. But fundamentally, the webhooks are just the same API endpoint that ingest dot send is using, that Brian showed in the in those, in those direct hooks. So it's just sending JSON payloads, and that's what, what webhooks primarily are. So, it's, it's pretty easy to to utilize it and build around. There's also a few different docs on our website about that if you're curious about webhooks. Yeah. Awesome. I don't I don't see any other questions from the team. We're a little bit over, Dan, but I I I man, I appreciate you coming on. Like, before we kinda get into the awkward outro, I do want to just reiterate, like, for anybody still here, if you've registered for this, we'll hit you with an email with all the links to the repo, this amazing slide deck that we put together. So don't worry about that. That'll be coming in the next, couple hours, so just be patient. But wrapping this thing up, we we hope this was helpful for you guys. Please send us your feedback. We really enjoy doing these webinars, showcasing other tools and, things that that help you build faster. And, Dan, thank you for joining. Really appreciate the, you know, the collaborative effort on this thing. Learned a ton about it. And, for everybody else who like, next steps for you guys, what what does that look like? You know, you wanna sign off a little bit? Yeah. Yeah. 
Thanks for thanks for having me, and, thanks for doing all the leg work. You wrote all the code. You built all of it. So it's, it was pretty awesome to see this, and I think it's a great example. I think if you really wanna figure it out and you want some more, there'll be the follow-up email. Brian also has a really great tutorial that he put together, which has a lot of detail. It's pretty incredible, but definitely check that out. I'm sure that'll be in the follow-up email. And, then also, I'd say, like, if you just wanna tinker a little bit, go with ingest, go check out, one of the quick starts on our docs and just go tinker a little bit with the dev server, build some different things with some dummy code, and then kind of, that's easy way to just kinda get started, and then you can kind of dive in for the in-depth stuff with Directus using, using Brian's tutorial that he wrote. So thanks for everybody joining. And if you ever have any questions, we, we do have a Discord community. You can find the link on our site, or you can always reach out to us, contact us anytime with ingest dot com. That's 2, by the way. Just always remember that one. Two n's. Yeah. Perfect. Alright, Dan. Thank you. Thanks for the audience. That's a wrap. We'll see you all. 
Thank you.","019ddedf-e66c-4156-89ff-380d9f7e15a4",[207,208],"42e32b28-830e-4966-b849-176902ae12f7","c812e8dd-f9fe-4ee6-bef4-19be62f3427d",[],{"id":133,"number":134,"show":122,"year":135,"episodes":211},[137,138,139,140],{"reps":213},[214,270],{"name":215,"sdr":8,"link":216,"countries":217,"states":219},"John Daniels","https://meet.directus.io/meetings/john2144/john-contact-form-meeting",[218],"United States",[220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,256,257,258,259,260,261,262,263,264,265,266,267,268,269],"Michigan","Indiana","Ohio","West Virginia","Kentucky","Virginia","Tennessee","North Carolina","South Carolina","Georgia","Florida","Alabama","Mississippi","New York","MI","IN","OH","WV","KY","VA","TN","NC","SC","GA","FL","AL","MS","NY","Connecticut","CT","Delaware","DE","Maine","ME","Maryland","MD","Massachusetts","MA","New Hampshire","NH","New Jersey","NJ","Pennsylvania","PA","Rhode Island","RI","Vermont","VT","Washington DC","DC",{"name":271,"link":272,"countries":273},"Michelle 
Riber","https://meetings.hubspot.com/mriber",[274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,289,290,291,292,293,294,295,296,297,298,299,300,301,302,303,304,305,306,307,308,309,310,311,312,313,314,315,316,317,318,319,320,321,322,323,324,325,326,327,328,329,330,331,332,333,334,335,336,337,338,339,340,341,342,343,344,345,346,347,348,349,350,351,352,353,354,355,356,357,358,359,360,361,362,363,364,365,366,367,368,369,370,371,372,373,374,375,376,377,378,379,380,381,382,383,384,385,386,387,388,389,390,391,392,393,394,395,396,397,398,399,400,401,402,403,404,405,406,407,408,409,410,411,412,413,414,415,416,417,418,419,420,421,422,423,424,425,426,427,428,429,430,431,432,433,434,435,436,437,438,439,440,441,442,443,444,445,446,447,448,449,450,451,452,453,454,455,456,457,458,459,460,461,251,462,463],"Albania","ALB","Algeria","DZA","Andorra","AND","Angola","AGO","Austria","AUT","Belgium","BEL","Benin","BEN","Bosnia and Herzegovina","BIH","Botswana","BWA","Bulgaria","BGR","Burkina Faso","BFA","Burundi","BDI","Cameroon","CMR","Cape Verde","CPV","Central African Republic","CAF","Chad","TCD","Comoros","COM","Côte d'Ivoire","CIV","Croatia","HRV","Czech Republic","CZE","Democratic Republic of Congo","COD","Denmark","DNK","Djibouti","DJI","Egypt","EGY","Equatorial Guinea","GNQ","Eritrea","ERI","Estonia","EST","Eswatini","SWZ","Ethiopia","ETH","Finland","FIN","France","FRA","Gabon","GAB","Gambia","GMB","Ghana","GHA","Greece","GRC","Guinea","GIN","Guinea-Bissau","GNB","Hungary","HUN","Iceland","ISL","Ireland","IRL","Italy","ITA","Kenya","KEN","Latvia","LVA","Lesotho","LSO","Liberia","LBR","Libya","LBY","Liechtenstein","LIE","Lithuania","LTU","Luxembourg","LUX","Madagascar","MDG","Malawi","MWI","Mali","MLI","Malta","MLT","Mauritania","MRT","Mauritius","MUS","Moldova","MDA","Monaco","MCO","Montenegro","MNE","Morocco","MAR","Mozambique","MOZ","Namibia","NAM","Niger","NER","Nigeria","NGA","North Macedonia","MKD","Norway","NOR","Poland","POL","Portugal","PRT","Republic of 
Congo","COG","Romania","ROU","Rwanda","RWA","San Marino","SMR","São Tomé and Príncipe","STP","Senegal","SEN","Serbia","SRB","Seychelles","SYC","Sierra Leone","SLE","Slovakia","SVK","Slovenia","SVN","Somalia","SOM","South Africa","ZAF","South Sudan","SSD","Spain","ESP","Sudan","SDN","Sweden","SWE","Tanzania","TZA","Togo","TGO","Tunisia","TUN","Uganda","UGA","United Kingdom","GBR","Vatican City","VAT","Zambia","ZMB","Zimbabwe","ZWE","UK","Germany","Netherlands","Switzerland","CH","NL",1773850416346]