[{"data":1,"prerenderedAt":449},["ShallowReactive",2],{"footer-primary":3,"footer-secondary":93,"footer-description":119,"beyond-the-core-directus-copilot":121,"beyond-the-core-directus-copilot-next":173,"sales-reps":197},{"items":4},[5,29,49,69],{"id":6,"title":7,"url":8,"page":8,"children":9},"522e608a-77b0-4333-820d-d4f44be2ade1","Solutions",null,[10,15,20,25],{"id":11,"title":12,"url":8,"page":13},"fcafe85a-a798-4710-9e7a-776fe413aae5","Headless CMS",{"permalink":14},"/solutions/headless-cms",{"id":16,"title":17,"url":8,"page":18},"79972923-93cf-4777-9e32-5c9b0315fc10","Backend-as-a-Service",{"permalink":19},"/solutions/backend-as-a-service",{"id":21,"title":22,"url":8,"page":23},"0fa8d0c1-7b64-4f6f-939d-d7fdb99fc407","Product Information",{"permalink":24},"/solutions/product-information-management",{"id":26,"title":27,"url":28,"page":8},"63946d54-6052-4780-8ff4-91f5a9931dcc","100+ Things to Build","https://directus.io/blog/100-tools-apps-and-platforms-you-can-build-with-directus",{"id":30,"title":31,"url":8,"page":8,"children":32},"8ab4f9b1-f3e2-44d6-919b-011d91fe072f","Resources",[33,37,41,45],{"id":34,"title":35,"url":36,"page":8},"f951fb84-8777-4b84-9e91-996fe9d25483","Documentation","https://docs.directus.io",{"id":38,"title":39,"url":40,"page":8},"366febc7-a538-4c08-a326-e6204957f1e3","Guides","https://docs.directus.io/guides/",{"id":42,"title":43,"url":44,"page":8},"aeb9128e-1c5f-417f-863c-2449416433cd","Community","https://directus.chat",{"id":46,"title":47,"url":48,"page":8},"da1c2ed8-0a77-49b0-a903-49c56cb07de5","Release Notes","https://github.com/directus/directus/releases",{"id":50,"title":51,"url":8,"page":8,"children":52},"d61fae8c-7502-494a-822f-19ecff3d0256","Support",[53,57,61,65],{"id":54,"title":55,"url":56,"page":8},"8c43c781-7ebd-475f-a931-747e293c0a88","Issue Tracker","https://github.com/directus/directus/issues",{"id":58,"title":59,"url":60,"page":8},"d77bb78e-cf7b-4e01-932a-514414ba49d3","Feature 
Requests","https://github.com/directus/directus/discussions?discussions_q=is:open+sort:top",{"id":62,"title":63,"url":64,"page":8},"4346be2b-2c53-476e-b53b-becacec626a6","Community Chat","https://discord.com/channels/725371605378924594/741317677397704757",{"id":66,"title":67,"url":68,"page":8},"26c115d2-49f7-4edc-935e-d37d427fb89d","Cloud Dashboard","https://directus.cloud",{"id":70,"title":71,"url":8,"page":8,"children":72},"49141403-4f20-44ac-8453-25ace1265812","Organization",[73,78,84,88],{"id":74,"title":75,"url":76,"page":77},"1f36ea92-8a5e-47c8-914c-9822a8b9538a","About","/about",{"permalink":76},{"id":79,"title":80,"url":81,"page":82},"b84bf525-5471-4b14-a93c-225f6c386005","Careers","#",{"permalink":83},"/careers",{"id":85,"title":86,"url":87,"page":8},"86aabc3a-433d-434b-9efa-ad1d34be0a34","Brand Assets","https://drive.google.com/drive/folders/1lBOTba4RaA5ikqOn8Ewo4RYzD0XcymG9?usp=sharing",{"id":89,"title":90,"url":8,"page":91},"8d2fa1e3-198e-4405-81e1-2ceb858bc237","Contact",{"permalink":92},"/contact",{"items":94},[95,101,107,113],{"id":96,"title":97,"url":8,"page":98,"children":100},"8a1b7bfa-429d-4ffc-a650-2a5fdcf356da","Cloud Policies",{"permalink":99},"/cloud-policies",[],{"id":102,"title":103,"url":81,"page":104,"children":106},"bea848ef-828f-4306-8017-6b00ec5d4a0c","License",{"permalink":105},"/bsl",[],{"id":108,"title":109,"url":81,"page":110,"children":112},"4e914f47-4bee-42b7-b445-3119ee4196ef","Terms",{"permalink":111},"/terms",[],{"id":114,"title":115,"url":81,"page":116,"children":118},"ea69eda6-d317-4981-8421-fcabb1826bfd","Privacy",{"permalink":117},"/privacy",[],{"description":120},"\u003Cp>A composable backend to build your Headless CMS, BaaS, and 
more.&nbsp;\u003C/p>",{"id":122,"slug":123,"vimeo_id":124,"description":125,"tile":126,"length":127,"resources":128,"people":132,"episode_number":139,"published":140,"title":141,"video_transcript_html":142,"video_transcript_text":143,"content":8,"status":144,"episode_people":145,"recommendations":163,"season":164,"seo":8},"e22f53b4-e915-4b89-8057-6cccdcc58d5a","directus-copilot","906137170","Esther speaks to community member Donald about Directus Copilot - a panel extension allows users to ask contextual questions about their data within Insights dashboards.\n\n","cba039ec-2fda-47d6-950b-183a17bd013d",28,[129],{"name":130,"url":131},"Directus Copilot on GitHub","https://github.com/programmarchy/directus-extension-copilot",[133,136],{"name":134,"url":135},"Esther Agbaje","https://twitter.com/_estheradebayo",{"name":137,"url":138},"Donald Ness","https://programmarchy.com/",1,"2024-02-06","Directus Copilot","\u003Cp>Speaker 0: Getting a back end that is customer friendly out of the box is huge.\u003C/p>\u003Cp>Speaker 1: Hi, everyone, and welcome to another episode of the beyond the core show. The beyond the call show is really a director show that shines a spotlight on extension developers in the community. And today, I have a special guest with me. I have Donald. He is the winner of the past directors AI hackathon.\u003C/p>\u003Cp>And, yeah, he's just gonna be sharing about his journey on implementing the Director's Copilot extension. My name is Esther, and I work as a developer advocate at Director. So thanks for joining me, Donald. Would you like to introduce yourself?\u003C/p>\u003Cp>Speaker 0: Thanks for having me, Esther. Yeah. So I'm a software developer. I've been doing consulting for about 8 years now. And, you know, one of my niches is helping people move from WordPress to scale their business when they need custom software.\u003C/p>\u003Cp>And, you know, I found Directus is is a crucial part of that now. 
I think it's a I think it's one of the best headless CMSs out there. It's it's,\u003C/p>\u003Cp>Speaker 1: That's good to hear.\u003C/p>\u003Cp>Speaker 0: Yeah. So we can talk more about that later, but, yeah, I I happened to enter the the AI hackathon. I think it was maybe the first one that you guys Yes.\u003C/p>\u003Cp>Speaker 1: It was the first one.\u003C/p>\u003Cp>Speaker 0: Yeah. And so I I don't know. I just like, I'll take a shot at it, and, it was a lot of fun. Give me a good excuse to try out some of these new AI technologies. And, Yeah.\u003C/p>\u003Cp>Yeah.\u003C/p>\u003Cp>Speaker 1: Yeah. Nice. We'll be we'll be diving into that very soon. And I'm just curious to know, how did you get to know about directors in the first place?\u003C/p>\u003Cp>Speaker 0: So I had heard about headless CMSs, and I had a client project that needed 1. And I just I evaluated all of them out there pretty much. Mhmm. You know, some of the other popular ones are, like, Strapi and Contentful. Yeah.\u003C/p>\u003Cp>Direct has checked all the boxes for me. So, you know, open source. It sits on a Postgres database, which is really nice. I mean, you can connect it to any database. But, and then I looked at the code, and it was all TypeScript view, like, very well written, had a great community.\u003C/p>\u003Cp>Yeah. So I think I did, like, a pull request, and then everyone was super nice to me. Because when I was evaluating, I found, like, a bug. And so I just found that the whole community around it was really good too. So, yeah, that's how I found out about Directus.\u003C/p>\u003Cp>I ended up using it for for a client project after evaluating a bunch of other headless CMSs.\u003C/p>\u003Cp>Speaker 1: You you worked on the Director's Copilot extension, and just looking at it, it looked really amazing. I really loved it. So would you like to tell us more about this extension and, yeah, what does it do exactly?\u003C/p>\u003Cp>Speaker 0: Right. 
So it it basically connects Directus to OpenAI chat, and Mhmm. It allows you to use kind of like a chat interface to ask questions about your database. And so, you know, the idea behind it was someone that's nontechnical. It would be nice if they could formulate requests to the database just using a chat type of window.\u003C/p>\u003Cp>So, yeah, that was kind of the idea behind it, in terms of the user perspective. You know, I was thinking of really, when I was approaching it is, what can I get done in a short amount of time to be able to enter the hackathon? And, Directus had some very handy features for, connecting all those things together. So that was another aspect of it.\u003C/p>\u003Cp>Speaker 1: Nice. About how long did it take you to, you know, complete the extension? Did it take long?\u003C/p>\u003Cp>Speaker 0: No. It it came together pretty quickly when I discovered the pieces that I needed to put together. I think it was about, like, a day and a half.\u003C/p>\u003Cp>Speaker 1: Oh, that's fast. Yeah.\u003C/p>\u003Cp>Speaker 0: Yeah. So, like, a little a little over a day.\u003C/p>\u003Cp>Speaker 1: Okay. Okay. Nice. So would you like to just walk us through maybe some parts of the code and then a quick demo of the extension? I guess you could share your screen.\u003C/p>\u003Cp>Speaker 0: Right? Okay. So the main the main feature that I used in Directus was the specification service. So there's a there's an API endpoint in Directus that gives you back an open API schema back that basically describes all the endpoints that you can call with API and, you know, all the parameters that those endpoints take. Mhmm.\u003C/p>\u003Cp>And and that, OpenAI schema happens to fit perfectly into the, OpenAI. There's this feature of OpenAI called functions. And so when you submit an OpenAI request, you can tell it, hey. Here's a bunch of functions you could call, and here's the schema for it. 
And Nice.\u003C/p>\u003Cp>So then you send that along with your prompt and say, hey. You know, how many orders did I have today? And then that funk it looks in all those functions and picks the right one to call. It tells you that back, so you get so it's kind of 2 requests to OpenAI. So, you know, the first one the first endpoint so this is the endpoint for the, the chat interface.\u003C/p>\u003Cp>So first, it just hits Okay. Oops. It hits ask, and it basically tells ask OpenAI what endpoint should I call. Open a OpenAI tells tells, it responds with the API that things should be called with the parameters that will meet the the needs of the prompt. And then there's another call that actually makes that API call and sends the result back to OpenAI, and then it sends kind of like, once it has the result, it part then OpenAI parses that result and gives you\u003C/p>\u003Cp>Speaker 1: I see.\u003C/p>\u003Cp>Speaker 0: A language version of what the result it describes the result for you, essentially. Nice. So this is kinda as basic as you can get with, like, a, you know, an OpenAI, like, agent type. It's not really an agent. It could do a lot more fancy things.\u003C/p>\u003Cp>Like, you know, you could like, right now, it basically has to answer your question in one API call. It can't, like Okay. Call multiple APIs yet and and kinda then, like, combine those together somehow. But it could if you know, potentially, it could do something like that. But this is just kind of a\u003C/p>\u003Cp>Speaker 1: a program. One call per time?\u003C/p>\u003Cp>Speaker 0: Right. It's like a one shot one shot. So OpenAI has to be able to get it in one shot.\u003C/p>\u003Cp>Speaker 1: Okay.\u003C/p>\u003Cp>Speaker 0: And, yeah, I guess there's just, you know, the the UI code. Mhmm. But, yeah, there's, like, a service that interacts with OpenAI here. And so you can see, like, you know, what the prompt is. 
So there's a, like, a base prompt.\u003C/p>\u003Cp>Trying to remember what my base base prompt was here. Oh, okay. Yeah. It's it's something like this.\u003C/p>\u003Cp>Speaker 1: Okay. That's the base prompt just before you type anything extra.\u003C/p>\u003Cp>Speaker 0: Exactly. So this this gets put at the top of the prompt to OpenAI.\u003C/p>\u003Cp>Speaker 1: Nice.\u003C/p>\u003Cp>Speaker 0: And and, yeah, that that's that's pretty much all there is to it. I I use something called langchain.\u003C/p>\u003Cp>Speaker 1: Yeah. I'm interested. What's langchain used for?\u003C/p>\u003Cp>Speaker 0: So langchain helps you build, like, a chain of prompts that, you know, it can get pretty fancy to where you can have, like, an agent style thing where you can have multiple branches of, you know, prompt and and answers and kinda go back and forth and have, like, a you know, it's like a a graph of of what the agent could do. You know, this is a very simple chain where it's just, you know, an API call with a prompt and then, you know, like I explained before. But lane chain helps you build out those build out those workflows for the AI. And the nice thing about it is you can combine a bunch of different LLMs. So, like, you could do some you could choose, like, chat g g GPT 3.5 for some steps and GPT 4 for, like, harder steps where it needs to be smarter, I guess you could say.\u003C/p>\u003Cp>Okay.\u003C/p>\u003Cp>Speaker 1: So yeah. Yeah. Yeah. So, I have an interesting question. Would we still have, like, implemented this copilot without necessarily using launching, or is he an optional, like, tool?\u003C/p>\u003Cp>Speaker 0: Yeah. Yeah. I think I think you could've. Yes.\u003C/p>\u003Cp>Speaker 1: Okay.\u003C/p>\u003Cp>Speaker 0: I was I was just using it because it\u003C/p>\u003Cp>Speaker 1: It optimizes the experience and the performance more like. Right?\u003C/p>\u003Cp>Speaker 0: Yeah. 
And it was just a little easier than than trying to trying to build it build it all myself. Yeah.\u003C/p>\u003Cp>Speaker 1: Yeah. Nice. Alright. Cool. Like, I guess we can go into, like, directors to see how the panel works.\u003C/p>\u003Cp>Speaker 0: Yeah. This is the insights the insights tab, and\u003C/p>\u003Cp>Speaker 1: Yeah.\u003C/p>\u003Cp>Speaker 0: I've set up a little little dashboard here and added my component. So that's what this extension is. It's just a, yeah, it's just a an insights panel.\u003C/p>\u003Cp>Speaker 1: Mhmm.\u003C/p>\u003Cp>Speaker 0: So insights panel here, and it has some settings. So here here's my key, which I'll I'll have to revoke after the after the call, but you can select the model that you wanna use. Okay. Put your open add key in. It'll get it from the environment if you don't put it in there.\u003C/p>\u003Cp>And, yeah, then you could just start asking questions. So I've got a pretty simple database here.\u003C/p>\u003Cp>Speaker 1: Data. Okay.\u003C/p>\u003Cp>Speaker 0: Like, a customer's table with with some products, and I've got, like, some orders that have it's kinda like a like, an invoice or something. So I maybe we could try\u003C/p>\u003Cp>Speaker 1: Yeah. Let's see the products. Maybe I could just, like, pray, like, what product is this price, though?\u003C/p>\u003Cp>Speaker 0: Yeah. And if do you wanna do you wanna go shopping and and pick pick some toys?\u003C/p>\u003Cp>Speaker 1: Yeah. Yeah. Sure. Maybe the Rubik's cube and, yeah. We could say how much is the Rubik's cube, something like that.\u003C/p>\u003Cp>Speaker 0: Okay. Yeah. I'll I'll create an order for you too. So Okay. I'll have to I'll have to make you a customer though customer though.\u003C/p>\u003Cp>Speaker 1: Okay. Let's do it.\u003C/p>\u003Cp>Speaker 0: Is that\u003C/p>\u003Cp>Speaker 1: Yeah. That's it.\u003C/p>\u003Cp>Speaker 0: And then save there. Yep. Okay. So make you an order. Let's get Esther.\u003C/p>\u003Cp>And today Yeah. 
We'll add, oh, yeah. You wanted a Rubik's cube.\u003C/p>\u003Cp>Speaker 1: Yeah.\u003C/p>\u003Cp>Speaker 0: Okay. Group is q. K. Looks like it's oh oh, no. What?\u003C/p>\u003Cp>2? Okay. We'll just do 1.\u003C/p>\u003Cp>Speaker 1: Okay. Let's do 1.\u003C/p>\u003Cp>Speaker 0: And so let's see. I think I have these. You don't have to pay any tax.\u003C/p>\u003Cp>Speaker 1: Nice.\u003C/p>\u003Cp>Speaker 0: And click save. Okay. So now you've got an order here. I just need order. So here's order number 2.\u003C/p>\u003Cp>Okay. Okay. Sorry. I'm just making sure this is correct here.\u003C/p>\u003Cp>Speaker 1: Yeah. That's fine.\u003C/p>\u003Cp>Speaker 0: Something seems a little strange. Sorry.\u003C/p>\u003Cp>Speaker 1: Is it fact that it's just one order item?\u003C/p>\u003Cp>Speaker 0: Yeah. Because I thought I I don't know if maybe I clicked in here. Did I add a new order items? I just thought\u003C/p>\u003Cp>Speaker 1: You added the one for Rubik's cube.\u003C/p>\u003Cp>Speaker 0: Okay. And then I thought center oh, I did I added an existing, I think, is my problem. I added an existing item. So I I I guess I'd cannibalize another.\u003C/p>\u003Cp>Speaker 1: Okay.\u003C/p>\u003Cp>Speaker 0: I'll I'll I'll have this make make someone else buy this. So that just sketch It's in a sense.\u003C/p>\u003Cp>Speaker 1: Yep. So let's\u003C/p>\u003Cp>Speaker 0: Sorry.\u003C/p>\u003Cp>Speaker 1: So now\u003C/p>\u003Cp>Speaker 0: we have So now we have 2 2 others.\u003C/p>\u003Cp>Speaker 1: Now we have 2 others. Yeah.\u003C/p>\u003Cp>Speaker 0: Okay. So if we go back to the chat window, I think you had asked how much is a Rubik's cube.\u003C/p>\u003Cp>Speaker 1: Rubik's cube view.\u003C/p>\u003Cp>Speaker 0: I don't know if it will be able to do this one.\u003C/p>\u003Cp>Speaker 1: Let's see.\u003C/p>\u003Cp>Speaker 0: Oh, okay.\u003C/p>\u003Cp>Speaker 1: It did it. Yeah.\u003C/p>\u003Cp>Speaker 0: It did it. Okay. So Yeah. Keep keep in mind that price is incense. 
Okay.\u003C/p>\u003Cp>Here. Well, I'll have to ask it again. So how much is a Rubik's cube? Keep in mind prices are in cents.\u003C/p>\u003Cp>Speaker 1: In cents.\u003C/p>\u003Cp>Speaker 0: Shouldn't say it should say $7 now. Okay. There we\u003C/p>\u003Cp>Speaker 1: go. Dollars. Yeah.\u003C/p>\u003Cp>Speaker 0: 700¢. $7. Okay.\u003C/p>\u003Cp>Speaker 1: Yeah. Makes a lot of sense. Yeah. It's really smart. It's using OpenAI, so it's really smart.\u003C/p>\u003Cp>Speaker 0: Yeah. Yeah. It it can it can be pretty surprising with what it can do. You know, one thing I I realized when building this is the the open the open API, not to Mhmm. So it's kinda confusing because there's open API and then open AI.\u003C/p>\u003Cp>But the\u003C/p>\u003Cp>Speaker 1: the open API Okay. Schema,\u003C/p>\u003Cp>Speaker 0: is is generated from the stuff we basically, from the way we create fields and things in. So, you know, when you're when you're creating a field, it can be helpful to to, like, give it semantic information because that could help inform the API or the the AI, if if that makes sense. So I tried to, like, have a field description to to say that the price was in cents here. So that might be but that I don't think that passes through to the schema\u003C/p>\u003Cp>Speaker 1: yet. Yeah.\u003C/p>\u003Cp>Speaker 0: But if we could pass that through the schema, it might be able to get that in in one shot in one shot. But\u003C/p>\u003Cp>Speaker 1: Yeah. Possibly. Possibly. Do you wanna do another prompt?\u003C/p>\u003Cp>Speaker 0: Yeah. Let's do that. Okay. What did you have in mind?\u003C/p>\u003Cp>Speaker 1: Nothing.\u003C/p>\u003Cp>Speaker 0: Okay. Could say something like how many orders did\u003C/p>\u003Cp>Speaker 1: I have today? Yeah.\u003C/p>\u003Cp>Speaker 0: Okay. So that didn't get this one. The product does not contain information by orders.\u003C/p>\u003Cp>Speaker 1: Maybe we can modify to how many orders do I have instead of today. Okay. 
Let's see.\u003C/p>\u003Cp>Speaker 0: I wonder why it's not Should\u003C/p>\u003Cp>Speaker 1: we call it other items? No?\u003C/p>\u003Cp>Speaker 0: Well, I wanted to list how many orders that has here.\u003C/p>\u003Cp>Speaker 1: Okay.\u003C/p>\u003Cp>Speaker 0: But, you know, this very possible that I'm doing something wrong, and it is a prototype after all.\u003C/p>\u003Cp>Speaker 1: Yeah. It says.\u003C/p>\u003Cp>Speaker 0: But it has been able to answer questions like these in the past. I'm not sure I'm not sure why. Make sure I'm using GPT 4. Okay. Yes.\u003C/p>\u003Cp>We could try a GPT 3.\u003C/p>\u003Cp>Speaker 1: Let's see. Yeah. 3.5. Okay.\u003C/p>\u003Cp>Speaker 0: How many how many total orders do I have? Okay.\u003C/p>\u003Cp>Speaker 1: Yeah. So it got So 33.5 worked. Okay. Could it be an issue with the model? Not sure.\u003C/p>\u003Cp>Speaker 0: Yeah. I'm not sure what's tripping it up, really. It seemed like it wasn't find finding the right API to call.\u003C/p>\u003Cp>Speaker 1: Okay. Yeah. So I'm guessing you also when you were developing this extension, you also added the, error message and the fetching. Like, why it says when it's fetching and says calling the API, you're the one that's programmed that here.\u003C/p>\u003Cp>Speaker 0: Yes. So this this is the once it once you send it this prompt Mhmm. And then it gets the response, At this point is where it has either successfully chosen an API to call or it doesn't understand how to fulfill your request.\u003C/p>\u003Cp>Speaker 1: Fulfill them. Okay. Thank you. That makes sense. Okay.\u003C/p>\u003Cp>But it's really powerful. This is a prototype. I'm sure, there are other things that can be improved, like just making the experience a lot smoother. But this is really amazing. 
Just seeing how we can implement open API, AI's open API, and, you know, create an extension to just query your orders instead of having to.\u003C/p>\u003Cp>Because, you know, if you have a long list of so many orders or so much data, you can easily just use this parts for extension to find the at the actual detail for that data instead of going through the entire spreadsheet or database. So this is definitely very useful. And what what were some maybe key considerations or things that you had to, you know, modify when using open a open API. I always trip on that. So confusing when using the open API to, you know, develop these extensions where there's certain considerations and things they had to modify.\u003C/p>\u003Cp>Speaker 0: Yeah. So, you know, I mean, one of them was actually with Linkchain. So Linkchain is it was originally developed in Python, and I needed the JavaScript version. So there's a JavaScript version that's kind of trailing along the Python version.\u003C/p>\u003Cp>Speaker 1: Thank you.\u003C/p>\u003Cp>Speaker 0: And, at first, it wasn't parsing the open API schema correctly, and so it it it wouldn't work. But that's another good community over there. I was able to submit a pull request to fix that little bug, and they merged it and and now it works. Yeah. Another one was you can only send a limited number of functions.\u003C/p>\u003Cp>So I tried to reduce what functions I was sending in the schema. So, basically, I only allow OpenAI to call, get methods. So that trims down the function to only only APIs that can do, read data. And if you have a large number of collections, though, you're still that's still gonna be too many, because I think you maybe only have 40 functions that you can send it. So that is just kind of an inbuilt limitation now.\u003C/p>\u003Cp>You'd need something like a a way to organize or or, like, to to pick the most relevant, API calls for your prompt. And one way to do that is with vector embeddings. 
We create vector embeddings for all of your all of your functions and, like, the description of the function, and then use that to select the most relevant functions to send to OpenAI, OpenAI. But, Directus doesn't support that yet, but it could in the future, which would be awesome. I've seen some chatter, on GitHub about that.\u003C/p>\u003Cp>So that was an that's another limitation. And, you know, limiting to get requests is I think if we if if I could open it up to post requests, there could be some interesting things that it could do. Like, for example, you could ask it to, hey. Could you mock up could you create some mock data for my customers table? And then OpenAI is pretty good at creating data like that.\u003C/p>\u003Cp>So if you if you allow it to write to your bay database, which I I don't think many people might might not wanna do that. But it kind of opened up opens up some interesting possibilities. But, yeah, I would say those two things are probably the most difficult, the the lang chaining bug and then, figuring out how to pair down what functions get sent to OpenAI.\u003C/p>\u003Cp>Speaker 1: Yeah. Yeah. That makes sense. But it's definitely incredible to see how much you're able to pull off. Like, just pulling off this in a day and a half.\u003C/p>\u003Cp>Like, it's amazing. And, yeah, I'm excited to probably see this in the marketplace when we eventually launch and see all the incredible work that you'll also be doing in the director's community. So, yeah, thanks for sharing and, yeah, excited about what you do next. Alright. So before we wrap this up, I would just like to ask you, an exciting question.\u003C/p>\u003Cp>I know when we're starting this, you talked about how your well, I say your mission is to, like, migrate develop parts from WordPress to directors, and, you know, you've used directors and you enjoyed. 
So what would you say is the most exciting director's feature that you've what what's that thing that you may love about directors? Or if there are more than 1, feel free to share.\u003C/p>\u003Cp>Speaker 0: Okay. I mean, I mean, getting getting a back end that is customer friendly out of the box is huge for me. So that's a that just saves so much time not having to build that. And, so that that's that's very powerful. And then, my favorite feature is probably flows.\u003C/p>\u003Cp>I've actually been able to do a lot with flows, And it from email reminders and all these got, like, half a dozen email flows, set up on one of my projects, and I've been really impressed with Flows. Yeah. It's a it's a very powerful system for just developing new features that even your customer can can do themselves. So, you know, it's kinda like a no code solution, but you can add a little bit of code to to really ramp up the the abilities. So yeah.\u003C/p>\u003Cp>Speaker 1: Okay.\u003C/p>\u003Cp>Speaker 0: Flows is my favorite feature, I think.\u003C/p>\u003Cp>Speaker 1: Yeah. It's it's interesting you mentioned that because I actually really love flows as well. It's so useful. And, you know, just combining that with your data, there's so many amazing things that you can do with flows instead of maybe keying into a third party app to just handle, like, a basic flow. Directors can do that for you.\u003C/p>\u003Cp>So, yeah, it's amazing to hear. Alright, Donald. It's been a pleasure just having you come talk to us about, your extension and everything that you you know, the the key points that you took notice of when developing it. So I'm excited about what you do in community, and, yeah, feel free to always drop by in our Discord community. I know you're an active member already, so feel free to do more.\u003C/p>\u003Cp>And should we expect more extensions for from you in the future?\u003C/p>\u003Cp>Speaker 0: Yeah. 
I think so.\u003C/p>\u003Cp>Speaker 1: Okay.\u003C/p>\u003Cp>Speaker 0: Yeah. I'd like to hit help out with some of the the vector database extensions. So if if anyone's out there that is thinking about doing that, love to love to chat.\u003C/p>\u003Cp>Speaker 1: It's the not up.\u003C/p>","Getting a back end that is customer friendly out of the box is huge. Hi, everyone, and welcome to another episode of the beyond the core show. The beyond the call show is really a director show that shines a spotlight on extension developers in the community. And today, I have a special guest with me. I have Donald. He is the winner of the past directors AI hackathon. And, yeah, he's just gonna be sharing about his journey on implementing the Director's Copilot extension. My name is Esther, and I work as a developer advocate at Director. So thanks for joining me, Donald. Would you like to introduce yourself? Thanks for having me, Esther. Yeah. So I'm a software developer. I've been doing consulting for about 8 years now. And, you know, one of my niches is helping people move from WordPress to scale their business when they need custom software. And, you know, I found Directus is is a crucial part of that now. I think it's a I think it's one of the best headless CMSs out there. It's it's, That's good to hear. Yeah. So we can talk more about that later, but, yeah, I I happened to enter the the AI hackathon. I think it was maybe the first one that you guys Yes. It was the first one. Yeah. And so I I don't know. I just like, I'll take a shot at it, and, it was a lot of fun. Give me a good excuse to try out some of these new AI technologies. And, Yeah. Yeah. Yeah. Nice. We'll be we'll be diving into that very soon. And I'm just curious to know, how did you get to know about directors in the first place? So I had heard about headless CMSs, and I had a client project that needed 1. And I just I evaluated all of them out there pretty much. Mhmm. 
You know, some of the other popular ones are, like, Strapi and Contentful. Yeah. Direct has checked all the boxes for me. So, you know, open source. It sits on a Postgres database, which is really nice. I mean, you can connect it to any database. But, and then I looked at the code, and it was all TypeScript view, like, very well written, had a great community. Yeah. So I think I did, like, a pull request, and then everyone was super nice to me. Because when I was evaluating, I found, like, a bug. And so I just found that the whole community around it was really good too. So, yeah, that's how I found out about Directus. I ended up using it for for a client project after evaluating a bunch of other headless CMSs. You you worked on the Director's Copilot extension, and just looking at it, it looked really amazing. I really loved it. So would you like to tell us more about this extension and, yeah, what does it do exactly? Right. So it it basically connects Directus to OpenAI chat, and Mhmm. It allows you to use kind of like a chat interface to ask questions about your database. And so, you know, the idea behind it was someone that's nontechnical. It would be nice if they could formulate requests to the database just using a chat type of window. So, yeah, that was kind of the idea behind it, in terms of the user perspective. You know, I was thinking of really, when I was approaching it is, what can I get done in a short amount of time to be able to enter the hackathon? And, Directus had some very handy features for, connecting all those things together. So that was another aspect of it. Nice. About how long did it take you to, you know, complete the extension? Did it take long? No. It it came together pretty quickly when I discovered the pieces that I needed to put together. I think it was about, like, a day and a half. Oh, that's fast. Yeah. Yeah. So, like, a little a little over a day. Okay. Okay. Nice. 
So would you like to just walk us through maybe some parts of the code and then a quick demo of the extension? I guess you could share your screen. Right? Okay. So the main the main feature that I used in Directus was the specification service. So there's a there's an API endpoint in Directus that gives you back an open API schema back that basically describes all the endpoints that you can call with API and, you know, all the parameters that those endpoints take. Mhmm. And and that, OpenAI schema happens to fit perfectly into the, OpenAI. There's this feature of OpenAI called functions. And so when you submit an OpenAI request, you can tell it, hey. Here's a bunch of functions you could call, and here's the schema for it. And Nice. So then you send that along with your prompt and say, hey. You know, how many orders did I have today? And then that funk it looks in all those functions and picks the right one to call. It tells you that back, so you get so it's kind of 2 requests to OpenAI. So, you know, the first one the first endpoint so this is the endpoint for the, the chat interface. So first, it just hits Okay. Oops. It hits ask, and it basically tells ask OpenAI what endpoint should I call. Open a OpenAI tells tells, it responds with the API that things should be called with the parameters that will meet the the needs of the prompt. And then there's another call that actually makes that API call and sends the result back to OpenAI, and then it sends kind of like, once it has the result, it part then OpenAI parses that result and gives you I see. A language version of what the result it describes the result for you, essentially. Nice. So this is kinda as basic as you can get with, like, a, you know, an OpenAI, like, agent type. It's not really an agent. It could do a lot more fancy things. Like, you know, you could like, right now, it basically has to answer your question in one API call. It can't, like Okay. 
Call multiple APIs yet and and kinda then, like, combine those together somehow. But it could if you know, potentially, it could do something like that. But this is just kind of a a program. One call per time? Right. It's like a one shot one shot. So OpenAI has to be able to get it in one shot. Okay. And, yeah, I guess there's just, you know, the the UI code. Mhmm. But, yeah, there's, like, a service that interacts with OpenAI here. And so you can see, like, you know, what the prompt is. So there's a, like, a base prompt. Trying to remember what my base base prompt was here. Oh, okay. Yeah. It's it's something like this. Okay. That's the base prompt just before you type anything extra. Exactly. So this this gets put at the top of the prompt to OpenAI. Nice. And and, yeah, that that's that's pretty much all there is to it. I I use something called langchain. Yeah. I'm interested. What's langchain used for? So langchain helps you build, like, a chain of prompts that, you know, it can get pretty fancy to where you can have, like, an agent style thing where you can have multiple branches of, you know, prompt and and answers and kinda go back and forth and have, like, a you know, it's like a a graph of of what the agent could do. You know, this is a very simple chain where it's just, you know, an API call with a prompt and then, you know, like I explained before. But langchain helps you build out those build out those workflows for the AI. And the nice thing about it is you can combine a bunch of different LLMs. So, like, you could do some you could choose, like, chat g g GPT 3.5 for some steps and GPT 4 for, like, harder steps where it needs to be smarter, I guess you could say. Okay. So yeah. Yeah. Yeah. So, I have an interesting question. Would we still have, like, implemented this copilot without necessarily using langchain, or is it an optional, like, tool? Yeah. Yeah. I think I think you could've. Yes. Okay. 
I was I was just using it because it It optimizes the experience and the performance more like. Right? Yeah. And it was just a little easier than than trying to trying to build it build it all myself. Yeah. Yeah. Nice. Alright. Cool. Like, I guess we can go into, like, directors to see how the panel works. Yeah. This is the insights the insights tab, and Yeah. I've set up a little little dashboard here and added my component. So that's what this extension is. It's just a, yeah, it's just a an insights panel. Mhmm. So insights panel here, and it has some settings. So here here's my key, which I'll I'll have to revoke after the after the call, but you can select the model that you wanna use. Okay. Put your open add key in. It'll get it from the environment if you don't put it in there. And, yeah, then you could just start asking questions. So I've got a pretty simple database here. Data. Okay. Like, a customer's table with with some products, and I've got, like, some orders that have it's kinda like a like, an invoice or something. So I maybe we could try Yeah. Let's see the products. Maybe I could just, like, pray, like, what product is this price, though? Yeah. And if do you wanna do you wanna go shopping and and pick pick some toys? Yeah. Yeah. Sure. Maybe the Rubik's cube and, yeah. We could say how much is the Rubik's cube, something like that. Okay. Yeah. I'll I'll create an order for you too. So Okay. I'll have to I'll have to make you a customer though customer though. Okay. Let's do it. Is that Yeah. That's it. And then save there. Yep. Okay. So make you an order. Let's get Esther. And today Yeah. We'll add, oh, yeah. You wanted a Rubik's cube. Yeah. Okay. Group is q. K. Looks like it's oh oh, no. What? 2? Okay. We'll just do 1. Okay. Let's do 1. And so let's see. I think I have these. You don't have to pay any tax. Nice. And click save. Okay. So now you've got an order here. I just need order. So here's order number 2. Okay. Okay. Sorry. 
I'm just making sure this is correct here. Yeah. That's fine. Something seems a little strange. Sorry. Is it fact that it's just one order item? Yeah. Because I thought I I don't know if maybe I clicked in here. Did I add a new order items? I just thought You added the one for Rubik's cube. Okay. And then I thought center oh, I did I added an existing, I think, is my problem. I added an existing item. So I I I guess I'd cannibalize another. Okay. I'll I'll I'll have this make make someone else buy this. So that just sketch It's in a sense. Yep. So let's Sorry. So now we have So now we have 2 2 others. Now we have 2 others. Yeah. Okay. So if we go back to the chat window, I think you had asked how much is a Rubik's cube. Rubik's cube view. I don't know if it will be able to do this one. Let's see. Oh, okay. It did it. Yeah. It did it. Okay. So Yeah. Keep keep in mind that price is incense. Okay. Here. Well, I'll have to ask it again. So how much is a Rubik's cube? Keep in mind prices are in cents. In cents. Shouldn't say it should say $7 now. Okay. There we go. Dollars. Yeah. 700¢. $7. Okay. Yeah. Makes a lot of sense. Yeah. It's really smart. It's using OpenAI, so it's really smart. Yeah. Yeah. It it can it can be pretty surprising with what it can do. You know, one thing I I realized when building this is the the open the open API, not to Mhmm. So it's kinda confusing because there's open API and then open AI. But the the open API Okay. Schema, is is generated from the stuff we basically, from the way we create fields and things in. So, you know, when you're when you're creating a field, it can be helpful to to, like, give it semantic information because that could help inform the API or the the AI, if if that makes sense. So I tried to, like, have a field description to to say that the price was in cents here. So that might be but that I don't think that passes through to the schema yet. Yeah. 
But if we could pass that through the schema, it might be able to get that in in one shot in one shot. But Yeah. Possibly. Possibly. Do you wanna do another prompt? Yeah. Let's do that. Okay. What did you have in mind? Nothing. Okay. Could say something like how many orders did I have today? Yeah. Okay. So that didn't get this one. The product does not contain information by orders. Maybe we can modify to how many orders do I have instead of today. Okay. Let's see. I wonder why it's not Should we call it other items? No? Well, I wanted to list how many orders that has here. Okay. But, you know, this very possible that I'm doing something wrong, and it is a prototype after all. Yeah. It says. But it has been able to answer questions like these in the past. I'm not sure I'm not sure why. Make sure I'm using GPT 4. Okay. Yes. We could try a GPT 3. Let's see. Yeah. 3.5. Okay. How many how many total orders do I have? Okay. Yeah. So it got So 33.5 worked. Okay. Could it be an issue with the model? Not sure. Yeah. I'm not sure what's tripping it up, really. It seemed like it wasn't find finding the right API to call. Okay. Yeah. So I'm guessing you also when you were developing this extension, you also added the, error message and the fetching. Like, why it says when it's fetching and says calling the API, you're the one that's programmed that here. Yes. So this this is the once it once you send it this prompt Mhmm. And then it gets the response, At this point is where it has either successfully chosen an API to call or it doesn't understand how to fulfill your request. Fulfill them. Okay. Thank you. That makes sense. Okay. But it's really powerful. This is a prototype. I'm sure, there are other things that can be improved, like just making the experience a lot smoother. But this is really amazing. Just seeing how we can implement open API, AI's open API, and, you know, create an extension to just query your orders instead of having to. 
Because, you know, if you have a long list of so many orders or so much data, you can easily just use this parts for extension to find the at the actual detail for that data instead of going through the entire spreadsheet or database. So this is definitely very useful. And what what were some maybe key considerations or things that you had to, you know, modify when using open a open API. I always trip on that. So confusing when using the open API to, you know, develop these extensions where there's certain considerations and things they had to modify. Yeah. So, you know, I mean, one of them was actually with langchain. So langchain is it was originally developed in Python, and I needed the JavaScript version. So there's a JavaScript version that's kind of trailing along the Python version. Thank you. And, at first, it wasn't parsing the open API schema correctly, and so it it it wouldn't work. But that's another good community over there. I was able to submit a pull request to fix that little bug, and they merged it and and now it works. Yeah. Another one was you can only send a limited number of functions. So I tried to reduce what functions I was sending in the schema. So, basically, I only allow OpenAI to call, get methods. So that trims down the function to only only APIs that can do, read data. And if you have a large number of collections, though, you're still that's still gonna be too many, because I think you maybe only have 40 functions that you can send it. So that is just kind of an inbuilt limitation now. You'd need something like a a way to organize or or, like, to to pick the most relevant, API calls for your prompt. And one way to do that is with vector embeddings. We create vector embeddings for all of your all of your functions and, like, the description of the function, and then use that to select the most relevant functions to send to OpenAI, OpenAI. But, Directus doesn't support that yet, but it could in the future, which would be awesome. 
I've seen some chatter, on GitHub about that. So that was an that's another limitation. And, you know, limiting to get requests is I think if we if if I could open it up to post requests, there could be some interesting things that it could do. Like, for example, you could ask it to, hey. Could you mock up could you create some mock data for my customers table? And then OpenAI is pretty good at creating data like that. So if you if you allow it to write to your bay database, which I I don't think many people might might not wanna do that. But it kind of opened up opens up some interesting possibilities. But, yeah, I would say those two things are probably the most difficult, the the lang chaining bug and then, figuring out how to pair down what functions get sent to OpenAI. Yeah. Yeah. That makes sense. But it's definitely incredible to see how much you're able to pull off. Like, just pulling off this in a day and a half. Like, it's amazing. And, yeah, I'm excited to probably see this in the marketplace when we eventually launch and see all the incredible work that you'll also be doing in the director's community. So, yeah, thanks for sharing and, yeah, excited about what you do next. Alright. So before we wrap this up, I would just like to ask you, an exciting question. I know when we're starting this, you talked about how your well, I say your mission is to, like, migrate develop parts from WordPress to directors, and, you know, you've used directors and you enjoyed. So what would you say is the most exciting director's feature that you've what what's that thing that you may love about directors? Or if there are more than 1, feel free to share. Okay. I mean, I mean, getting getting a back end that is customer friendly out of the box is huge for me. So that's a that just saves so much time not having to build that. And, so that that's that's very powerful. And then, my favorite feature is probably flows. 
I've actually been able to do a lot with flows, And it from email reminders and all these got, like, half a dozen email flows, set up on one of my projects, and I've been really impressed with Flows. Yeah. It's a it's a very powerful system for just developing new features that even your customer can can do themselves. So, you know, it's kinda like a no code solution, but you can add a little bit of code to to really ramp up the the abilities. So yeah. Okay. Flows is my favorite feature, I think. Yeah. It's it's interesting you mentioned that because I actually really love flows as well. It's so useful. And, you know, just combining that with your data, there's so many amazing things that you can do with flows instead of maybe keying into a third party app to just handle, like, a basic flow. Directors can do that for you. So, yeah, it's amazing to hear. Alright, Donald. It's been a pleasure just having you come talk to us about, your extension and everything that you you know, the the key points that you took notice of when developing it. So I'm excited about what you do in community, and, yeah, feel free to always drop by in our Discord community. I know you're an active member already, so feel free to do more. And should we expect more extensions for from you in the future? Yeah. I think so. Okay. Yeah. I'd like to hit help out with some of the the vector database extensions. So if if anyone's out there that is thinking about doing that, love to love to chat. 
It's the not up.","published",[146,153],{"people_id":147},{"id":148,"first_name":149,"last_name":150,"avatar":151,"bio":152,"links":8},"60c9fd68-1b7a-4423-97ac-c8768bd8227a","Esther","Agbaje","cff9cdc8-bb76-4fea-a9e3-74c40d3f8274","Educator",{"people_id":154},{"id":155,"first_name":156,"last_name":157,"avatar":158,"bio":159,"links":160},"944fa23b-b1a6-419a-9de8-0e2afb347152","Donald","Ness","930c351b-f9ca-4ac9-960b-44509e6135b8","Software Consultant at Programmarchy",[161],{"url":138,"service":162},"website",[],{"id":165,"number":139,"year":166,"episodes":167,"show":170},"9e90c8ed-0bf8-43bb-8c4e-29ece4187819","2024",[122,168,169],"63ef529e-3a79-4116-a8b3-e9741dc70b3f","8f4235cb-a9f3-4d25-ab5a-0af8f0fc892e",{"title":171,"tile":172},"Beyond the Core","67399d60-cf53-404b-b5d4-a35b52850130",{"id":168,"slug":174,"season":165,"vimeo_id":175,"description":176,"tile":177,"length":178,"resources":179,"people":183,"episode_number":188,"published":189,"title":190,"video_transcript_html":191,"video_transcript_text":192,"content":8,"seo":8,"status":144,"episode_people":193,"recommendations":196},"media-ai-bundle","906133845","Esther speaks to community member Marcus about their Media AI Bundle - a group of extensions that allow extraction of key details using AI tools. ","d7ad67a6-de8b-4f29-a5a8-c17b05389e34",22,[180],{"name":181,"url":182},"Media AI Bundle on GitHub","https://github.com/Arood/directus-extension-media-ai-bundle",[184,185],{"name":134,"url":135},{"name":186,"url":187},"Marcus Olovsson","https://marcusolovsson.com/",2,"2024-02-20","Media AI Bundle","\u003Cp>Speaker 0: But I'm also open to add more AI features, depending on use cases.\u003C/p>\u003Cp>Speaker 1: Hi there, and welcome to another episode of the Beyond the Core Show. It's a director show where we shine the spotlight on extension developers in the community. My name is Esther Agbaji, and I work as a developer advocate at Directus. And today, I'm joined by a super community member, Ahud. 
Ahud is the community winner of the AI hackathon that held a couple of months back, and he won the hackathon by creating the media AI bundle extension.\u003C/p>\u003Cp>And today, he's here with me to share all about the exciting journey to develop Hynis Extension and some of its features. So thank you, Arud, for joining me here. Could you share, like, some background about yourself and how you get to know about directors?\u003C/p>\u003Cp>Speaker 0: Sure. I I work as a developer, at an agency. Okay. And I was looking for, alternatives to CMSs like WordPress, for example, but I wanted something more, API focused.\u003C/p>\u003Cp>Speaker 1: Okay.\u003C/p>\u003Cp>Speaker 0: So so we had so we could find something that works with larger projects, and more, like, apps and such. And after trying some of them out, I I wasn't really happy with how they worked. They weren't, like, easy to use for the users, at least in my opinion. But then in a Reddit post, I think it was, someone was mentioning, directors and, like, how it was. They they said it was fantastic, but the the it was kind of, under the radar, at that time.\u003C/p>\u003Cp>So Okay. I checked it out, and I liked it too much. So, I started trying to, like, get it approved to to be used in our agency.\u003C/p>\u003Cp>Speaker 1: Okay.\u003C/p>\u003Cp>Speaker 0: And now we have successfully rolled out a few projects using it. And so far, it's been great.\u003C/p>\u003Cp>Speaker 1: Interesting. Interesting. And what's your favorite director's feature?\u003C/p>\u003Cp>Speaker 0: I think it's the interface. Okay. How extensible it is, and how how I can fine tune it for the editors.\u003C/p>\u003Cp>Speaker 1: Okay. Okay. So you like the fact that, like, it's very intuitive and is also extensible. Yeah. Yeah.\u003C/p>\u003Cp>I'm sure you probably like the extension feature because you've created not just 1 or 2 extensions at least that I know of. 
And, yeah, we are here to talk about the media AI bundle, which is a really cool extension. I saw the demo that you did during submission, and it was really good. So, what was the motivation for actually creating this extension?\u003C/p>\u003Cp>Speaker 0: It started off as, idea for a personal project of mine. I wanted to be able to take pictures, of sticky notes.\u003C/p>\u003Cp>Speaker 1: Interesting.\u003C/p>\u003Cp>Speaker 0: Like photos with my phone and upload them to a Kanban board or similar. So that's what got me started. I was I had some APIs in mind that I wanted to use, but now I had a like, an excuse to get it working in directors. So I started working on the, an operations bundle. And then I realized that this could be used for much more.\u003C/p>\u003Cp>For example, the the integration with alt text dot io so you can get alt text for images. So that kind of, formed it into becoming the media, bundle.\u003C/p>\u003Cp>Speaker 1: Okay. Okay. Nice to hear. So you mentioned the old text. Is it like an AI API?\u003C/p>\u003Cp>Speaker 0: Yeah.\u003C/p>\u003Cp>Speaker 1: Okay.\u003C/p>\u003Cp>Speaker 0: So it's like a service you sign up for, and they they transform the image into a readable text, like a sentence or\u003C/p>\u003Cp>Speaker 1: Okay.\u003C/p>\u003Cp>Speaker 0: Sentences for that describes the image.\u003C/p>\u003Cp>Speaker 1: Okay. Okay. So, basically, the extension that you created, you upload an image, and it just gives you the old text or even if it's a screenshot of maybe, you know, text on a piece of paper, you can also read and extract that text also. Right?\u003C/p>\u003Cp>Speaker 0: Yeah. And I have 2 two operations currently in the bundle. 1 is for the, one I call describe, image, which does the alt text thing. It takes the image and extracts the text or sentence. You can also use Amazon Web Services, but then you'll only get, like, a comma separated list of words.\u003C/p>\u003Cp>So it's not as fancy. Okay. 
But it's an alternate alternative Alternative. Okay. Yeah.\u003C/p>\u003Cp>And the other one is the extract text operation that actually reads, with, like, OCR, and tries to find text in an image. So you can extract.\u003C/p>\u003Cp>Speaker 1: Okay. Extract the text from an image. Yeah. Alright. Yeah.\u003C/p>\u003Cp>Would you like to share your screen? So just walk us through maybe some parts of the code and then a quick demo of the extension?\u003C/p>\u003Cp>Speaker 0: So as I mentioned, it's a bundle. So you have a source directory with the different operations in this case. And the plan was to add more, as I get more, like, cases\u003C/p>\u003Cp>Speaker 1: Mhmm.\u003C/p>\u003Cp>Speaker 0: Use cases.\u003C/p>\u003Cp>Speaker 1: Use cases. Yeah.\u003C/p>\u003Cp>Speaker 0: So here we have the describe image, and the extract text from the image operation. So if you take the describe the image, I think that's the more more fun one.\u003C/p>\u003Cp>Speaker 1: Okay.\u003C/p>\u003Cp>Speaker 0: We have the front end part or the app part Mhmm. Which defines the operation. And here we have some settings that you can set up. You can choose if you want to use alt text dot io or Amazon Amazon\u003C/p>\u003Cp>Speaker 1: Cognition. Okay.\u003C/p>\u003Cp>Speaker 0: And you you will need API keys for these and Mhmm. You set them up in your environment variables. Okay. But I'm hoping to add some kind of, like, settings page or something, where you can do it a bit easier.\u003C/p>\u003Cp>Speaker 1: Mhmm.\u003C/p>\u003Cp>Speaker 0: And then on the, client side or or the server side\u003C/p>\u003Cp>Speaker 1: Service side. Yeah.\u003C/p>\u003Cp>Speaker 0: We have the API for this, or the director's API. So we get an image. 
You you can set up, like, a hook for or a flow with a hook for file upload.\u003C/p>\u003Cp>Speaker 1: Mhmm.\u003C/p>\u003Cp>Speaker 0: And then we'll check if we have, like, a image, if the file is an image, and then we will create a buffer from it so we can send the entire image from\u003C/p>\u003Cp>Speaker 1: Mhmm.\u003C/p>\u003Cp>Speaker 0: To this to the API.\u003C/p>\u003Cp>Speaker 1: Okay. So you first verify if it's an image before you then send it.\u003C/p>\u003Cp>Speaker 0: Yeah. So Okay. We don't waste any, like, credits or such. Okay. And then we get a result.\u003C/p>\u003Cp>And, I try to, like, return a common format for it. So it shouldn't depend matter which API you use. You should have, like, a you should be able to expect what kind of\u003C/p>\u003Cp>Speaker 1: Okay.\u003C/p>\u003Cp>Speaker 0: Properties it will return, and they are documented in the readme.\u003C/p>\u003Cp>Speaker 1: In the readme. Okay.\u003C/p>\u003Cp>Speaker 0: So in this case, you get the description. But I also have, like, a dollar param row property that has the original payload. So if you want to get something specific from Amazon or from alt text, you can\u003C/p>\u003Cp>Speaker 1: You can get\u003C/p>\u003Cp>Speaker 0: that as well.\u003C/p>\u003Cp>Speaker 1: Okay.\u003C/p>\u003Cp>Speaker 0: Nice. Without having to exchange the the extension. And the same thing here for Amazon. The API used there is called detect labels.\u003C/p>\u003Cp>Speaker 1: Okay.\u003C/p>\u003Cp>Speaker 0: Yeah.\u003C/p>\u003Cp>Speaker 1: Okay.\u003C/p>\u003Cp>Speaker 0: Nice. That's how it works.\u003C/p>\u003Cp>Speaker 1: That's That's the one for describing the image. Yeah? Yeah. Okay. Okay.\u003C/p>\u003Cp>Let's check out the one for, like, extracting the text from image briefly.\u003C/p>\u003Cp>Speaker 0: Yeah. 
It works pretty much the same, except it doesn't have it only has Amazon recognition for now.\u003C/p>\u003Cp>Speaker 1: Okay.\u003C/p>\u003Cp>Speaker 0: I am we I am planning to add ash Azure Vision AI.\u003C/p>\u003Cp>Speaker 1: Azure. Okay.\u003C/p>\u003Cp>Speaker 0: Because we use that a lot at my work.\u003C/p>\u003Cp>Speaker 1: Okay.\u003C/p>\u003Cp>Speaker 0: But for now, it's just Amazon.\u003C/p>\u003Cp>Speaker 1: Alright.\u003C/p>\u003Cp>Speaker 0: But the principle is the same. We we you you we receive an image. We take the stream, and send it to, an API. Mhmm. And the only difference here is, of course, which, API we're sending it to.\u003C/p>\u003Cp>So in this case, it's the detect text command\u003C/p>\u003Cp>Speaker 1: Command. Okay.\u003C/p>\u003Cp>Speaker 0: On the Amazon SDK. And in this case, you get a bit more parameters back, property spec. So you can get either the I try to transform them into lines, with text and, like, where in the image it's located. But you also get a full text if you're just trying to, like, transcribe an image.\u003C/p>\u003Cp>Speaker 1: Yeah.\u003C/p>\u003Cp>Speaker 0: Okay. So if you want that quick and easy fix, you can use the full text form.\u003C/p>\u003Cp>Speaker 1: Full text. Okay. Okay.\u003C/p>\u003Cp>Speaker 0: But if you want to, like, get down with the greater details, you can use the lines.\u003C/p>\u003Cp>Speaker 1: The lines. Yeah. That's very clear and nifty. Did you face any challenges or issues when you were building, you know, all of these bundles?\u003C/p>\u003Cp>Speaker 0: I think the for for the most part, it's it's been pretty\u003C/p>\u003Cp>Speaker 1: No. It's pretty straightforward. Okay.\u003C/p>\u003Cp>Speaker 0: Yeah. Of course, the the I guess the one thing that was a bit tricky is, like, trying to find the right services. I know that that has been added to the documentation now.\u003C/p>\u003Cp>Speaker 1: Yes. 
It has.\u003C/p>\u003Cp>Speaker 0: When I wrote this, there wasn't really much.\u003C/p>\u003Cp>Speaker 1: Wasn't. Yeah. True. I remember. Yeah.\u003C/p>\u003Cp>True. But now we've included services in the docs.\u003C/p>\u003Cp>Speaker 0: Yeah. So getting the, like, asset service and figuring out how that works, took a bit of time. It wasn't wasn't hard, but you had to, like, shake your house.\u003C/p>\u003Cp>Speaker 1: Again, to the core and the doors.\u003C/p>\u003Cp>Speaker 0: Yeah. But that's also a strength of directors, I think, yeah, that you can do that.\u003C/p>\u003Cp>Speaker 1: Yeah. Can you imagine that? Okay. That's cool. Let's go into the bundle to see how it works in directors.\u003C/p>\u003Cp>Speaker 0: Yes. So I have a flow here. Let me see in a bit.\u003C/p>\u003Cp>Speaker 1: Yeah. Yeah. It's good now.\u003C/p>\u003Cp>Speaker 0: Called file uploads.\u003C/p>\u003Cp>Speaker 1: Okay.\u003C/p>\u003Cp>Speaker 0: And here we have when I created this, I made a trigger with non blocking action with the scope files.\u003C/p>\u003Cp>Speaker 1: Files that's uploaded.\u003C/p>\u003Cp>Speaker 0: Mhmm. Currently, there's no filter for it. I think I'm gonna write a feature request for that. I think it would be useful to, like, be wait for the\u003C/p>\u003Cp>Speaker 1: Before I wait for some time before I\u003C/p>\u003Cp>Speaker 0: finish before fires up. Yeah. Because right now, you you when you upload it, you you get sent to the, like, start page, and you don't see the changes until they have performed. So you have to wait a bit.\u003C/p>\u003Cp>Speaker 1: I see. Yeah.\u003C/p>\u003Cp>Speaker 0: But for now, it works well with the non blocking one. Then we have the operation from my extension, describe image. And here we are select which API we want to use and you are able to change the field if you want to. But in most cases, it's the trigger dot key, which is the image you have uploaded. 
But I'm left it a bit configurable if you have more advanced flows.\u003C/p>\u003Cp>Speaker 1: Yeah.\u003C/p>\u003Cp>Speaker 0: Then when this is run, you get some data. So I have an update data operation here.\u003C/p>\u003Cp>Speaker 1: Mhmm.\u003C/p>\u003Cp>Speaker 0: Where I simply just update the, payload of the file that's created in Directus. So in this case, I put the description from the operation into the description field.\u003C/p>\u003Cp>Speaker 1: Mhmm. Yeah.\u003C/p>\u003Cp>Speaker 0: In some cases, I might put a transcript or similar in between just to clean up a bit, especially if I use the other, operation that can the the extract text one. So if you if you would need to, like, make sure that it's not too long or something like that.\u003C/p>\u003Cp>Speaker 1: Okay. Do that. I just remembered, did you, maybe handle errors and all in the whole, you know, operations?\u003C/p>\u003Cp>Speaker 0: Some errors, but in most cases, it will silently fail. Like, if you upload a image that isn't or a file that isn't an image. It will just silently It's a test. Failed.\u003C/p>\u003Cp>Speaker 1: So you don't Okay.\u003C/p>\u003Cp>Speaker 0: Yeah. So you don't get, like, a lot of error messages. But, I'm not sure if I I think I have some,\u003C/p>\u003Cp>Speaker 1: yeah, some error message. Okay.\u003C/p>\u003Cp>Speaker 0: Oh, actually, I see. You seem to throw even if the image is\u003C/p>\u003Cp>Speaker 1: If it's not image, it threw as an error also. Yeah.\u003C/p>\u003Cp>Speaker 0: Or maybe I added that. Yeah. Anyway Yeah. So you can do a failed state if you want.\u003C/p>\u003Cp>Speaker 1: Path also. Okay. Okay. Yeah. I think that's fine.\u003C/p>\u003Cp>Yeah. Would you like to maybe try with could we do a demo of just uploading a an image or a screenshot? Sure.\u003C/p>\u003Cp>Speaker 0: Just gonna find the good image to upload. Okay. So here we have my file library. And Mhmm. 
I'm gonna drop an image here from my other screen.\u003C/p>\u003Cp>Speaker 1: Okay.\u003C/p>\u003Cp>Speaker 0: And we wait a little bit, and then we check if we get\u003C/p>\u003Cp>Speaker 1: Nice. The tracking that we get. White cut. Yeah. Sitting on a blanket indicate very descriptive to the point.\u003C/p>\u003Cp>Yeah. Yeah. That makes a lot of sense. Lovely.\u003C/p>\u003Cp>Speaker 0: And, of course, the the big advantage of this would be, like, to have alternative texts in on websites if you have, someone with a disability so they can't see. Yes. Yep. Then they can get this re read up instead.\u003C/p>\u003Cp>Speaker 1: I would just love to know, in terms of improvements to the extension, the things that, you know, features that you would like to add, what are some of the features or improvements going forward that you love to add to the extension?\u003C/p>\u003Cp>Speaker 0: Yeah. So one thing I want to add is, like a settings page.\u003C/p>\u003Cp>Speaker 1: So The settings page?\u003C/p>\u003Cp>Speaker 0: Yeah. So so you can ease more easily add the API keys, I'm thinking. I'm also thinking, like, that that will be needed for, for example, the marketplace.\u003C/p>\u003Cp>Speaker 1: Yes.\u003C/p>\u003Cp>Speaker 0: Perhaps or at least it will make it easier. But I'm also open to add more AI features, depending on use cases. I had one, for example, with, trying to detect objects in an image. Okay. So that might be something we I'm gonna add later on.\u003C/p>\u003Cp>And as I mentioned, support for other services like Azure, perhaps Google, if if, anyone needs that.\u003C/p>\u003Cp>Speaker 1: Okay.\u003C/p>\u003Cp>Speaker 0: So so if anyone has any suggestions, I recommend posting it in on GitHub.\u003C/p>\u003Cp>Speaker 1: Feel free to. Yeah. Either comment or make an issue or just even comment in our Discord channel as well. Thank you. Alright.\u003C/p>\u003Cp>Yeah. I'm really excited to see when the marketplace launches. 
Hopefully, we're able to have this extension in the marketplace because I know lots of people would also find it equally useful and important in their projects as well. Alright. So the very final question I have is, I know you've also built the Jira panel.\u003C/p>\u003Cp>The last hackathon you worked on Jira panels. So what's will I say advice or you know? Yeah, advice would you have or give to anyone that is building extensions? Because you have bit of more experience in that aspect. So anyone building a custom director's extension.\u003C/p>\u003Cp>Speaker 0: I think my advice would be to not be afraid of looking into the, direct to source code, because there's a lot of it's built very modular. So a lot of the existing interfaces and operations and all of that are available. So you can see how they are built. So even if the even if you can't find anything in if you're looking for something in the documentation and not finding anything that you might find how, under interface or operation, have sold.\u003C/p>","But I'm also open to add more AI features, depending on use cases. Hi there, and welcome to another episode of the Beyond the Core Show. It's a director show where we shine the spotlight on extension developers in the community. My name is Esther Agbaji, and I work as a developer advocate at Directus. And today, I'm joined by a super community member, Ahud. Ahud is the community winner of the AI hackathon that held a couple of months back, and he won the hackathon by creating the media AI bundle extension. And today, he's here with me to share all about the exciting journey to develop Hynis Extension and some of its features. So thank you, Arud, for joining me here. Could you share, like, some background about yourself and how you get to know about directors? Sure. I I work as a developer, at an agency. Okay. And I was looking for, alternatives to CMSs like WordPress, for example, but I wanted something more, API focused. Okay. 
So so we had so we could find something that works with larger projects, and more, like, apps and such. And after trying some of them out, I I wasn't really happy with how they worked. They weren't, like, easy to use for the users, at least in my opinion. But then in a Reddit post, I think it was, someone was mentioning, directors and, like, how it was. They they said it was fantastic, but the the it was kind of, under the radar, at that time. So Okay. I checked it out, and I liked it too much. So, I started trying to, like, get it approved to to be used in our agency. Okay. And now we have successfully rolled out a few projects using it. And so far, it's been great. Interesting. Interesting. And what's your favorite director's feature? I think it's the interface. Okay. How extensible it is, and how how I can fine tune it for the editors. Okay. Okay. So you like the fact that, like, it's very intuitive and is also extensible. Yeah. Yeah. I'm sure you probably like the extension feature because you've created not just 1 or 2 extensions at least that I know of. And, yeah, we are here to talk about the media AI bundle, which is a really cool extension. I saw the demo that you did during submission, and it was really good. So, what was the motivation for actually creating this extension? It started off as, idea for a personal project of mine. I wanted to be able to take pictures, of sticky notes. Interesting. Like photos with my phone and upload them to a Kanban board or similar. So that's what got me started. I was I had some APIs in mind that I wanted to use, but now I had a like, an excuse to get it working in directors. So I started working on the, an operations bundle. And then I realized that this could be used for much more. For example, the the integration with alt text dot io so you can get alt text for images. So that kind of, formed it into becoming the media, bundle. Okay. Okay. Nice to hear. So you mentioned the old text. Is it like an AI API? Yeah. Okay. 
So it's like a service you sign up for, and they they transform the image into a readable text, like a sentence or Okay. Sentences for that describes the image. Okay. Okay. So, basically, the extension that you created, you upload an image, and it just gives you the old text or even if it's a screenshot of maybe, you know, text on a piece of paper, you can also read and extract that text also. Right? Yeah. And I have 2 two operations currently in the bundle. 1 is for the, one I call describe, image, which does the alt text thing. It takes the image and extracts the text or sentence. You can also use Amazon Web Services, but then you'll only get, like, a comma separated list of words. So it's not as fancy. Okay. But it's an alternate alternative Alternative. Okay. Yeah. And the other one is the extract text operation that actually reads, with, like, OCR, and tries to find text in an image. So you can extract. Okay. Extract the text from an image. Yeah. Alright. Yeah. Would you like to share your screen? So just walk us through maybe some parts of the code and then a quick demo of the extension? So as I mentioned, it's a bundle. So you have a source directory with the different operations in this case. And the plan was to add more, as I get more, like, cases Mhmm. Use cases. Use cases. Yeah. So here we have the describe image, and the extract text from the image operation. So if you take the describe the image, I think that's the more more fun one. Okay. We have the front end part or the app part Mhmm. Which defines the operation. And here we have some settings that you can set up. You can choose if you want to use alt text dot io or Amazon Amazon Cognition. Okay. And you you will need API keys for these and Mhmm. You set them up in your environment variables. Okay. But I'm hoping to add some kind of, like, settings page or something, where you can do it a bit easier. Mhmm. And then on the, client side or or the server side Service side. Yeah. 
We have the API for this, or the director's API. So we get an image. You you can set up, like, a hook for or a flow with a hook for file upload. Mhmm. And then we'll check if we have, like, a image, if the file is an image, and then we will create a buffer from it so we can send the entire image from Mhmm. To this to the API. Okay. So you first verify if it's an image before you then send it. Yeah. So Okay. We don't waste any, like, credits or such. Okay. And then we get a result. And, I try to, like, return a common format for it. So it shouldn't depend matter which API you use. You should have, like, a you should be able to expect what kind of Okay. Properties it will return, and they are documented in the readme. In the readme. Okay. So in this case, you get the description. But I also have, like, a dollar param row property that has the original payload. So if you want to get something specific from Amazon or from alt text, you can You can get that as well. Okay. Nice. Without having to exchange the the extension. And the same thing here for Amazon. The API used there is called detect labels. Okay. Yeah. Okay. Nice. That's how it works. That's That's the one for describing the image. Yeah? Yeah. Okay. Okay. Let's check out the one for, like, extracting the text from image briefly. Yeah. It works pretty much the same, except it doesn't have it only has Amazon recognition for now. Okay. I am we I am planning to add ash Azure Vision AI. Azure. Okay. Because we use that a lot at my work. Okay. But for now, it's just Amazon. Alright. But the principle is the same. We we you you we receive an image. We take the stream, and send it to, an API. Mhmm. And the only difference here is, of course, which, API we're sending it to. So in this case, it's the detect text command Command. Okay. On the Amazon SDK. And in this case, you get a bit more parameters back, property spec. 
So you can get either the I try to transform them into lines, with text and, like, where in the image it's located. But you also get a full text if you're just trying to, like, transcribe an image. Yeah. Okay. So if you want that quick and easy fix, you can use the full text form. Full text. Okay. Okay. But if you want to, like, get down with the greater details, you can use the lines. The lines. Yeah. That's very clear and nifty. Did you face any challenges or issues when you were building, you know, all of these bundles? I think the for for the most part, it's it's been pretty No. It's pretty straightforward. Okay. Yeah. Of course, the the I guess the one thing that was a bit tricky is, like, trying to find the right services. I know that that has been added to the documentation now. Yes. It has. When I wrote this, there wasn't really much. Wasn't. Yeah. True. I remember. Yeah. True. But now we've included services in the docs. Yeah. So getting the, like, asset service and figuring out how that works, took a bit of time. It wasn't wasn't hard, but you had to, like, shake your house. Again, to the core and the doors. Yeah. But that's also a strength of directors, I think, yeah, that you can do that. Yeah. Can you imagine that? Okay. That's cool. Let's go into the bundle to see how it works in directors. Yes. So I have a flow here. Let me see in a bit. Yeah. Yeah. It's good now. Called file uploads. Okay. And here we have when I created this, I made a trigger with non blocking action with the scope files. Files that's uploaded. Mhmm. Currently, there's no filter for it. I think I'm gonna write a feature request for that. I think it would be useful to, like, be wait for the Before I wait for some time before I finish before fires up. Yeah. Because right now, you you when you upload it, you you get sent to the, like, start page, and you don't see the changes until they have performed. So you have to wait a bit. I see. Yeah. 
But for now, it works well with the non blocking one. Then we have the operation from my extension, describe image. And here we are select which API we want to use and you are able to change the field if you want to. But in most cases, it's the trigger dot key, which is the image you have uploaded. But I'm left it a bit configurable if you have more advanced flows. Yeah. Then when this is run, you get some data. So I have an update data operation here. Mhmm. Where I simply just update the, payload of the file that's created in Directus. So in this case, I put the description from the operation into the description field. Mhmm. Yeah. In some cases, I might put a transcript or similar in between just to clean up a bit, especially if I use the other, operation that can the the extract text one. So if you if you would need to, like, make sure that it's not too long or something like that. Okay. Do that. I just remembered, did you, maybe handle errors and all in the whole, you know, operations? Some errors, but in most cases, it will silently fail. Like, if you upload a image that isn't or a file that isn't an image. It will just silently It's a test. Failed. So you don't Okay. Yeah. So you don't get, like, a lot of error messages. But, I'm not sure if I I think I have some, yeah, some error message. Okay. Oh, actually, I see. You seem to throw even if the image is If it's not image, it threw as an error also. Yeah. Or maybe I added that. Yeah. Anyway Yeah. So you can do a failed state if you want. Path also. Okay. Okay. Yeah. I think that's fine. Yeah. Would you like to maybe try with could we do a demo of just uploading a an image or a screenshot? Sure. Just gonna find the good image to upload. Okay. So here we have my file library. And Mhmm. I'm gonna drop an image here from my other screen. Okay. And we wait a little bit, and then we check if we get Nice. The tracking that we get. White cut. Yeah. Sitting on a blanket indicate very descriptive to the point. Yeah. 
Yeah. That makes a lot of sense. Lovely. And, of course, the the big advantage of this would be, like, to have alternative texts in on websites if you have, someone with a disability so they can't see. Yes. Yep. Then they can get this re read up instead. I would just love to know, in terms of improvements to the extension, the things that, you know, features that you would like to add, what are some of the features or improvements going forward that you love to add to the extension? Yeah. So one thing I want to add is, like a settings page. So The settings page? Yeah. So so you can ease more easily add the API keys, I'm thinking. I'm also thinking, like, that that will be needed for, for example, the marketplace. Yes. Perhaps or at least it will make it easier. But I'm also open to add more AI features, depending on use cases. I had one, for example, with, trying to detect objects in an image. Okay. So that might be something we I'm gonna add later on. And as I mentioned, support for other services like Azure, perhaps Google, if if, anyone needs that. Okay. So so if anyone has any suggestions, I recommend posting it in on GitHub. Feel free to. Yeah. Either comment or make an issue or just even comment in our Discord channel as well. Thank you. Alright. Yeah. I'm really excited to see when the marketplace launches. Hopefully, we're able to have this extension in the marketplace because I know lots of people would also find it equally useful and important in their projects as well. Alright. So the very final question I have is, I know you've also built the Jira panel. The last hackathon you worked on Jira panels. So what's will I say advice or you know? Yeah, advice would you have or give to anyone that is building extensions? Because you have bit of more experience in that aspect. So anyone building a custom director's extension. I think my advice would be to not be afraid of looking into the, direct to source code, because there's a lot of it's built very modular. 
So a lot of the existing interfaces and operations and all of that are available. So you can see how they are built. So even if the even if you can't find anything in if you're looking for something in the documentation and not finding anything that you might find how, under interface or operation, have sold.",[194,195],"ab35f5f0-c867-45fc-ba93-1db703826e8d","491e73a6-c4e8-497a-a18f-597c3aadbda6",[],{"reps":198},[199,255],{"name":200,"sdr":8,"link":201,"countries":202,"states":204},"John Daniels","https://meet.directus.io/meetings/john2144/john-contact-form-meeting",[203],"United States",[205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254],"Michigan","Indiana","Ohio","West Virginia","Kentucky","Virginia","Tennessee","North Carolina","South Carolina","Georgia","Florida","Alabama","Mississippi","New York","MI","IN","OH","WV","KY","VA","TN","NC","SC","GA","FL","AL","MS","NY","Connecticut","CT","Delaware","DE","Maine","ME","Maryland","MD","Massachusetts","MA","New Hampshire","NH","New Jersey","NJ","Pennsylvania","PA","Rhode Island","RI","Vermont","VT","Washington DC","DC",{"name":256,"link":257,"countries":258},"Michelle 
Riber","https://meetings.hubspot.com/mriber",[259,260,261,262,263,264,265,266,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,289,290,291,292,293,294,295,296,297,298,299,300,301,302,303,304,305,306,307,308,309,310,311,312,313,314,315,316,317,318,319,320,321,322,323,324,325,326,327,328,329,330,331,332,333,334,335,336,337,338,339,340,341,342,343,344,345,346,347,348,349,350,351,352,353,354,355,356,357,358,359,360,361,362,363,364,365,366,367,368,369,370,371,372,373,374,375,376,377,378,379,380,381,382,383,384,385,386,387,388,389,390,391,392,393,394,395,396,397,398,399,400,401,402,403,404,405,406,407,408,409,410,411,412,413,414,415,416,417,418,419,420,421,422,423,424,425,426,427,428,429,430,431,432,433,434,435,436,437,438,439,440,441,442,443,444,445,446,236,447,448],"Albania","ALB","Algeria","DZA","Andorra","AND","Angola","AGO","Austria","AUT","Belgium","BEL","Benin","BEN","Bosnia and Herzegovina","BIH","Botswana","BWA","Bulgaria","BGR","Burkina Faso","BFA","Burundi","BDI","Cameroon","CMR","Cape Verde","CPV","Central African Republic","CAF","Chad","TCD","Comoros","COM","Côte d'Ivoire","CIV","Croatia","HRV","Czech Republic","CZE","Democratic Republic of Congo","COD","Denmark","DNK","Djibouti","DJI","Egypt","EGY","Equatorial Guinea","GNQ","Eritrea","ERI","Estonia","EST","Eswatini","SWZ","Ethiopia","ETH","Finland","FIN","France","FRA","Gabon","GAB","Gambia","GMB","Ghana","GHA","Greece","GRC","Guinea","GIN","Guinea-Bissau","GNB","Hungary","HUN","Iceland","ISL","Ireland","IRL","Italy","ITA","Kenya","KEN","Latvia","LVA","Lesotho","LSO","Liberia","LBR","Libya","LBY","Liechtenstein","LIE","Lithuania","LTU","Luxembourg","LUX","Madagascar","MDG","Malawi","MWI","Mali","MLI","Malta","MLT","Mauritania","MRT","Mauritius","MUS","Moldova","MDA","Monaco","MCO","Montenegro","MNE","Morocco","MAR","Mozambique","MOZ","Namibia","NAM","Niger","NER","Nigeria","NGA","North Macedonia","MKD","Norway","NOR","Poland","POL","Portugal","PRT","Republic of 
Congo","COG","Romania","ROU","Rwanda","RWA","San Marino","SMR","São Tomé and Príncipe","STP","Senegal","SEN","Serbia","SRB","Seychelles","SYC","Sierra Leone","SLE","Slovakia","SVK","Slovenia","SVN","Somalia","SOM","South Africa","ZAF","South Sudan","SSD","Spain","ESP","Sudan","SDN","Sweden","SWE","Tanzania","TZA","Togo","TGO","Tunisia","TUN","Uganda","UGA","United Kingdom","GBR","Vatican City","VAT","Zambia","ZMB","Zimbabwe","ZWE","UK","Germany","Netherlands","Switzerland","CH","NL",1773850420577]