[{"data":1,"prerenderedAt":435},["ShallowReactive",2],{"footer-primary":3,"footer-secondary":93,"footer-description":119,"quick-connect-clarifai":121,"quick-connect-clarifai-next":169,"sales-reps":183},{"items":4},[5,29,49,69],{"id":6,"title":7,"url":8,"page":8,"children":9},"522e608a-77b0-4333-820d-d4f44be2ade1","Solutions",null,[10,15,20,25],{"id":11,"title":12,"url":8,"page":13},"fcafe85a-a798-4710-9e7a-776fe413aae5","Headless CMS",{"permalink":14},"/solutions/headless-cms",{"id":16,"title":17,"url":8,"page":18},"79972923-93cf-4777-9e32-5c9b0315fc10","Backend-as-a-Service",{"permalink":19},"/solutions/backend-as-a-service",{"id":21,"title":22,"url":8,"page":23},"0fa8d0c1-7b64-4f6f-939d-d7fdb99fc407","Product Information",{"permalink":24},"/solutions/product-information-management",{"id":26,"title":27,"url":28,"page":8},"63946d54-6052-4780-8ff4-91f5a9931dcc","100+ Things to Build","https://directus.io/blog/100-tools-apps-and-platforms-you-can-build-with-directus",{"id":30,"title":31,"url":8,"page":8,"children":32},"8ab4f9b1-f3e2-44d6-919b-011d91fe072f","Resources",[33,37,41,45],{"id":34,"title":35,"url":36,"page":8},"f951fb84-8777-4b84-9e91-996fe9d25483","Documentation","https://docs.directus.io",{"id":38,"title":39,"url":40,"page":8},"366febc7-a538-4c08-a326-e6204957f1e3","Guides","https://docs.directus.io/guides/",{"id":42,"title":43,"url":44,"page":8},"aeb9128e-1c5f-417f-863c-2449416433cd","Community","https://directus.chat",{"id":46,"title":47,"url":48,"page":8},"da1c2ed8-0a77-49b0-a903-49c56cb07de5","Release Notes","https://github.com/directus/directus/releases",{"id":50,"title":51,"url":8,"page":8,"children":52},"d61fae8c-7502-494a-822f-19ecff3d0256","Support",[53,57,61,65],{"id":54,"title":55,"url":56,"page":8},"8c43c781-7ebd-475f-a931-747e293c0a88","Issue Tracker","https://github.com/directus/directus/issues",{"id":58,"title":59,"url":60,"page":8},"d77bb78e-cf7b-4e01-932a-514414ba49d3","Feature Requests","https://github.com/directus/directus/discussions?discussions_q=is:open+sort:top",{"id":62,"title":63,"url":64,"page":8},"4346be2b-2c53-476e-b53b-becacec626a6","Community Chat","https://discord.com/channels/725371605378924594/741317677397704757",{"id":66,"title":67,"url":68,"page":8},"26c115d2-49f7-4edc-935e-d37d427fb89d","Cloud Dashboard","https://directus.cloud",{"id":70,"title":71,"url":8,"page":8,"children":72},"49141403-4f20-44ac-8453-25ace1265812","Organization",[73,78,84,88],{"id":74,"title":75,"url":76,"page":77},"1f36ea92-8a5e-47c8-914c-9822a8b9538a","About","/about",{"permalink":76},{"id":79,"title":80,"url":81,"page":82},"b84bf525-5471-4b14-a93c-225f6c386005","Careers","#",{"permalink":83},"/careers",{"id":85,"title":86,"url":87,"page":8},"86aabc3a-433d-434b-9efa-ad1d34be0a34","Brand Assets","https://drive.google.com/drive/folders/1lBOTba4RaA5ikqOn8Ewo4RYzD0XcymG9?usp=sharing",{"id":89,"title":90,"url":8,"page":91},"8d2fa1e3-198e-4405-81e1-2ceb858bc237","Contact",{"permalink":92},"/contact",{"items":94},[95,101,107,113],{"id":96,"title":97,"url":8,"page":98,"children":100},"8a1b7bfa-429d-4ffc-a650-2a5fdcf356da","Cloud Policies",{"permalink":99},"/cloud-policies",[],{"id":102,"title":103,"url":81,"page":104,"children":106},"bea848ef-828f-4306-8017-6b00ec5d4a0c","License",{"permalink":105},"/bsl",[],{"id":108,"title":109,"url":81,"page":110,"children":112},"4e914f47-4bee-42b7-b445-3119ee4196ef","Terms",{"permalink":111},"/terms",[],{"id":114,"title":115,"url":81,"page":116,"children":118},"ea69eda6-d317-4981-8421-fcabb1826bfd","Privacy",{"permalink":117},"/privacy",[],{"description":120},"\u003Cp>A composable backend to build your Headless CMS, BaaS, and more.&nbsp;\u003C/p>",{"id":122,"slug":123,"vimeo_id":124,"description":125,"tile":126,"length":127,"resources":128,"people":132,"episode_number":136,"published":137,"title":138,"video_transcript_html":139,"video_transcript_text":140,"content":8,"status":141,"episode_people":142,"recommendations":153,"season":154,"seo":8},"8f933ee9-4e4f-4e35-8c1f-e99ad0684bfa","clarifai","898557806","Automatically tag new image files with Clarifai's Image Recognition Model.","60c0e3e7-7141-4121-afd9-fa25e460d161",10,[129],{"name":130,"url":131},"Clarifai Image API Reference","https://docs.clarifai.com/api-guide/predict/images/",[133],{"name":134,"url":135},"Kevin Lewis","https://directus.io/team/kevin-lewis",5,"2024-01-26","Tag Images Automatically with Clarifai","\u003Cp>Speaker 0: Clarify allow you to train and use machine learning models via APIs. Today in QuickConnect, we're going to integrate clarify with directors using directors flows. So whenever we upload a new image to our directors project, we will go off to clarify, understand what's in the image, and update the tags in an image accordingly. Here's an image uploaded in my director's project, and we see here that there are tags. So what we'll be doing is populating this with what we believe is in the image based on a Clarify output.\u003C/p>\u003Cp>So with that, let's get started. When you sign up for a Clarify account, they'll prompt you to create a project, which is totally fine. Projects contain individual permissions and API keys, which you can manage within the scope of the project. Now for today's, today's example, we're actually just going to use the account level personal access token. So even if you don't have a project, head over to your user settings to the security section and copy this personal access token here containing all scopes.\u003C/p>\u003Cp>Just a reminder, other people shouldn't see this key. And if people do, you can go ahead and delete it and create a brand new one. So that's what we need to do here. Let's head over to our director's project and set this up. In your directors project, create a brand new flow.\u003C/p>\u003Cp>I'm going to call this one tag images with clarify. It's going to be a non blocking event hook with a scope of files dot upload, which means this flow will automatically be triggered whenever a new file is uploaded. Let's actually test that out. Let's upload this nice image of a curry that I had recently, and we'll head back to our flow here and refresh. And we should see over in logs that it has been run one time.\u003C/p>\u003Cp>Now inside of the payload, we see that we have this object. The object has a key. So this is a unique identifier for this file within the director's files collection. And there is this payload object with a bunch of properties, including a type. Now this is important because this flow will actually run when we upload any file regardless of file type.\u003C/p>\u003Cp>So the first thing we're gonna wanna do is just filter this down to make sure this flow only continues if the file is an image. So let's create a new condition. I'll call this one is image, and I'll use the following condition rule, which just means that the type has to contain the word image. That means it doesn't matter if it's a JPEG, a PNG, a webp file, a GIF, whatever, as as long as it contains the word image inside of the file type, which it will for any of those types, we can continue. Now we're gonna go ahead and actually call the clarify API.\u003C/p>\u003Cp>So let's create a new operation, and we'll call this one clarify. We'll be making a web request. It will be a post request to this specific URL provided by the Clarify documentation. This is a built in model that they provide called general image recognition, and we're gonna be using this specific trained version of the model. Once again, I've grabbed that straight from their documentation.\u003C/p>\u003Cp>Now we need to authenticate, but it is us. So authorize station authorization. Yeah. Key and then our personal access token. Finally, we need to actually provide the URL of the image we want it to interpret.\u003C/p>\u003Cp>It's a little bit of a convoluted object here. So I've copied and pasted our starter. But what's important is that every image has a direct URL that we can provide to it from the director's project. That is the full URL of your project slash assets and then slash the actual ID or key of that file. And we've already seen that during the trigger.\u003C/p>\u003Cp>It's called trigger dot key. Now this will work if the file permissions are public, but if they're not, we need to provide a way for Clarify to actually access this image. And we can do that by adding a query parameter called access token and then an access token of a user that has access to that image. So let's hit save, and let's try running that again. So let's just delete this image, and we will reupload it.\u003C/p>\u003Cp>We'll head back to our flow. We'll refresh it, and we will see the output of the second invocation of this flow. So if we head to the payload here, we can see a a ton of information about the model itself. What else have we got here? The input image that we provided here, and then all of these concepts, food, meal, dinner, no person, vegetable grow, and so on.\u003C/p>\u003Cp>Now every one of these also has a value between 0 and 1, which is the confidence that it is correct. Now what we wanna do here is we want to end up to create our tags, filtering out any that have 2 lower value and then creating just an array of strings. So the next step here is to create a an operation called transcript. Here we go. So I'll call this one concepts.\u003C/p>\u003Cp>Now that was a big object. I've obviously done a little bit of prep work, so we don't have to go, you know, traversing through this object. I know that the data we want is inside of data dot clarify because that is the name of this step that's hidden by this pane here. Clarify dot outputs, that is an array. We just want the first one.\u003C/p>\u003Cp>Dot data dot concepts. And each one of those is one of those objects that contains the name of the of the concept and the score and so on. So we'll return this, but we wanna do a little bit more work to it. Firstly, we wanna go ahead and filter. We only want concepts where the score or I think they call it the value, so where the concept dot value is greater than 0.95.\u003C/p>\u003Cp>And depending on your use case, you can decide what, what's the correct value is for you. Then we want to go ahead and map it. So we only get come on, Kevin, you can type. So we only get the name. So let's save this.\u003C/p>\u003Cp>Let's run this again, and we'll see the output of this concept script. And what we should get is just an array of strings of the concepts, which have a score of greater than 0.95. Let's just double check that that is correct here. So let's reupload the picture of our food. Let's refresh this and we should see another invocation here yes Oh, that was a little error there.\u003C/p>\u003Cp>So let's head back and figure out what's gone wrong here. Clarify dot outputs. Ah, I don't think this is right. I think it was actually nested inside of the data, the data value. Let's try that again.\u003C/p>\u003Cp>Let's delete this, re upload it refresh There we go. So there's our array of strings, which is fantastic. And this just leaves us with one final step, which is to actually update the tags of that of that newly uploaded images with those values. So let's go ahead and update image. Let's update data.\u003C/p>\u003Cp>This is gonna be inside of the Directus files collection. We'll give it full access. We only want to upload the IDs of the images which match the key of the starting image. It'll only update 1. And finally, the payload here is going to be tags, concepts like so.\u003C/p>\u003Cp>And we can do that because context was an array that was returned. So let's hit save. Let's save this off, and let's give this one final go. We'll delete this image. We will re upload it.\u003C/p>\u003Cp>And in theory, if I open it now, we should see the image is tagged, which you can then use in API requests in order to build applications on top of this data. So I hope you found this interesting. Clarify have a ton of trained models, so we can actually do a lot more with it. But in this small example, you can see how to get started, explore the Clarify documentation, and do even more. Until the next episode, see you later.\u003C/p>","Clarify allow you to train and use machine learning models via APIs. Today in QuickConnect, we're going to integrate clarify with directors using directors flows. So whenever we upload a new image to our directors project, we will go off to clarify, understand what's in the image, and update the tags in an image accordingly. Here's an image uploaded in my director's project, and we see here that there are tags. So what we'll be doing is populating this with what we believe is in the image based on a Clarify output. So with that, let's get started. When you sign up for a Clarify account, they'll prompt you to create a project, which is totally fine. Projects contain individual permissions and API keys, which you can manage within the scope of the project. Now for today's, today's example, we're actually just going to use the account level personal access token. So even if you don't have a project, head over to your user settings to the security section and copy this personal access token here containing all scopes. Just a reminder, other people shouldn't see this key. And if people do, you can go ahead and delete it and create a brand new one. So that's what we need to do here. Let's head over to our director's project and set this up. In your directors project, create a brand new flow. I'm going to call this one tag images with clarify. It's going to be a non blocking event hook with a scope of files dot upload, which means this flow will automatically be triggered whenever a new file is uploaded. Let's actually test that out. Let's upload this nice image of a curry that I had recently, and we'll head back to our flow here and refresh. And we should see over in logs that it has been run one time. Now inside of the payload, we see that we have this object. The object has a key. So this is a unique identifier for this file within the director's files collection. And there is this payload object with a bunch of properties, including a type. Now this is important because this flow will actually run when we upload any file regardless of file type. So the first thing we're gonna wanna do is just filter this down to make sure this flow only continues if the file is an image. So let's create a new condition. I'll call this one is image, and I'll use the following condition rule, which just means that the type has to contain the word image. That means it doesn't matter if it's a JPEG, a PNG, a webp file, a GIF, whatever, as as long as it contains the word image inside of the file type, which it will for any of those types, we can continue. Now we're gonna go ahead and actually call the clarify API. So let's create a new operation, and we'll call this one clarify. We'll be making a web request. It will be a post request to this specific URL provided by the Clarify documentation. This is a built in model that they provide called general image recognition, and we're gonna be using this specific trained version of the model. Once again, I've grabbed that straight from their documentation. Now we need to authenticate, but it is us. So authorize station authorization. Yeah. Key and then our personal access token. Finally, we need to actually provide the URL of the image we want it to interpret. It's a little bit of a convoluted object here. So I've copied and pasted our starter. But what's important is that every image has a direct URL that we can provide to it from the director's project. That is the full URL of your project slash assets and then slash the actual ID or key of that file. And we've already seen that during the trigger. It's called trigger dot key. Now this will work if the file permissions are public, but if they're not, we need to provide a way for Clarify to actually access this image. And we can do that by adding a query parameter called access token and then an access token of a user that has access to that image. So let's hit save, and let's try running that again. So let's just delete this image, and we will reupload it. We'll head back to our flow. We'll refresh it, and we will see the output of the second invocation of this flow. So if we head to the payload here, we can see a a ton of information about the model itself. What else have we got here? The input image that we provided here, and then all of these concepts, food, meal, dinner, no person, vegetable grow, and so on. Now every one of these also has a value between 0 and 1, which is the confidence that it is correct. Now what we wanna do here is we want to end up to create our tags, filtering out any that have 2 lower value and then creating just an array of strings. So the next step here is to create a an operation called transcript. Here we go. So I'll call this one concepts. Now that was a big object. I've obviously done a little bit of prep work, so we don't have to go, you know, traversing through this object. I know that the data we want is inside of data dot clarify because that is the name of this step that's hidden by this pane here. Clarify dot outputs, that is an array. We just want the first one. Dot data dot concepts. And each one of those is one of those objects that contains the name of the of the concept and the score and so on. So we'll return this, but we wanna do a little bit more work to it. Firstly, we wanna go ahead and filter. We only want concepts where the score or I think they call it the value, so where the concept dot value is greater than 0.95. And depending on your use case, you can decide what, what's the correct value is for you. Then we want to go ahead and map it. So we only get come on, Kevin, you can type. So we only get the name. So let's save this. Let's run this again, and we'll see the output of this concept script. And what we should get is just an array of strings of the concepts, which have a score of greater than 0.95. Let's just double check that that is correct here. So let's reupload the picture of our food. Let's refresh this and we should see another invocation here yes Oh, that was a little error there. So let's head back and figure out what's gone wrong here. Clarify dot outputs. Ah, I don't think this is right. I think it was actually nested inside of the data, the data value. Let's try that again. Let's delete this, re upload it refresh There we go. So there's our array of strings, which is fantastic. And this just leaves us with one final step, which is to actually update the tags of that of that newly uploaded images with those values. So let's go ahead and update image. Let's update data. This is gonna be inside of the Directus files collection. We'll give it full access. We only want to upload the IDs of the images which match the key of the starting image. It'll only update 1. And finally, the payload here is going to be tags, concepts like so. And we can do that because context was an array that was returned. So let's hit save. Let's save this off, and let's give this one final go. We'll delete this image. We will re upload it. And in theory, if I open it now, we should see the image is tagged, which you can then use in API requests in order to build applications on top of this data. So I hope you found this interesting. Clarify have a ton of trained models, so we can actually do a lot more with it. But in this small example, you can see how to get started, explore the Clarify documentation, and do even more. Until the next episode, see you later.","published",[143],{"people_id":144},{"id":145,"first_name":146,"last_name":147,"avatar":148,"bio":149,"links":150},"82b3f7e5-637b-4890-93b2-378b497d5dc6","Kevin","Lewis","a662f91b-1ee9-4277-8c9d-3ac1878e44ad","Director of Developer Experience at Directus",[151],{"url":135,"service":152},"website",[],{"id":155,"number":156,"year":157,"episodes":158,"show":166},"3b8b7d34-a0fb-4ea6-85ff-2b5bfbb8e0b6",1,"2023",[159,160,161,162,122,163,164,165],"502dcf7e-c23e-4dfd-b147-65f5abaea5c7","a230c9ef-8db4-4c00-a0cb-9524f7934eb0","5f41dc16-29b7-485f-a6e1-081c3f1acc4f","81417d25-26d2-4f05-be37-7ced51a0594e","71e081db-92f8-4978-b020-7d2460a46187","8e47020d-bd5a-43a7-bca9-54af4f5d465d","bfb8bc25-ef1b-4544-b50d-402008c638a1",{"title":167,"tile":168},"Quick Connect","1171b046-491e-4cfb-a68c-527b89c2c348",{"id":170,"slug":171,"season":172,"vimeo_id":173,"description":174,"tile":175,"length":127,"resources":8,"people":8,"episode_number":156,"published":176,"title":174,"video_transcript_html":177,"video_transcript_text":178,"content":8,"seo":179,"status":141,"episode_people":180,"recommendations":182},"979db4da-a870-4120-94ee-bd80789f411c","firecrawl","cf7a056d-fa10-4bc5-8cc3-c2b9ef59b684","1026203173","Integrating Firecrawl with Directus","d0a87153-8475-433f-aca0-dea9802caf03","2024-10-16","\u003Cp>Speaker 0: Hello there. I'm really excited about this tutorial. So on Directus TV, we already have a show called Quick Connect, which shows you how to integrate third party services with Directus using Directus Automate and Flows. And in the spirit of that show, today, I'm gonna show you how to integrate FireCrawl with Directus. Now here they say that they turn websites into LLM ready data.\u003C/p>\u003Cp>And what that means in practice is you can feed it a URL, provide some options if you want, and it will go and take a look at that web page and return some structured data for you like so. This is their scrape endpoint, which will take a single web page and scrape some data from it. They also have a couple of other endpoints, crawl and map, but today, we're gonna use scrape. Now I've already logged into FireCrawl Cloud and generated an API key, which I'll copy for later. You can also self host FireCrawl, but for ease, I'm just gonna use their cloud product here.\u003C/p>\u003Cp>Now I have this directors project over here with a new empty collection called companies. In this collection, there are a few fields. URL, a name, a description, mission, and a boolean, a true force value, is it open source. And our goal will be to provide the URL and then have FireCrawl automatically populate the rest with flows. So let's go ahead and create a new flow.\u003C/p>\u003Cp>So this is our automation builder if you've not seen it before. I'll call this one get company data, I guess. And we are going to use a manual trigger, which will add a button to the side of collection and item pages. So we're going to say we'll run this on the company's collection. What else matters here?\u003C/p>\u003Cp>We're going to, not require selection, so the button always works. And we're going to require confirmation, which will pop up a modal. And in that modal, we will just add a URL. We'll make it a string input, and we'll make it full widths. And I think that's all we need to do here.\u003C/p>\u003Cp>So just to see what happens here, if I go back to the, to the company's collection, we now see this button here, this manual flow, trigger. I click that. It pulls up the box, and we'll put in a URL and hit run flow. So now if we go back to our flow, we should immediately see that there is one log. And in here, there is a body, and the URL is the value that we typed.\u003C/p>\u003Cp>Fantastic. Now we need to actually do something with it. So, let's go ahead and add a new operation here. And, honestly, fire crawl is pretty sick. You can just make one web request.\u003C/p>\u003Cp>Let's take a look at their docs. We're gonna use the LLM extract, endpoint here, and let's just take a look at the kind of construction of this API call. It's a post request to this URL. We're gonna pass in our, our API key here as an authorization header, and then they give us this kind of JSON payload here. Here, it's telling it to go ahead and extract specifically these four fields, the company mission, does it support SSO, is it open source, and is it in Y Combinator?\u003C/p>\u003Cp>And it's saying you must go get all four of these. So let's actually just turn this straight into a flow request, request URL, operation. So we're gonna do a post request to this URL, post request to this URL. I'm gonna go and copy my API key again here, and at the end of this I'll, I'll destroy the key. Authorization authorization, bearer API key, save, And then there's the request body.\u003C/p>\u003Cp>And, honestly, it contains a little more than we need, but this contains everything we need. So we'll just pop that in there. The only thing we wanna do, of course, is pass in the URL that we put in the box. So we'll replace this with trigger.body. URL.\u003C/p>\u003Cp>Fantastic. Let's save that and see what happens if we go over to content, press the button, and type in directors.io. We see that's running. That's running. That's a good sign.\u003C/p>\u003Cp>It means it's going off and making the request, waiting for the request. And then we see there is a second log. And we get some data back. There was a 200, so it was successful. Inside of data, there is a property called data, and then there is this value called extract.\u003C/p>\u003Cp>Extract contains all of those custom keys we asked for, company mission, supports SSO, is open source, and is in YC. And then always when you scrape, you get this metadata object, title, description, language, Open Graph data, source URL, and so on. So, really, all we wanna do here now is we wanna take this data and create a new company from it. So let's add a new, let's create a new operation on the resolve path of that web request. Let's call it create data.\u003C/p>\u003Cp>We're gonna create something in the company's collection. We'll give it full access, and then we just need to provide a payload. So let's go ahead and do that. We have an object here. We have a name.\u003C/p>\u003Cp>Oh, we have a name, and we're gonna pass in the value of the last operation dot data.data.metadata dot title. Then I, for one, am I'm just gonna copy this and edit it each time. So we have name, URL. So that's last .data.data.metadata. Source URL.\u003C/p>\u003Cp>We could, of course, just take it from the trigger body URL, but this is properly formatted. You'll notice I typed in directus dot I o, but when it came back in the payload, it came back with the with the, with the protocol HTTPS and so on. So we have name. We have URL. We have a description.\u003C/p>\u003Cp>Now this one is also from the metadata description. We have the mission. Now this was a custom piece of data we asked to be extracted. Company mission is what we called it. And finally, we have open, open underscore source.\u003C/p>\u003Cp>Last data, data extract and then is underscore open underscore source and then remove that trailing comma. So I believe that's the name of all of the fields. We'll figure it out in a moment when when it inevitably doesn't work. We'll hit the button again, directors.io, and hit run flow. Once again, that's going off to fire crawl using their endpoint and there we see there we see it straight here.\u003C/p>\u003Cp>URL name, description, mission, and the boolean is open source. Let's, let's try that once more. Let's go in here and say firefirecrawl.devrun flow. So let's see. And in theory, we should just give that a moment, and there it is.\u003C/p>\u003Cp>So now you can go ahead and grab more data. Now, of course, if we take a look at this endpoint here, you can provide custom properties, and it will try its best to get data out from that. They have a couple of other interesting things which I'll draw your attention to even if I don't think it works in this context. They have extracting without a schema. So this extract here was us creating a a schema.\u003C/p>\u003Cp>Right? You can give it just a text. You can give it a prompt. Extract the company mission from the page. But the thing I don't like about that is you're not explicitly saying what the name of the key is, so you don't necessarily know what it's gonna be at the end.\u003C/p>\u003Cp>I like creating a schema personally. They do something else that's kinda interesting. If I take a look at where is I think it's in their API API reference here inside of scrape. They have this interesting thing called actions. So you can get it to wait, to take a screenshot, to click, write text, press a key, and scroll.\u003C/p>\u003Cp>And the combination of clicking and writing text means you can get it to interact with your web page. You see it here, actions, wait two milliseconds. You could get it to, like, sign into things perhaps, perform search terms. I think it's super interesting. And then take screenshots, of course, and upload those to directors if you fancy.\u003C/p>\u003Cp>So there's a lot of flexibility in this. Having seen kinda how easy this API is, I think I'll go ahead and turn this into, an extension some point in the next few weeks, which we can release as part of Directus AI. But, yeah, that's how to integrate FireCrawl with Directus using Directus Automate. Hope you found this interesting, and by all means, if you have questions, just reach out.\u003C/p>","Hello there. I'm really excited about this tutorial. So on Directus TV, we already have a show called Quick Connect, which shows you how to integrate third party services with Directus using Directus Automate and Flows. And in the spirit of that show, today, I'm gonna show you how to integrate FireCrawl with Directus. Now here they say that they turn websites into LLM ready data. And what that means in practice is you can feed it a URL, provide some options if you want, and it will go and take a look at that web page and return some structured data for you like so. This is their scrape endpoint, which will take a single web page and scrape some data from it. They also have a couple of other endpoints, crawl and map, but today, we're gonna use scrape. Now I've already logged into FireCrawl Cloud and generated an API key, which I'll copy for later. You can also self host FireCrawl, but for ease, I'm just gonna use their cloud product here. Now I have this directors project over here with a new empty collection called companies. In this collection, there are a few fields. URL, a name, a description, mission, and a boolean, a true force value, is it open source. And our goal will be to provide the URL and then have FireCrawl automatically populate the rest with flows. So let's go ahead and create a new flow. So this is our automation builder if you've not seen it before. I'll call this one get company data, I guess. And we are going to use a manual trigger, which will add a button to the side of collection and item pages. So we're going to say we'll run this on the company's collection. What else matters here? We're going to, not require selection, so the button always works. And we're going to require confirmation, which will pop up a modal. And in that modal, we will just add a URL. We'll make it a string input, and we'll make it full widths. And I think that's all we need to do here. So just to see what happens here, if I go back to the, to the company's collection, we now see this button here, this manual flow, trigger. I click that. It pulls up the box, and we'll put in a URL and hit run flow. So now if we go back to our flow, we should immediately see that there is one log. And in here, there is a body, and the URL is the value that we typed. Fantastic. Now we need to actually do something with it. So, let's go ahead and add a new operation here. And, honestly, fire crawl is pretty sick. You can just make one web request. Let's take a look at their docs. We're gonna use the LLM extract, endpoint here, and let's just take a look at the kind of construction of this API call. It's a post request to this URL. We're gonna pass in our, our API key here as an authorization header, and then they give us this kind of JSON payload here. Here, it's telling it to go ahead and extract specifically these four fields, the company mission, does it support SSO, is it open source, and is it in Y Combinator? And it's saying you must go get all four of these. So let's actually just turn this straight into a flow request, request URL, operation. So we're gonna do a post request to this URL, post request to this URL. I'm gonna go and copy my API key again here, and at the end of this I'll, I'll destroy the key. Authorization authorization, bearer API key, save, And then there's the request body. And, honestly, it contains a little more than we need, but this contains everything we need. So we'll just pop that in there. The only thing we wanna do, of course, is pass in the URL that we put in the box. So we'll replace this with trigger.body. URL. Fantastic. Let's save that and see what happens if we go over to content, press the button, and type in directors.io. We see that's running. That's running. That's a good sign. It means it's going off and making the request, waiting for the request. And then we see there is a second log. And we get some data back. There was a 200, so it was successful. Inside of data, there is a property called data, and then there is this value called extract. Extract contains all of those custom keys we asked for, company mission, supports SSO, is open source, and is in YC. And then always when you scrape, you get this metadata object, title, description, language, Open Graph data, source URL, and so on. So, really, all we wanna do here now is we wanna take this data and create a new company from it. So let's add a new, let's create a new operation on the resolve path of that web request. Let's call it create data. We're gonna create something in the company's collection. We'll give it full access, and then we just need to provide a payload. So let's go ahead and do that. We have an object here. We have a name. Oh, we have a name, and we're gonna pass in the value of the last operation dot data.data.metadata dot title. Then I, for one, am I'm just gonna copy this and edit it each time. So we have name, URL. So that's last .data.data.metadata. Source URL. We could, of course, just take it from the trigger body URL, but this is properly formatted. You'll notice I typed in directus dot I o, but when it came back in the payload, it came back with the with the, with the protocol HTTPS and so on. So we have name. We have URL. We have a description. Now this one is also from the metadata description. We have the mission. Now this was a custom piece of data we asked to be extracted. Company mission is what we called it. And finally, we have open, open underscore source. Last data, data extract and then is underscore open underscore source and then remove that trailing comma. So I believe that's the name of all of the fields. We'll figure it out in a moment when when it inevitably doesn't work. We'll hit the button again, directors.io, and hit run flow. Once again, that's going off to fire crawl using their endpoint and there we see there we see it straight here. URL name, description, mission, and the boolean is open source. Let's, let's try that once more. Let's go in here and say firefirecrawl.devrun flow. So let's see. And in theory, we should just give that a moment, and there it is. So now you can go ahead and grab more data. Now, of course, if we take a look at this endpoint here, you can provide custom properties, and it will try its best to get data out from that. They have a couple of other interesting things which I'll draw your attention to even if I don't think it works in this context. They have extracting without a schema. So this extract here was us creating a a schema. Right? You can give it just a text. You can give it a prompt. Extract the company mission from the page. But the thing I don't like about that is you're not explicitly saying what the name of the key is, so you don't necessarily know what it's gonna be at the end. I like creating a schema personally. They do something else that's kinda interesting. If I take a look at where is I think it's in their API API reference here inside of scrape. They have this interesting thing called actions. So you can get it to wait, to take a screenshot, to click, write text, press a key, and scroll. And the combination of clicking and writing text means you can get it to interact with your web page. You see it here, actions, wait two milliseconds. You could get it to, like, sign into things perhaps, perform search terms. I think it's super interesting. And then take screenshots, of course, and upload those to directors if you fancy. So there's a lot of flexibility in this. Having seen kinda how easy this API is, I think I'll go ahead and turn this into, an extension some point in the next few weeks, which we can release as part of Directus AI. But, yeah, that's how to integrate FireCrawl with Directus using Directus Automate. Hope you found this interesting, and by all means, if you have questions, just reach out.","9c575c49-4fb1-4eed-9731-e1e1acb55da3",[181],"4830ae03-22cd-4399-82f2-5767bf5cf0cc",[],{"reps":184},[185,241],{"name":186,"sdr":8,"link":187,"countries":188,"states":190},"John Daniels","https://meet.directus.io/meetings/john2144/john-contact-form-meeting",[189],"United States",[191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240],"Michigan","Indiana","Ohio","West Virginia","Kentucky","Virginia","Tennessee","North Carolina","South Carolina","Georgia","Florida","Alabama","Mississippi","New York","MI","IN","OH","WV","KY","VA","TN","NC","SC","GA","FL","AL","MS","NY","Connecticut","CT","Delaware","DE","Maine","ME","Maryland","MD","Massachusetts","MA","New Hampshire","NH","New Jersey","NJ","Pennsylvania","PA","Rhode Island","RI","Vermont","VT","Washington DC","DC",{"name":242,"link":243,"countries":244},"Michelle Riber","https://meetings.hubspot.com/mriber",[245,246,247,248,249,250,251,252,253,254,255,256,257,258,259,260,261,262,263,264,265,266,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,289,290,291,292,293,294,295,296,297,298,299,300,301,302,303,304,305,306,307,308,309,310,311,312,313,314,315,316,317,318,319,320,321,322,323,324,325,326,327,328,329,330,331,332,333,334,335,336,337,338,339,340,341,342,343,344,345,346,347,348,349,350,351,352,353,354,355,356,357,358,359,360,361,362,363,364,365,366,367,368,369,370,371,372,373,374,375,376,377,378,379,380,381,382,383,384,385,386,387,388,389,390,391,392,393,394,395,396,397,398,399,400,401,402,403,404,405,406,407,408,409,410,411,412,413,414,415,416,417,418,419,420,421,422,423,424,425,426,427,428,429,430,431,432,222,433,434],"Albania","ALB","Algeria","DZA","Andorra","AND","Angola","AGO","Austria","AUT","Belgium","BEL","Benin","BEN","Bosnia and Herzegovina","BIH","Botswana","BWA","Bulgaria","BGR","Burkina Faso","BFA","Burundi","BDI","Cameroon","CMR","Cape Verde","CPV","Central African Republic","CAF","Chad","TCD","Comoros","COM","Côte d'Ivoire","CIV","Croatia","HRV","Czech Republic","CZE","Democratic Republic of Congo","COD","Denmark","DNK","Djibouti","DJI","Egypt","EGY","Equatorial Guinea","GNQ","Eritrea","ERI","Estonia","EST","Eswatini","SWZ","Ethiopia","ETH","Finland","FIN","France","FRA","Gabon","GAB","Gambia","GMB","Ghana","GHA","Greece","GRC","Guinea","GIN","Guinea-Bissau","GNB","Hungary","HUN","Iceland","ISL","Ireland","IRL","Italy","ITA","Kenya","KEN","Latvia","LVA","Lesotho","LSO","Liberia","LBR","Libya","LBY","Liechtenstein","LIE","Lithuania","LTU","Luxembourg","LUX","Madagascar","MDG","Malawi","MWI","Mali","MLI","Malta","MLT","Mauritania","MRT","Mauritius","MUS","Moldova","MDA","Monaco","MCO","Montenegro","MNE","Morocco","MAR","Mozambique","MOZ","Namibia","NAM","Niger","NER","Nigeria","NGA","North Macedonia","MKD","Norway","NOR","Poland","POL","Portugal","PRT","Republic of Congo","COG","Romania","ROU","Rwanda","RWA","San Marino","SMR","São Tomé and Príncipe","STP","Senegal","SEN","Serbia","SRB","Seychelles","SYC","Sierra Leone","SLE","Slovakia","SVK","Slovenia","SVN","Somalia","SOM","South Africa","ZAF","South Sudan","SSD","Spain","ESP","Sudan","SDN","Sweden","SWE","Tanzania","TZA","Togo","TGO","Tunisia","TUN","Uganda","UGA","United Kingdom","GBR","Vatican City","VAT","Zambia","ZMB","Zimbabwe","ZWE","UK","Germany","Netherlands","Switzerland","CH","NL",1773850443279]