[{"data":1,"prerenderedAt":473},["ShallowReactive",2],{"footer-primary":3,"footer-secondary":93,"footer-description":119,"request-review-config-as-code":121,"request-review-config-as-code-next":198,"sales-reps":221},{"items":4},[5,29,49,69],{"id":6,"title":7,"url":8,"page":8,"children":9},"522e608a-77b0-4333-820d-d4f44be2ade1","Solutions",null,[10,15,20,25],{"id":11,"title":12,"url":8,"page":13},"fcafe85a-a798-4710-9e7a-776fe413aae5","Headless CMS",{"permalink":14},"/solutions/headless-cms",{"id":16,"title":17,"url":8,"page":18},"79972923-93cf-4777-9e32-5c9b0315fc10","Backend-as-a-Service",{"permalink":19},"/solutions/backend-as-a-service",{"id":21,"title":22,"url":8,"page":23},"0fa8d0c1-7b64-4f6f-939d-d7fdb99fc407","Product Information",{"permalink":24},"/solutions/product-information-management",{"id":26,"title":27,"url":28,"page":8},"63946d54-6052-4780-8ff4-91f5a9931dcc","100+ Things to Build","https://directus.io/blog/100-tools-apps-and-platforms-you-can-build-with-directus",{"id":30,"title":31,"url":8,"page":8,"children":32},"8ab4f9b1-f3e2-44d6-919b-011d91fe072f","Resources",[33,37,41,45],{"id":34,"title":35,"url":36,"page":8},"f951fb84-8777-4b84-9e91-996fe9d25483","Documentation","https://docs.directus.io",{"id":38,"title":39,"url":40,"page":8},"366febc7-a538-4c08-a326-e6204957f1e3","Guides","https://docs.directus.io/guides/",{"id":42,"title":43,"url":44,"page":8},"aeb9128e-1c5f-417f-863c-2449416433cd","Community","https://directus.chat",{"id":46,"title":47,"url":48,"page":8},"da1c2ed8-0a77-49b0-a903-49c56cb07de5","Release Notes","https://github.com/directus/directus/releases",{"id":50,"title":51,"url":8,"page":8,"children":52},"d61fae8c-7502-494a-822f-19ecff3d0256","Support",[53,57,61,65],{"id":54,"title":55,"url":56,"page":8},"8c43c781-7ebd-475f-a931-747e293c0a88","Issue Tracker","https://github.com/directus/directus/issues",{"id":58,"title":59,"url":60,"page":8},"d77bb78e-cf7b-4e01-932a-514414ba49d3","Feature Requests","https://github.com/directus/directus/discussions?discussions_q=is:open+sort:top",{"id":62,"title":63,"url":64,"page":8},"4346be2b-2c53-476e-b53b-becacec626a6","Community Chat","https://discord.com/channels/725371605378924594/741317677397704757",{"id":66,"title":67,"url":68,"page":8},"26c115d2-49f7-4edc-935e-d37d427fb89d","Cloud Dashboard","https://directus.cloud",{"id":70,"title":71,"url":8,"page":8,"children":72},"49141403-4f20-44ac-8453-25ace1265812","Organization",[73,78,84,88],{"id":74,"title":75,"url":76,"page":77},"1f36ea92-8a5e-47c8-914c-9822a8b9538a","About","/about",{"permalink":76},{"id":79,"title":80,"url":81,"page":82},"b84bf525-5471-4b14-a93c-225f6c386005","Careers","#",{"permalink":83},"/careers",{"id":85,"title":86,"url":87,"page":8},"86aabc3a-433d-434b-9efa-ad1d34be0a34","Brand Assets","https://drive.google.com/drive/folders/1lBOTba4RaA5ikqOn8Ewo4RYzD0XcymG9?usp=sharing",{"id":89,"title":90,"url":8,"page":91},"8d2fa1e3-198e-4405-81e1-2ceb858bc237","Contact",{"permalink":92},"/contact",{"items":94},[95,101,107,113],{"id":96,"title":97,"url":8,"page":98,"children":100},"8a1b7bfa-429d-4ffc-a650-2a5fdcf356da","Cloud Policies",{"permalink":99},"/cloud-policies",[],{"id":102,"title":103,"url":81,"page":104,"children":106},"bea848ef-828f-4306-8017-6b00ec5d4a0c","License",{"permalink":105},"/bsl",[],{"id":108,"title":109,"url":81,"page":110,"children":112},"4e914f47-4bee-42b7-b445-3119ee4196ef","Terms",{"permalink":111},"/terms",[],{"id":114,"title":115,"url":81,"page":116,"children":118},"ea69eda6-d317-4981-8421-fcabb1826bfd","Privacy",{"permalink":117},"/privacy",[],{"description":120},"\u003Cp>A composable backend to build your Headless CMS, BaaS, and more.&nbsp;\u003C/p>",{"id":122,"slug":123,"vimeo_id":124,"description":125,"tile":126,"length":127,"resources":128,"people":132,"episode_number":142,"published":143,"title":144,"video_transcript_html":145,"video_transcript_text":146,"content":8,"status":147,"episode_people":148,"recommendations":177,"season":178,"seo":8},"86fa152b-6a8b-477e-94b5-bd91e1202d21","config-as-code","906788449","In this recording of our live event on January 25 2024, Rijk, Jonathan, and Daniel discuss configuration as code.","bd5024fb-4ef7-455a-8ff6-8631da26b5d2",56,[129],{"name":130,"url":131},"GitHub Discussion","https://github.com/directus/directus/discussions/13041",[133,136,139],{"name":134,"url":135},"Rijk van Zanten","https://directus.io/team/rijk-van-zanten",{"name":137,"url":138},"Jonathan Wagner","https://directus.io/team/jonathan-wagner",{"name":140,"url":141},"Daniel Biegler","https://directus.io/team/daniel-biegler",2,"2024-02-01","Configuration as Code","\u003Cp>Speaker 0: Welcome everybody once more to a wonderful request review session here where we go over feature requests and figure it out. Now what do we do? I'm afraid we ramble on for about an hour about the technical complexities. Remember, the goal here is to basically divergently discuss, you know, what is the feature request, what are we trying to do, what is it trying to achieve, And how do we think we can make it happen in a very sort of direct to see way? What are we talking about this week?\u003C/p>\u003Cp>Speaker 1: Yes. We're talking about configuration.\u003C/p>\u003Cp>Speaker 0: Configuration as code. Let's let's figure out how to take schema endpoints to the max and actually Schema endpoints to the max. Manage the entire project as code. So this is really with a focus on GitOps. Right?\u003C/p>\u003Cp>Where you have a sort of centralized repository of static files that is the single source of truth for all configuration of the running project. Which as you might guess, they get complicated fairly quick. Hello. And as per usual, we'll be eyeing the chat. So if you have any questions in between or any suggestions or any good thoughts, please do please do put it in the chat.\u003C/p>\u003Cp>I already saw his name fly by. Well, most likely I have a very special guest for you today, because our very own Connor has been researching, you know, some of this for a little while now. But before we dive into the research results there, let's discuss a little bit of the requirements that are presented here in the current feature requests. Right? Because the one thing we know now, you know, the current state of affairs, we have that schema snapshot and apply endpoint, that we use and sort of recommend for, you know, moving bits of schema, to and from dev to prod, that sort of thing.\u003C/p>\u003Cp>But as people have pointed out, you know, that is still for schema only. Right? So we know one of the big requirements for this is gonna be you need to figure out additional configuration, additional additional, data points maybe from your own tables, you know, environment migrations, like you mentioned there, which includes, you know, what about roles? What about flows? What about presets?\u003C/p>\u003Cp>What about translation strings, etcetera? This well, one of the complexities for this is figuring out, you know, what is configuration within the context of directives in the first place. Right? Which is a discussion topic that I have had some trouble with just going through myself already, which is what is configuration? You know, are your roles and the way you configured permission configured, you know, permissions configuration, probably.\u003C/p>\u003Cp>But the users within those roles, probably not. But then users with static tokens, maybe. Right? If you have your own tables, maybe you have a single, you know, app settings, singleton collection that you use for configuration is that now configuration that is part of code first configuration. Right?\u003C/p>\u003Cp>Even though it's not a system table and you're not configuring directives, you might still be configuring other things. Although, that's where the fun starts. So maybe we could scroll down a little bit, Jonathan, just take a quick peek through, the other the motivation and the requirements here. So, you know, as we kinda touched on already, the same here from, Erif van Oort. Pretty sure that would be a Dutch user.\u003C/p>\u003Cp>It's it's about things like permission logic, you know, keep the local dev environment in sync, source control is the source of truth. Right? You wanna make sure that you can spin up new Directus instances not completely empty, but start it from, you know, a template that is in your repo. If there's an issue, you can easily, you know, share, the configuration of your platform. Daniel, if you would kindly mute, you're being very annoying.\u003C/p>\u003Cp>From the replying system, this is immediately where where it gets complicated. Right? It's like, what is configuration? Right? What is configuration?\u003C/p>\u003Cp>When it comes to import export, how to define what gets imported, what gets exported? Basically, the same question to me. Right? How does it get imported? You know, are you merging stuff?\u003C/p>\u003Cp>Are you overwriting stuff? What happens if you try to insert something that already exists? You know? How do you deal with conflicts? Very good question.\u003C/p>\u003Cp>So if you wanna scroll down a little bit further to see what else is in here.\u003C/p>\u003Cp>Speaker 1: Yeah. No. I don't know about that point. That's a good question, but a very, very long one to answer properly. But the gist is, you know, if you work with multiple people with different setups and if somebody changes your database schema, for example, how do you synchronize the state between your instance and another instance?\u003C/p>\u003Cp>You can do that with our schema endpoint. You can we already have that, capability. But, technically, you would want to or ideally let's say ideally, you would want to set up your configuration as code, because then you have a single source of truth. If you're developing a new feature, for example, you need a new table, you need new fields, you want to test something, you wanna try something, but then you, you know, delete some fields, How do you get the changes synchronized between different setups? And even the problem gets even larger if you have an organization, for example, with, let's say, I don't know, one dev department of, like, 8 people, for example.\u003C/p>\u003Cp>Stuff gets really gnarly really quick. How do you synchronize then between 8 people, for example, between different branches, different features, different collections, different fields? You know.\u003C/p>\u003Cp>Speaker 0: Let alone a test team of 200. Right?\u003C/p>\u003Cp>Speaker 1: Yeah. And this this is, you know, for a very small team, it can get quite gnarly pretty quickly. But, you know Yeah.\u003C/p>\u003Cp>Speaker 0: There's a couple other things there too. Right? When it comes to the git repo flow specifically is that any change to the schema of the project is now sort of, like, version controlled, so you know what happened when and you can roll back. And you have accountability because you know who made the change, through that sort of git first approach. Right?\u003C/p>\u003Cp>The other main thing there too, I think, is from a database template, you don't have files, which is one thing we'll touch on in a and the second thing is it's database vendor specific. That's another thing. Right? Like, you could plop the whole SQLite file in a repo, but, you know, if you wanna move if you have a local dev instance that uses SQLite and you wanna go push your change into production in Postgres, now you have a workflow trouble. Right?\u003C/p>\u003Cp>Even if you have local Postgres, the server Postgres, you might go, you know, I don't know, Postgres 10 to 13 or something. If there's a version mismatch, you know, there's there's things to to consider there. Of course, there's third party tools. I see Ansible mentioned here that you can use to sort of move databases across, sort of thing. This would really be sort of direct as native way to move configuration around.\u003C/p>\u003Cp>Right? Which I personally see it as an improvement or an upgrade to the schema snapshot system that we have rather than a completely new thing. Just the real question just becomes, you know, how do we add more stuff into that so you can use it for this? That's that's really the, to me, the the underlying discussion. Right.\u003C/p>\u003Cp>Jonathan, if you wanna scroll down a little bit further, you can see if there's any other points. Wanna make sure we don't forget. Export considerations, multiple files. I think that's a very important requirement because we've already seen some of the schema snapshots just get bonkers large. Right?\u003C/p>\u003Cp>Because if you have a 1,000 collections, a total of 25 100 fields sounds insane, but it happens in the wild. The one export file, you know, is is megabytes and megabytes and megabytes worth of of JSON. Tens if not 100, which gets unwieldy pretty quick. It also makes it more difficult to import, by the way, because we're not really able to stream it all that well and then it becomes a very large file. So you have to read it into memory and then and then use it.\u003C/p>\u003Cp>Let's see. Selective export. That, I think, is a tricky one. Right? How do you know what you're exporting if you, consider your roles and permissions part of this, but you have one admin dev role that you don't care about for your production instance, How do you pick and choose?\u003C/p>\u003Cp>Right? Pick and choose what to what to include, what what not to include, and handling, you know, sensitive data. Very good point. You know, is this gonna be plain text in a static file? Tricky.\u003C/p>\u003Cp>Right? Tricky in a repo. If you scroll down a little bit further, The modular files of extensions, single file per collection, you know, we kinda test.\u003C/p>\u003Cp>Speaker 2: Does\u003C/p>\u003Cp>Speaker 0: does it actually make more sense to have selective import versus export? Great question. Great great question. Maybe. Maybe.\u003C/p>\u003Cp>Yeah. It's it's like if you have Go.\u003C/p>\u003Cp>Speaker 2: I'm sorry. Go ahead.\u003C/p>\u003Cp>Speaker 1: I I don't have to remember to mute and unmute myself between every sentence. That's fine. Yeah. I can see both being very useful. Right?\u003C/p>\u003Cp>For example, if if you have a very, very large, instance with, like, Greg mentioned, right, like, a 1,000 collections, And on your dev instance, you only want to add one thing, do you really need to export, like, this whole thing that's, like, I don't know, 10 megabytes or whatever? Maybe, you know, maybe, it would be enough to just export that table with its fields, and you'd be good to go, because then you could import that partial instance, maybe. But, yeah, for for import or for export, both could be useful. But, yeah, like we said, it's just lots lots and lots of stuff to talk about there. Yeah.\u003C/p>\u003Cp>Speaker 0: TBD is is the honest answer. I I also feel like both is probably where we need to end up with that. Because to your point, if you have a large project and you only care about a small subset of that as a sort of templatable piece, you know, you don't want to export everything and have a bunch of unneeded data in your repo muddying up, you know, the workflow and the reviews. Because then also imagine that you make an export and then now you have a PR of, like, 16,000 lines of stuff that you don't really need. Right?\u003C/p>\u003Cp>But, yeah, let's see. Extend, you know, existing schema files. That's an interesting one. Merging multiple together, importing snippets from other files, maybe, you know, from nested collections. So that's all about, you know, the the file structure for the project.\u003C/p>\u003Cp>Saving the non defaults, I think that is more of a technical requirement to me. Right? It's like we don't have to save default values from Directus in the schema snapshot because they're the default values. Dynamic configuration sync, it's just whenever you make a change in the studio, it auto exports basically, which feels heavy, personally. Feels a bit heavy, but could could potentially work depending on the file format.\u003C/p>\u003Cp>But then again, how do you choose what to export on the automated one? Right? So TBD. That's also why it's a could have. I mean, they've they've thought about it luckily.\u003C/p>\u003Cp>Automatic real time sync, sort of similar idea. Right? But an option in the Data Studio API triggered chrono periodically high. So the one thing I do notice in the requirements list here is that there's a lot of talk about how to get it out of Directus and in what file format, but not so much the other way around. Right?\u003C/p>\u003Cp>How do you get it back in? So if you have something in your repo, whatever that something is, what does that code look like and how do you get that back into the Directus instance? Right? This might be a good point actually, a nice little segue. Like I hinted at at the beginning, our very own Connor has been doing quite a lot of homework on this just to figure out, you know, the, the format and some of the ideas around this and how it could work.\u003C/p>\u003Cp>So if let me see if I can find him. Where is this little so many here. Look at that. Hello, Connor. What have\u003C/p>\u003Cp>Speaker 2: you been what have\u003C/p>\u003Cp>Speaker 0: you been up to recently?\u003C/p>\u003Cp>Speaker 2: I have been up to quite a bit involving this config as code and how it plays into all the other different parts of direct disc that we wanna do. Let me get my notes up. Here we go. So you said you wanted me to talk about the structure of the exports?\u003C/p>\u003Cp>Speaker 0: I think it'd be cool if you wanna give a quick overview of sort of the the research process itself. Like, what are the things that you've been looking into? What have been the considerations or requirements? And sort of the things that you found. And then dive into some initial conclusions.\u003C/p>\u003Cp>Speaker 2: Sure. So what I have been going through and researching is basically we have a couple of different feature requests from config as code to templates to migrating between instances to migrating between different databases. And all of it sort of involves, you know, moving configuration between instances, moving data between instances, and moving files and assets between instances. And that is a very big task when you're trying to be database agnostic and you're trying to be efficient. You're trying to have you're trying to support multiple different use cases where sometimes you wanna overwrite everything, sometimes you just wanna bring in some stuff, sometimes you only wanna take out some stuff.\u003C/p>\u003Cp>Sometimes you wanna stream it all. Sometimes you wanna have the file small. So there's a lot of very different considerations that go into it, and then making it all happen with one sort of directest way of making it all magical. It becomes a very big rabbit hole that you start diving down into. And so one of the stuff one of the things that we've that I have been looking at is, you know, what are all the different use cases for it, and what are the requirements for all those different use cases?\u003C/p>\u003Cp>And so configuration as code is one of the use cases on there. It's not completely fleshed out yet because that was not on it's one of the later goals of what I've been working on. But with it brings, you know, how do you integrate it with CICD, you know, GitHub, GitLab. You know, do you have your own hosted GitHub, You know, self hosted GitHub. You know?\u003C/p>\u003Cp>Where do all your stuff is stored? And so there's a whole bunch of different parts of it. Right now with the schema service, we go and we give you you export a schema of your stuff. It exports everything. You diff it against your instance, and then you apply that diff, and it gives you, like, the changes if you can do it or not.\u003C/p>\u003Cp>And right now, that's really it. There's not really too much to the schema service outside of that right now, and adding in all these different layers and features. The schema service is definitely gonna have to take a new look to it. And so one of the things that we've been looking at is, you know, that initial export of a schema, you know, making it more of a distributable type of folder structure file structure, whether it's a compressed zip file or some type of other special file. But basically redefining how that schema export looks to be able to hold all these different configuration items, to be able to hold data, to be able to hold assets, and defining that structure and, you know, you know, as Ryke mentioned earlier, you know, do you want that stuff stored in plain text or do you want it to be stored in, you know, some type of encrypted format or do you want it to be compressed?\u003C/p>\u003Cp>So there's a lot of different variables there. And then once you take that distributable that gets made, it can be I mean, for some instances, if you've pulled out data and assets and configuration, you know, that thing could be huge. And so we wanna go we wanna bring that into the this new instance of the target instance. Right? And so we need to different and change it.\u003C/p>\u003Cp>And so bringing all that in and processing in is a whole another thing. You know, do you wanna bring in all of it? You know, you have all the export controls. Do you wanna have import controls to how it gets implied and, you know, how it gets imported? And so I've been going through and documenting all those different ways that we can do stuff, you know, what is dependent, you know, if we wanna do this, then, you know, we have to do that, you know.\u003C/p>\u003Cp>And so we've been looking at one of the things this week, you know, is what type of file format for all of this type of stuff, for how it gets really big. You know, if it gets a lot of data, if somebody has a 1,000 collections and 4,000 fields, you know, is a CSV file, a JSON file really the right file structure to store all of that data? And so we've been looking at, you know, different options and different file formats for storing, you know, structured data like that in an efficient compressed way that also lets you keep the schema of the schema export defined and structured in a way. And then also making sure that we keep that same right now, we hash the schemas and stuff so that they all stay. You know?\u003C/p>\u003Cp>You could only use the schema to apply to this instance because you just did it with it and yada yada. So having that in there too, you know, do we have a metadata file inside of that export that, you know, talks about what the export is? You know, do we have it? Do these become an extension type, you know, that can be used throughout the instance in different places? You know, there's a whole bunch of different options there.\u003C/p>\u003Cp>Speaker 0: Yeah. Yeah. Absolutely. It\u003C/p>\u003Cp>Speaker 1: was a great intro. Yeah. It was a very good yeah.\u003C/p>\u003Cp>Speaker 0: Exactly. Exactly. Yeah. So the first order of business to your point, you know, figuring out what does that file format look like. We know some of the requirements now based on this discussion that we just looked at.\u003C/p>\u003Cp>We know some of the the downsides of the current format. So that's a great step. Then, of course, the second big step will be figuring out, you know, how do you go from that sort of source of truth overview into applying it for realsies. Right? So we have that sort of diff step in between.\u003C/p>\u003Cp>So for those unaware right now, if you upload a schema snapshot into the Directus API, it will compare it to the current state of the database and then return, you you know, the the the list of differences, basically. So it's a diff, not a list of changes. As in a step step by step list, it's just a diff, like an a a versus b. And then that diff is then uploaded to an apply endpoint, which will basically, you know, apply the changes required to get rid of the diff, right, to make sure that the that the 2 are in sync, that the instance is in sync with your file export. So based on that, Connor, we've done some research on what needs to happen on that diff endpoint itself too.\u003C/p>\u003Cp>You wanna you wanna share some insights on what we know now, at least, are some of the requirements to make that work properly with all of these new new additional features that we're trying to add in.\u003C/p>\u003Cp>Speaker 2: Yeah. So with that dipping endpoint, some of the things that we are looking at is, number 1, if you wanna bring in data. Right? You know, how do you diff large amounts of data? Are you able to diff large amounts of data?\u003C/p>\u003Cp>That's one of the research things that's on the list. You know? Right now, we have an import and export service to import and export data. Looking at Attica drive run options. You know?\u003C/p>\u003Cp>Can you import this data? Can you export this data, for that diffing stuff? You know, if you have a really big file, so you do have 300,000 collections and fields, you know, that's gonna take a long time to make changes to the database and to go through and find that diff. And so having some type of long task runner on the instance that's able to sit there and work through that to that, diff or making that diff or distributable or whatever it is. You know, having such a long running service of the background of your instance, I can handle that.\u003C/p>\u003Cp>And then also if you're going through and you're applying all these big changes or diffing it or whatever, you don't want people in your instance changing the stuff as you're trying to change stuff. So implementing some sort of maintenance mode on your instance that basically locks it down and puts it, hey. We're making changes right now. You know, you can't it doesn't let anyone else change the schema or anything or the data or whatever you want it to do. We also, have been looking at, you know, for asset data, you know, pulling in you know, do you pull it in from the distributable file, or do you pull it in from the data the asset source?\u003C/p>\u003Cp>You know, do you pull it in from the s three bucket directly? You know, do you use it like that, or do you package it into the distributable? Or, but, basically, for the diffing part, the other part is that if you have a really, really big distributable or schema thing, whatever it ends up being called, you also upload downloading it from one instance, uploading it to another instance just to download another big thing, just to upload the other big thing back again is a lot of moving back and forth of all this different stuff. And so the other thing is when you upload that schema, whatever, it diffs it. Instead of it downloading the diff back to you, then you having to send it back up, it being able to just keep the diff on the instance and you just being able to tell it to apply the storage diff that it already has.\u003C/p>\u003Cp>And so you don't have to have that all that network changing back and forth, and then, you know, Internet goes out, then you're screwed. You know? But that's one of the things that we've also been looking at for the diffing. And then another thing is, you know, different types of strategies of diffing and importing. So that, you know, do you just wanna up cert stuff?\u003C/p>\u003Cp>Do you just wanna add new things and you wanna ignore everything else that has conflicts? Or do you want it to only apply if there are no conflicts, you know, or do you want it to overwrite everything, you know, so it doesn't matter if there's conflict. We're gonna rewrite over it with everything, you know. And then instead of just returning a singular diff that just compares the 2 different schemas and it just says, hey this is what's different. You know putting in more migration like making it more of a step type thing.\u003C/p>\u003Cp>So it works through migration steps. And, you know, oh, you need to do this. You need to do this. You need to do this. You need to do this.\u003C/p>\u003Cp>And basically a workflow that the thing can work through and and guide those long running task runners on what to do and how to configure your instance. And I\u003C/p>\u003Cp>Speaker 0: think last but not least, the having some sort of format to expose potential conflicts for manual resolution. Right? So if one of the strategies has to be that it's up to the end user to pick and choose what to do. So imagine if you go, you know, from a dev to prod, life cycle, right, where you're not so much delete everything and insert everything, you wouldn't wanna do that in a prod obviously, And if you have an absurd strategy, but there is a conflict, right, you you have, like, a foreign key that doesn't work anymore or something like that. There needs to be some sort of format in whatever this diff looks like or this migration step format looks like that just has, a list of Here's the steps with the known conflict What do you wanna do?\u003C/p>\u003Cp>Right? How do you wanna modify that that step, those steps to get around the conflict? Right? Do you wanna upload, you wanna upload new data, or do you wanna, ignore that particular step, or do you wanna ignore those records? You know?\u003C/p>\u003Cp>So to your point, if we need some sort of driver and to check if you can import all of the data, it's sort of a requirement in order to be able to extract, you know, potential conflicts. So we need to have some sort of way to search through the data you're trying to apply, in order to know how to deal with conflicts. Right? So now that we're talking about all of this, what we're what we started to notice is that we're not so much talking about, you know, configuration as code specifically or templating specifically. What we're basically shaping here is a system that works for multiple things, right?\u003C/p>\u003Cp>Depending on how you use it. So if you were to make, a snapshot of everything, just full stop everything, and you import it as as apply everything, what you're talking about now is basically backup restore. Right? If you're exporting a small fragment and you're importing that into another project, you're basically talking about templating. Right?\u003C/p>\u003Cp>If you're exporting just the schema part and no data and you apply that to a new project, you're you're talking about seeding or something. You know what I mean? Like preparing preparing, a database basically, a a new project for what you wanted to do. And the question now is, how does that how does that all tie together how does that tie back into the configuration as code parts specifically, Connor? Because what we're talking about now is, you know, a new sort of format, generated by Directus that you can save somewhere, you know, which is fairly still, it's still fairly proprietary because it will have to be heavily compressed and, you know, directors needs to know what the format is.\u003C/p>\u003Cp>So what is the current thinking on tying it back into the code side of this question? Right?\u003C/p>\u003Cp>Speaker 2: Yeah. So if we went the route of having some sort of distributable file structure folder structure that is some proprietary format or is encrypted or compressed or whatever, you know, you're not gonna be able to sit there and write code that is a compressed file. You know, you're gonna have to have write something that generates that file. So one thought that we've been having is following the lead of some other types of, you know, companies like AWS and their SDKs. So, basically, having some type of SDK that you can write and configure your instance with, and then you tell the CLI or whatever to execute that, read those different set the code that you've written, and then it will make a direct distributable file, diff file, whatever it is, from the code that you've read.\u003C/p>\u003Cp>So if you wanna go through and you wanna define all of your collections and your fields or whatever, and you can go in and define that in all your files and your code and then execute that code, it comes up. It generates that file that you can then use to apply those changes, import those changes, diff those changes to any of your target instances that you want to.\u003C/p>\u003Cp>Speaker 0: Jonathan, if you might be wanna pull up, I think one one piece of inspiration that we were looking at for that part specifically was AWS's, what do we call it? CDK, I think, code development kit. If you wanna quickly Google that, it could be it could prove like, it could put some flavor to that to that point. So the way AWS has that, they basically made a JavaScript library that you can use to code, like, configuration. And then what it does under the hood is it effectively converts it into, a CloudFormation template, I wanna say, and then applies it immediately.\u003C/p>\u003Cp>Right? So under the hoot, you don't really notice the difference, but it's effectively a 1, 2 jump. Right? So it converts it into their proprietary thing in the middle first and then just applies that as is. What am I searching for?\u003C/p>\u003Cp>Sorry. CDK, the cloud development kit. If you wanna pull up the GitHub repo for that, maybe I have a link somewhere. I'm just curious if they have some some examples somewhere. It's been a minute since I've played with this, but it's an interesting, idea.\u003C/p>\u003Cp>There was a Directus community library, a little while back that that tried doing a similar thing, but it would run it against the API endpoints. It wasn't as flexible yet because we didn't have we don't have, you know, GreenStep. Is this branch? You know. If you wanna do AWS before that, because I think CD case is all different.\u003C/p>\u003Cp>Yeah. There we go. There's a link in the chat as Open it up. Here we go. Here we go.\u003C/p>\u003Cp>Here we go. Is it gonna go? There it\u003C/p>\u003Cp>Speaker 1: goes.\u003C/p>\u003Cp>Speaker 0: So this is an interesting reference for people that wanna look it up. At home, it's basically, you know what was that? Distracted by the chat immediately. Using something like CDK would mean that changes would need to be replicated from the UI to the generation scripts. Changes would need to be replicated.\u003C/p>\u003Cp>That's a great point. Yeah. How does that go both ways? Right? Because if you have that one format in the middle, directors can recreate that format in the middle.\u003C/p>\u003Cp>The directors wouldn't be able to recreate arbitrary JavaScript, basically. Right? So when you opt into something like that, I think it becomes a one way street by definition because we cannot figure out what parts of your JS file are, or your code because they also have some other languages, but you get the idea. And we don't know what parts of that file are auto generatable and what parts are, human created. Right?\u003C/p>\u003Cp>There's so there's no way to auto generate that back into a manually created file. So at that point, that's a great point, but it's the it really becomes, you know, it becomes a a one way street at that point. Some of the data community did a sort of proof of concept library to do this for, which is very interesting. So if you wanna pull up the direct to community schema builder kit repo, I just sent a link in the chat there. It was very much inspired by a similar idea where you have a JavaScript file that you use to sort of define.\u003C/p>\u003Cp>It's almost, you know, a declaration file rather than, you know, JavaScript, but it is still just JavaScript that runs from top to bottom. But you could define your schema and how it's applied as, you know, individual build steps, in a JavaScript. So this is where it gets real heavy on the code part of the codeless configuration. Right? And not so much just the, moving stuff around.\u003C/p>\u003Cp>So in in terms of big picture stuff, I really see this as the final step of whatever these changes are that we're discussing. We'll have to start with what is that new format in the middle, how is it generated, how is it used, and then see this as a way to sort of generate it into that format and then apply it automatically. Right? But, yeah, that that JavaScript syntax is an interesting interesting idea. So, yeah, I see some folk typing.\u003C/p>\u003Cp>This is one of those very typical director's projects where there is about 600,000 different opinions on the ideal way of doing this. And I think we we saw it in the chat immediately. Right? Shout out to a person that was like, isn't it not just the database template? Why why bother?\u003C/p>\u003Cp>Right? Which I can totally get behind that, but then there's a 180 or so of votes or something like that on the on the discussion. So apparently, you know, that is not an opinion that's shared.\u003C/p>\u003Cp>Speaker 1: Yeah. What what's interesting is, right, because, technically, the most basic example would be something like a database migration, generally, in the beginning, you know, for the configuration as code, for example. So there's another, a similar project director, the CMS like project that I've checked out and see how they did it. And, so they handled this a little bit differently. They don't have, like, a DSL type, you know, language that defines your infrastructure or whatever.\u003C/p>\u003Cp>But, they went the route of as soon as a person or user, via the UI creates some type of change in the tables or the collections or however you want to call it, the instance automatically generates a migration file locally for that specific change. And, there's then a mode in the instance where you can disable any, any ability for other users to change the actual instance. So you can actually just rely on the migration files, which is an approach that you could take, you know, because a migration file then could technically do anything you'd like, you know, with with regards to, you know, the collections, the fields, whatever, even inserting, items. But, then, you know, because we're a directors, we want to that's a little too easy for us because we would like to include some type of things like, alright. How about you, locally, you develop some type of new feature, new new table, new collection, new fields.\u003C/p>\u003Cp>And then in order for that to work in the way that you want it to work, you need an item. You know? You need to include a new new row, data row, or an asset. This is the thing now. Because, you know, assets are not inside of the database.\u003C/p>\u003Cp>So we want to include assets, for example, or maybe this, of course. Nothing is, you know, set in stone, but, you know, including assets, for example. So you want to make some changes, and you need to include some assets for your changes to be even, you know, useful. So you would then have to, you know, do your changes, test it locally, include everything with the correct file name, with the correct row, whatever, or other metadata of an asset, for example. But then on production, you would have to replicate that again.\u003C/p>\u003Cp>So you get this back to back to step 1. Right? So this is kinda it kinda sucks, with a migration part. So even then, if we want to include this, then we get back to the issue at hand that we were talking about. Right?\u003C/p>\u003Cp>We want to have a process that could export something, and you can recreate that between instances and so forth and so forth. I would, I just wanted to mention that for the others in the chat because, it's not just about, you know, just adding a field because that's basically, you know, that's a basic thing, which we could solve. And I think the Director Schema Builder kit is basically that. Right? You you generate some type of syntax, which generates some type of migration.\u003C/p>\u003Cp>But you have to keep in mind then, of course, right, different database vendors, we have to abstract that. Because, for example, you know, in SQLite, if I remember correctly please correct me. If I remember correctly, like, you can't alter a table and introduce, like, a foreign key. You're forced to drop the table, actually, and recreate it in order to add a foreign key, for example. Other databases can do that as CLI can.\u003C/p>\u003Cp>So there's lots of different\u003C/p>\u003Cp>Speaker 0: honest there, I'm pretty too sure that the last minor release of SQLite didn't come out too long ago. They finally do have that alter table sort of baked in. Although then, you, of course, you have the you have the side effects that it depends on your native build of SQLite on your machine, which may or may not have had. So generally speaking, historically, you've been absolutely right. It's been a nightmare and a half to to do that.\u003C/p>\u003Cp>Speaker 1: Lots of fun. So I just wanted to make sure that, people in chat, you know, it's it's not about just adding a thing. It's it's a little more involved than then. And, you know, including assets and, like, the other sent. Right?\u003C/p>\u003Cp>Maybe there's proprietary information or whatever, and you're not allowed to leave it on your hard drive. Maybe you want to zip it, encrypt it, compress it. There's lots and lots and lots of different steps that we have to, kick off there. So alright. Oh, we got some in the chat interaction.\u003C/p>\u003Cp>Cool. Cool. Cool.\u003C/p>\u003Cp>Speaker 0: So alright. Top to bottom. First question. What speaks against using JSON or YAML files? Instead of JavaScript, this way the changes in the webpack could also be synced back to the files easily.\u003C/p>\u003Cp>So for what it's worth, the the, formats that we're talking about being generated from directives would most likely be, you know, in some sort of structured format. Not quite sure if that's JSON or YAML yet or if we have to find some sort of optimized, file format to do that. Because the the the risk of JSON and YAML exports, once you start including data, you know, we no longer know how much data you wanna include. Like if we're treating this as you could use this for backup restore we could talk about a large large amount of data right at which point we need to have a very optimized structural format that may or may not be usable in that point. Right?\u003C/p>\u003Cp>Connor, remind me, we found like an Apache file format that could be interesting for this. I think what was it called? Parquet or something. Right?\u003C/p>\u003Cp>Speaker 2: It was called Parquet.\u003C/p>\u003Cp>Speaker 0: Parquet. Yeah. That would be an interesting file format for something like that. Or potentially using a SQLite database as as the exported file. Like, that's a completely different, direction.\u003C/p>\u003Cp>But But you get the idea. We need to have some sort of optimized compressed file format because the export file could get really large. Now, it might be an option for the way, you know, you save one of those to just save it in a sort of raw mode. Right? Where it doesn't save it compressed, at which point it's just it could be human readable YAML or or JSON, including, you know, the ability to properly, source control it.\u003C/p>\u003Cp>On the migration note, I think you answered that before, Daniel, like, exactly right. If you're doing auto generated migrations, it's really only for the database schema part. Like, we can't really know on your behalf if you consider, insights dashboards part of configuration or flows or something. Right? So it's it's gonna be it's gonna be tricky because people different people have different export requirements.\u003C/p>\u003Cp>And if you go from dev to prod, all bets are off. Right? You you never quite know what the the idea is. Creating internal libraries fiber to schema works with native access to directives rather than the API using integrations and help creating a lot of complex repetitive action. I can imagine.\u003C/p>\u003Cp>Yeah. Because you can write a little JavaScript for loop and just block. You have 10,000 collections. Right? But, you know, the lack of two way integration between the UI does cause issues which is the unfortunate side effect of using, you know, a programming language rather than a declarative language like yaml or JSON, for doing schema modifications like that.\u003C/p>\u003Cp>You're gonna lose that two way integration. That being said, you know, if Directus has a don't allow me to change the schema environment variable flag, whatever, you could make that, you could do that on purpose. Right? For a production instance, for example, I can totally imagine that you disable any sort of schema modifications just for security reasons, and and availability reasons and only allow those changes to happen through whatever system we're we're cooking up here. Right?\u003C/p>\u003Cp>I think default value filtering could help make the YAML auth more manageable. Fully agree. You know, we should only store the stuff that we need to know, and storing default values feels like a waste of waste of space. Then Azure is working on some sort of YAML based metadata authoring, announcing Azure data delivery net auth sales. What marketing email?\u003C/p>\u003Cp>Very curious. Haven't I haven't heard of that one before. If you wanna keep the GitOps thing, it should really be a text format. Good point from Tim. Which may or may not be answered from by Dominic here.\u003C/p>\u003Cp>If you split up between schema and content in different formats. Right? Maybe the configuration piece is all human readable file formats. But if you have a data export that is maybe there's a file size threshold. Right?\u003C/p>\u003Cp>If you have a very large CSV export maybe there's just a smart point where it's like, oh, you're trying to save 10,000 rows We're gonna flip it automatically into a compressed, non readable format so you get the best of both worlds. Right? Potentially. Cool. Alright.\u003C/p>\u003Cp>Cool. We tried.\u003C/p>\u003Cp>Speaker 1: I it's something similar. Yeah. Just to, you know, chime in to regards to what you just said. So with the, like, yeah, like, yeah, we have to split that up preferably, you know, or at least, you know, required to have this as text based so you can, you know, use it in version control, whatever. And, for, like, including items, for example, you know, a ZIP is is a nice thing that you could use, but then, of course, you know, maybe this includes then items that are not, like like an old version, for example, and, you want to insert something in in where a field doesn't exist anymore, and and lots and lots and lots of other, you know, stuff.\u003C/p>\u003Cp>And, you can then, of course, if we then have all of the different points that we want to persist, right, like flows, permissions, the general config as it is, you could include this then also in the export with the items, so you can do both at the same time or see if it differs and then cancel the thing. But, yeah, it's, it's a fun thing. You know? There's lots and lots of things that could go wrong. There's so many.\u003C/p>\u003Cp>Speaker 0: No no matter what, we wanna make sure that the output file is a single distributable. Right? We do wanna make sure because we on the one hand, we're saying we have to split it up into multiple files in order to make it efficient and easy to work with, But, at the same time, we also wanna make sure that you have a singular thing, singular file that you can send over, to somebody else. Right? Either through the API, so you just have a single download or a single upload, or as a file, maybe packaged through, you know, the marketplace.\u003C/p>\u003Cp>Shout out. Wink wink. Notch notch. Or as, just to email it to somebody for a like air. Right?\u003C/p>\u003Cp>Put it in a GitHub issue as a zip. So there needs to be some sort of both. Right? But I could also imagine that, you know, the API, lets you download it as just a zip, right, that you can just double click to open if you're on macOS or do whatever else it takes on other platforms, to unzip it. Looking at you, Daniel, I'm sure there's a 2 step process.\u003C/p>\u003Cp>For those who, out out of the loop, he's, this is the year of Linux on the desktop, evangelist within the team.\u003C/p>\u003Cp>Speaker 1: Yes. This year. Here it is. This is the year. Mark my words.\u003C/p>\u003Cp>And this year is the year of the Linux desktop. This year. Yes.\u003C/p>\u003Cp>Speaker 0: But long story short, we it's it's we're we're in that weird in between that we need both. Right? We need both the single file and multiple files. So we'll most likely have to come out with some sort of zip, gzip, something like that in between. Cool.\u003C/p>\u003Cp>Prisma migrations are an interesting way of doing things. They have a custom format, which is more concise than directed YAML, and then some CLI tools that create actual SQL migrations in the syncs the environment. Yeah. Great example. Right.\u003C/p>\u003Cp>Good example. They have they basically do with that shadow shadow database, if I'm not mistaken, that's how they keep track of those migrations step by step by step. Yep. Right? And then with the the CLI tool, it can compare your custom migration format with what they already have tracked so far.\u003C/p>\u003Cp>And then, you know, apply apply the diff based on that. Still has the similar I think a similar one way issue, though. Right? Because the Prisma migrations that you write manually, I don't think they can sort of update those from the others from the other side, so to speak, that a two way binding. Good point, though.\u003C/p>\u003Cp>Good point. Alright.\u003C/p>\u003Cp>Speaker 1: Could remember. I had a problem with in the past. So, sadly, I can't remember it right now, but it was very painful. So, it's not a 100%. Yeah.\u003C/p>\u003Cp>It's not a 100 perfect thing. Nice. I've seen people you know, there's there's lots of other, what is it called? Drizzle drizzle right now?\u003C/p>\u003Cp>Speaker 0: Oh, there's a couple of RMS like that. Yeah. Yeah.\u003C/p>\u003Cp>Speaker 1: So there's there's lots of sauces that we could, you know, yoink some code from, but be inspired. Let's say be inspired.\u003C/p>\u003Cp>Speaker 0: Yeah. I don't think we've been yoinked. I don't think we've legitimately ever yoinked code before.\u003C/p>\u003Cp>Speaker 1: Borrow. Borrow. See.\u003C/p>\u003Cp>Speaker 0: Strategies is interesting there. But the the the main difference and then you you touched on it perfectly before. The main difference there is the just keeping track of the database schema versus having that ability to just sort of manual template in between moving stuff between dev and prod that is not just schema, but also data and how to deal with that, which at any point needs to be, something you can do manually. Right? It doesn't have to be manual all the time, but it needs to be something you can do manually Right?\u003C/p>\u003Cp>Cool. Well, with all that being said, looking at the clock here Connor, back to you. We've discussed quite a lot of the research up until this point, some of the requirements. You and I have also sort of been daydreaming about potential ways to implement this moving forwards. You wanna quickly touch on sort of the different, different phases and different sort of parts that we wanna touch as part of this bigger bigger effort?\u003C/p>\u003Cp>Speaker 2: Yeah. So the first thing we wanna do is we wanna figure out how this is gonna look, the distributable, the diffing. What is that file structure? What's the file format? You know, what are the requirements for that?\u003C/p>\u003Cp>Do we have encryption? Do we have multiple files? Do we have one file? Do we have whatever? Like, we've already discussed here.\u003C/p>\u003Cp>First thing is defining what that'll look like in totality, covering all use cases so that as we progress through the the phases of this project and making it happen, we can keep that spec that we have for those files in mind, and we can make sure that we cover everything whenever we could work through the different phases. But the that is phase 0 to finding out that spec, figuring it out what it looks like. Phase 2 or phase 1 would be upgrading the schema service foundationally right now. So making sure we have all the bug fixes with the schema service taken care of, you know, adding the new different, you know, different strategies that to it that might be needed. You know?\u003C/p>\u003Cp>Do we have, you know, add in those exports, export filters, you know, only export this stuff, you know, that stuff. So getting that schema service, adding some more features to it. And then the next phase is working on data importing. And so adding in the different features that would be needed to make the data importing work, You know, dry running, imported data, importing strategies. You know, do we have a import strategy where right now everything just gets up sorted?\u003C/p>\u003Cp>Do we wanna have an import strategy where if you import the data, it drops all the current date data in the table and re imports everything fresh? You know, adding those different options. Some other just things, ideas that have been thrown in the mix is, you know, if you're moving between one instance to another instance and you don't wanna bring any of your IDs, your primary keys, your foreign keys, you know, adding some type of way to anonymize that as it goes from one instance to another. So the new instance makes new IDs for everything. Or adding, you know, the ability to for, like, templates, you know, if I wanna bring in a template for project management and have a due date in one of the items, but I made the template a year ago, then the due date's gonna be from a year ago.\u003C/p>\u003Cp>So, you know, being able to have dynamic data that gets brought in on import. And so, you know, oh, I want you to import this as a date, but make it for in 2 hours after the import is when it's set. And then the next phase after that, working on data importing is getting into putting it all together. So we've worked on the spec for the thing. We worked on the schema service.\u003C/p>\u003Cp>We worked on data importing. Now we need to bring it all together into this this new overhaul configuration, multimodal thing, and have it work on bring all those pieces together and make it work, basically. And then after that, once we've gotten the specs spec'd out, the schema service upgraded, the data importing upgraded, we brought it all together, and it now works foundationally. Now we need to figure out how do we use this to implement these different use cases? So for templating, you know, what do we build inside of the data studio admin app from a user interface and API perspective to make templating work?\u003C/p>\u003Cp>What do we do from the configuration as code to use the stuff that we've already built? So how do we implement the feature set that we want in the configuration as code use case. You know, are there any other use cases? You know, back up being and restoring an instance. Do we have the feature set that we wanna use for that?\u003C/p>\u003Cp>And then that should, in theory, wrap up the project after that, after we figure out and implement those use cases. So there's quite a few steps here. Quite a lot of different items and it's going to take a long time. But at the end, we should have something pretty cool.\u003C/p>\u003Cp>Speaker 1: Ideally. Yeah.\u003C/p>\u003Cp>Speaker 0: And I think I think Daniel's facial expressions told told the whole story. No. But that'll I mean, it'll make sense to me. It sounds it sounds like a large overwhelming amount of stuff as it usually does. But by breaking it up, you know, step by step like this, we actually have a pretty solid idea, you know, start to finish of of what is involved to getting that across the finish line.\u003C/p>\u003Cp>And, yeah, as per usual, you know, there's a lot of, what's the word? Direct this magic going on under the hood to to tie it all together. But I'm always just very excited and glad that we're able to sort of re envision this as we have one underlying core foundational engine that can power all of those different use cases. Right? Rather than trying to tack on a new sort of templating piece and a new sort of code first configuration piece and a new sort of other piece.\u003C/p>\u003Cp>Right? That just increases the tech that makes it hard to maintain. It makes it those different flavors incompatible. You know, somebody will ask, how do we do a template as code? And we're like, well, you can't because code is not for templating.\u003C/p>\u003Cp>You know, that kind of stuff. It really reminds me of how we built flows. Right? That we had the hook extension first. And if you configure a new flow, you're effectively just building together a hook extension, and it's all the exact same underlying, underlying logic and event based system, which is also why, you know, flows you can do it say the same stuff as you can in a hook.\u003C/p>\u003Cp>Right? Well, same events. I mean, you can, of course, in a hook, you can code it yourself. So I'm very glad that we're making this, you know, a foundational upgrade to the schema snapshotting engine rather than trying to make it yet a new thing cool cool cool well that all being said I see we're at time here. Let me just quickly briefly peek at the chat.\u003C/p>\u003Cp>Do we missed anything? Pascal mentioned dynamic collection of field names will be cool. EG importing third party templates so you can choose your own target names. Good point. I think a similar goes for the conflict resolution piece that we talked about.\u003C/p>\u003Cp>Right? If you're trying to import won't be complex. Very, very, very true. But rest assured, it will be complex, I'm sure. Cool.\u003C/p>\u003Cp>With all that being said, this will be, life on Drax. Io/tv/requestreview in about a week week's time. Also, last episode, you can find there too. Should be if you're watching this on DirectTV, it's probably somewhere over there, there, there, or down there. Is this the point where we say, like, and subscribe?\u003C/p>\u003Cp>No. We don't have that yet. It's exactly we thank you all for joining.\u003C/p>","Welcome everybody once more to a wonderful request review session here where we go over feature requests and figure it out. Now what do we do? I'm afraid we ramble on for about an hour about the technical complexities. Remember, the goal here is to basically divergently discuss, you know, what is the feature request, what are we trying to do, what is it trying to achieve, And how do we think we can make it happen in a very sort of direct to see way? What are we talking about this week? Yes. We're talking about configuration. Configuration as code. Let's let's figure out how to take schema endpoints to the max and actually Schema endpoints to the max. Manage the entire project as code. So this is really with a focus on GitOps. Right? Where you have a sort of centralized repository of static files that is the single source of truth for all configuration of the running project. Which as you might guess, they get complicated fairly quick. Hello. And as per usual, we'll be eyeing the chat. So if you have any questions in between or any suggestions or any good thoughts, please do please do put it in the chat. I already saw his name fly by. Well, most likely I have a very special guest for you today, because our very own Connor has been researching, you know, some of this for a little while now. But before we dive into the research results there, let's discuss a little bit of the requirements that are presented here in the current feature requests. Right? Because the one thing we know now, you know, the current state of affairs, we have that schema snapshot and apply endpoint, that we use and sort of recommend for, you know, moving bits of schema, to and from dev to prod, that sort of thing. But as people have pointed out, you know, that is still for schema only. Right? So we know one of the big requirements for this is gonna be you need to figure out additional configuration, additional additional, data points maybe from your own tables, you know, environment migrations, like you mentioned there, which includes, you know, what about roles? What about flows? What about presets? What about translation strings, etcetera? This well, one of the complexities for this is figuring out, you know, what is configuration within the context of directives in the first place. Right? Which is a discussion topic that I have had some trouble with just going through myself already, which is what is configuration? You know, are your roles and the way you configured permission configured, you know, permissions configuration, probably. But the users within those roles, probably not. But then users with static tokens, maybe. Right? If you have your own tables, maybe you have a single, you know, app settings, singleton collection that you use for configuration is that now configuration that is part of code first configuration. Right? Even though it's not a system table and you're not configuring directives, you might still be configuring other things. Although, that's where the fun starts. So maybe we could scroll down a little bit, Jonathan, just take a quick peek through, the other the motivation and the requirements here. So, you know, as we kinda touched on already, the same here from, Erif van Oort. Pretty sure that would be a Dutch user. It's it's about things like permission logic, you know, keep the local dev environment in sync, source control is the source of truth. Right? You wanna make sure that you can spin up new Directus instances not completely empty, but start it from, you know, a template that is in your repo. If there's an issue, you can easily, you know, share, the configuration of your platform. Daniel, if you would kindly mute, you're being very annoying. From the replying system, this is immediately where where it gets complicated. Right? It's like, what is configuration? Right? What is configuration? When it comes to import export, how to define what gets imported, what gets exported? Basically, the same question to me. Right? How does it get imported? You know, are you merging stuff? Are you overwriting stuff? What happens if you try to insert something that already exists? You know? How do you deal with conflicts? Very good question. So if you wanna scroll down a little bit further to see what else is in here. Yeah. No. I don't know about that point. That's a good question, but a very, very long one to answer properly. But the gist is, you know, if you work with multiple people with different setups and if somebody changes your database schema, for example, how do you synchronize the state between your instance and another instance? You can do that with our schema endpoint. You can we already have that, capability. But, technically, you would want to or ideally let's say ideally, you would want to set up your configuration as code, because then you have a single source of truth. If you're developing a new feature, for example, you need a new table, you need new fields, you want to test something, you wanna try something, but then you, you know, delete some fields, How do you get the changes synchronized between different setups? And even the problem gets even larger if you have an organization, for example, with, let's say, I don't know, one dev department of, like, 8 people, for example. Stuff gets really gnarly really quick. How do you synchronize then between 8 people, for example, between different branches, different features, different collections, different fields? You know. Let alone a test team of 200. Right? Yeah. And this this is, you know, for a very small team, it can get quite gnarly pretty quickly. But, you know Yeah. There's a couple other things there too. Right? When it comes to the git repo flow specifically is that any change to the schema of the project is now sort of, like, version controlled, so you know what happened when and you can roll back. And you have accountability because you know who made the change, through that sort of git first approach. Right? The other main thing there too, I think, is from a database template, you don't have files, which is one thing we'll touch on in a and the second thing is it's database vendor specific. That's another thing. Right? Like, you could plop the whole SQLite file in a repo, but, you know, if you wanna move if you have a local dev instance that uses SQLite and you wanna go push your change into production in Postgres, now you have a workflow trouble. Right? Even if you have local Postgres, the server Postgres, you might go, you know, I don't know, Postgres 10 to 13 or something. If there's a version mismatch, you know, there's there's things to to consider there. Of course, there's third party tools. I see Ansible mentioned here that you can use to sort of move databases across, sort of thing. This would really be sort of direct as native way to move configuration around. Right? Which I personally see it as an improvement or an upgrade to the schema snapshot system that we have rather than a completely new thing. Just the real question just becomes, you know, how do we add more stuff into that so you can use it for this? That's that's really the, to me, the the underlying discussion. Right. Jonathan, if you wanna scroll down a little bit further, you can see if there's any other points. Wanna make sure we don't forget. Export considerations, multiple files. I think that's a very important requirement because we've already seen some of the schema snapshots just get bonkers large. Right? Because if you have a 1,000 collections, a total of 25 100 fields sounds insane, but it happens in the wild. The one export file, you know, is is megabytes and megabytes and megabytes worth of of JSON. Tens if not 100, which gets unwieldy pretty quick. It also makes it more difficult to import, by the way, because we're not really able to stream it all that well and then it becomes a very large file. So you have to read it into memory and then and then use it. Let's see. Selective export. That, I think, is a tricky one. Right? How do you know what you're exporting if you, consider your roles and permissions part of this, but you have one admin dev role that you don't care about for your production instance, How do you pick and choose? Right? Pick and choose what to what to include, what what not to include, and handling, you know, sensitive data. Very good point. You know, is this gonna be plain text in a static file? Tricky. Right? Tricky in a repo. If you scroll down a little bit further, The modular files of extensions, single file per collection, you know, we kinda test. Does does it actually make more sense to have selective import versus export? Great question. Great great question. Maybe. Maybe. Yeah. It's it's like if you have Go. I'm sorry. Go ahead. I I don't have to remember to mute and unmute myself between every sentence. That's fine. Yeah. I can see both being very useful. Right? For example, if if you have a very, very large, instance with, like, Greg mentioned, right, like, a 1,000 collections, And on your dev instance, you only want to add one thing, do you really need to export, like, this whole thing that's, like, I don't know, 10 megabytes or whatever? Maybe, you know, maybe, it would be enough to just export that table with its fields, and you'd be good to go, because then you could import that partial instance, maybe. But, yeah, for for import or for export, both could be useful. But, yeah, like we said, it's just lots lots and lots of stuff to talk about there. Yeah. TBD is is the honest answer. I I also feel like both is probably where we need to end up with that. Because to your point, if you have a large project and you only care about a small subset of that as a sort of templatable piece, you know, you don't want to export everything and have a bunch of unneeded data in your repo muddying up, you know, the workflow and the reviews. Because then also imagine that you make an export and then now you have a PR of, like, 16,000 lines of stuff that you don't really need. Right? But, yeah, let's see. Extend, you know, existing schema files. That's an interesting one. Merging multiple together, importing snippets from other files, maybe, you know, from nested collections. So that's all about, you know, the the file structure for the project. Saving the non defaults, I think that is more of a technical requirement to me. Right? It's like we don't have to save default values from Directus in the schema snapshot because they're the default values. Dynamic configuration sync, it's just whenever you make a change in the studio, it auto exports basically, which feels heavy, personally. Feels a bit heavy, but could could potentially work depending on the file format. But then again, how do you choose what to export on the automated one? Right? So TBD. That's also why it's a could have. I mean, they've they've thought about it luckily. Automatic real time sync, sort of similar idea. Right? But an option in the Data Studio API triggered chrono periodically high. So the one thing I do notice in the requirements list here is that there's a lot of talk about how to get it out of Directus and in what file format, but not so much the other way around. Right? How do you get it back in? So if you have something in your repo, whatever that something is, what does that code look like and how do you get that back into the Directus instance? Right? This might be a good point actually, a nice little segue. Like I hinted at at the beginning, our very own Connor has been doing quite a lot of homework on this just to figure out, you know, the, the format and some of the ideas around this and how it could work. So if let me see if I can find him. Where is this little so many here. Look at that. Hello, Connor. What have you been what have you been up to recently? I have been up to quite a bit involving this config as code and how it plays into all the other different parts of direct disc that we wanna do. Let me get my notes up. Here we go. So you said you wanted me to talk about the structure of the exports? I think it'd be cool if you wanna give a quick overview of sort of the the research process itself. Like, what are the things that you've been looking into? What have been the considerations or requirements? And sort of the things that you found. And then dive into some initial conclusions. Sure. So what I have been going through and researching is basically we have a couple of different feature requests from config as code to templates to migrating between instances to migrating between different databases. And all of it sort of involves, you know, moving configuration between instances, moving data between instances, and moving files and assets between instances. And that is a very big task when you're trying to be database agnostic and you're trying to be efficient. You're trying to have you're trying to support multiple different use cases where sometimes you wanna overwrite everything, sometimes you just wanna bring in some stuff, sometimes you only wanna take out some stuff. Sometimes you wanna stream it all. Sometimes you wanna have the file small. So there's a lot of very different considerations that go into it, and then making it all happen with one sort of directest way of making it all magical. It becomes a very big rabbit hole that you start diving down into. And so one of the stuff one of the things that we've that I have been looking at is, you know, what are all the different use cases for it, and what are the requirements for all those different use cases? And so configuration as code is one of the use cases on there. It's not completely fleshed out yet because that was not on it's one of the later goals of what I've been working on. But with it brings, you know, how do you integrate it with CICD, you know, GitHub, GitLab. You know, do you have your own hosted GitHub, You know, self hosted GitHub. You know? Where do all your stuff is stored? And so there's a whole bunch of different parts of it. Right now with the schema service, we go and we give you you export a schema of your stuff. It exports everything. You diff it against your instance, and then you apply that diff, and it gives you, like, the changes if you can do it or not. And right now, that's really it. There's not really too much to the schema service outside of that right now, and adding in all these different layers and features. The schema service is definitely gonna have to take a new look to it. And so one of the things that we've been looking at is, you know, that initial export of a schema, you know, making it more of a distributable type of folder structure file structure, whether it's a compressed zip file or some type of other special file. But basically redefining how that schema export looks to be able to hold all these different configuration items, to be able to hold data, to be able to hold assets, and defining that structure and, you know, you know, as Ryke mentioned earlier, you know, do you want that stuff stored in plain text or do you want it to be stored in, you know, some type of encrypted format or do you want it to be compressed? So there's a lot of different variables there. And then once you take that distributable that gets made, it can be I mean, for some instances, if you've pulled out data and assets and configuration, you know, that thing could be huge. And so we wanna go we wanna bring that into the this new instance of the target instance. Right? And so we need to different and change it. And so bringing all that in and processing in is a whole another thing. You know, do you wanna bring in all of it? You know, you have all the export controls. Do you wanna have import controls to how it gets implied and, you know, how it gets imported? And so I've been going through and documenting all those different ways that we can do stuff, you know, what is dependent, you know, if we wanna do this, then, you know, we have to do that, you know. And so we've been looking at one of the things this week, you know, is what type of file format for all of this type of stuff, for how it gets really big. You know, if it gets a lot of data, if somebody has a 1,000 collections and 4,000 fields, you know, is a CSV file, a JSON file really the right file structure to store all of that data? And so we've been looking at, you know, different options and different file formats for storing, you know, structured data like that in an efficient compressed way that also lets you keep the schema of the schema export defined and structured in a way. And then also making sure that we keep that same right now, we hash the schemas and stuff so that they all stay. You know? You could only use the schema to apply to this instance because you just did it with it and yada yada. So having that in there too, you know, do we have a metadata file inside of that export that, you know, talks about what the export is? You know, do we have it? Do these become an extension type, you know, that can be used throughout the instance in different places? You know, there's a whole bunch of different options there. Yeah. Yeah. Absolutely. It was a great intro. Yeah. It was a very good yeah. Exactly. Exactly. Yeah. So the first order of business to your point, you know, figuring out what does that file format look like. We know some of the requirements now based on this discussion that we just looked at. We know some of the the downsides of the current format. So that's a great step. Then, of course, the second big step will be figuring out, you know, how do you go from that sort of source of truth overview into applying it for realsies. Right? So we have that sort of diff step in between. So for those unaware right now, if you upload a schema snapshot into the Directus API, it will compare it to the current state of the database and then return, you you know, the the the list of differences, basically. So it's a diff, not a list of changes. As in a step step by step list, it's just a diff, like an a a versus b. And then that diff is then uploaded to an apply endpoint, which will basically, you know, apply the changes required to get rid of the diff, right, to make sure that the that the 2 are in sync, that the instance is in sync with your file export. So based on that, Connor, we've done some research on what needs to happen on that diff endpoint itself too. You wanna you wanna share some insights on what we know now, at least, are some of the requirements to make that work properly with all of these new new additional features that we're trying to add in. Yeah. So with that dipping endpoint, some of the things that we are looking at is, number 1, if you wanna bring in data. Right? You know, how do you diff large amounts of data? Are you able to diff large amounts of data? That's one of the research things that's on the list. You know? Right now, we have an import and export service to import and export data. Looking at Attica drive run options. You know? Can you import this data? Can you export this data, for that diffing stuff? You know, if you have a really big file, so you do have 300,000 collections and fields, you know, that's gonna take a long time to make changes to the database and to go through and find that diff. And so having some type of long task runner on the instance that's able to sit there and work through that to that, diff or making that diff or distributable or whatever it is. You know, having such a long running service of the background of your instance, I can handle that. And then also if you're going through and you're applying all these big changes or diffing it or whatever, you don't want people in your instance changing the stuff as you're trying to change stuff. So implementing some sort of maintenance mode on your instance that basically locks it down and puts it, hey. We're making changes right now. You know, you can't it doesn't let anyone else change the schema or anything or the data or whatever you want it to do. We also, have been looking at, you know, for asset data, you know, pulling in you know, do you pull it in from the distributable file, or do you pull it in from the data the asset source? You know, do you pull it in from the s three bucket directly? You know, do you use it like that, or do you package it into the distributable? Or, but, basically, for the diffing part, the other part is that if you have a really, really big distributable or schema thing, whatever it ends up being called, you also upload downloading it from one instance, uploading it to another instance just to download another big thing, just to upload the other big thing back again is a lot of moving back and forth of all this different stuff. And so the other thing is when you upload that schema, whatever, it diffs it. Instead of it downloading the diff back to you, then you having to send it back up, it being able to just keep the diff on the instance and you just being able to tell it to apply the storage diff that it already has. And so you don't have to have that all that network changing back and forth, and then, you know, Internet goes out, then you're screwed. You know? But that's one of the things that we've also been looking at for the diffing. And then another thing is, you know, different types of strategies of diffing and importing. So that, you know, do you just wanna up cert stuff? Do you just wanna add new things and you wanna ignore everything else that has conflicts? Or do you want it to only apply if there are no conflicts, you know, or do you want it to overwrite everything, you know, so it doesn't matter if there's conflict. We're gonna rewrite over it with everything, you know. And then instead of just returning a singular diff that just compares the 2 different schemas and it just says, hey this is what's different. You know putting in more migration like making it more of a step type thing. So it works through migration steps. And, you know, oh, you need to do this. You need to do this. You need to do this. You need to do this. And basically a workflow that the thing can work through and and guide those long running task runners on what to do and how to configure your instance. And I think last but not least, the having some sort of format to expose potential conflicts for manual resolution. Right? So if one of the strategies has to be that it's up to the end user to pick and choose what to do. So imagine if you go, you know, from a dev to prod, life cycle, right, where you're not so much delete everything and insert everything, you wouldn't wanna do that in a prod obviously, And if you have an absurd strategy, but there is a conflict, right, you you have, like, a foreign key that doesn't work anymore or something like that. There needs to be some sort of format in whatever this diff looks like or this migration step format looks like that just has, a list of Here's the steps with the known conflict What do you wanna do? Right? How do you wanna modify that that step, those steps to get around the conflict? Right? Do you wanna upload, you wanna upload new data, or do you wanna, ignore that particular step, or do you wanna ignore those records? You know? So to your point, if we need some sort of driver and to check if you can import all of the data, it's sort of a requirement in order to be able to extract, you know, potential conflicts. So we need to have some sort of way to search through the data you're trying to apply, in order to know how to deal with conflicts. Right? So now that we're talking about all of this, what we're what we started to notice is that we're not so much talking about, you know, configuration as code specifically or templating specifically. What we're basically shaping here is a system that works for multiple things, right? Depending on how you use it. So if you were to make, a snapshot of everything, just full stop everything, and you import it as as apply everything, what you're talking about now is basically backup restore. Right? If you're exporting a small fragment and you're importing that into another project, you're basically talking about templating. Right? If you're exporting just the schema part and no data and you apply that to a new project, you're you're talking about seeding or something. You know what I mean? Like preparing preparing, a database basically, a a new project for what you wanted to do. And the question now is, how does that how does that all tie together how does that tie back into the configuration as code parts specifically, Connor? Because what we're talking about now is, you know, a new sort of format, generated by Directus that you can save somewhere, you know, which is fairly still, it's still fairly proprietary because it will have to be heavily compressed and, you know, directors needs to know what the format is. So what is the current thinking on tying it back into the code side of this question? Right? Yeah. So if we went the route of having some sort of distributable file structure folder structure that is some proprietary format or is encrypted or compressed or whatever, you know, you're not gonna be able to sit there and write code that is a compressed file. You know, you're gonna have to have write something that generates that file. So one thought that we've been having is following the lead of some other types of, you know, companies like AWS and their SDKs. So, basically, having some type of SDK that you can write and configure your instance with, and then you tell the CLI or whatever to execute that, read those different set the code that you've written, and then it will make a direct distributable file, diff file, whatever it is, from the code that you've read. So if you wanna go through and you wanna define all of your collections and your fields or whatever, and you can go in and define that in all your files and your code and then execute that code, it comes up. It generates that file that you can then use to apply those changes, import those changes, diff those changes to any of your target instances that you want to. Jonathan, if you might be wanna pull up, I think one one piece of inspiration that we were looking at for that part specifically was AWS's, what do we call it? CDK, I think, code development kit. If you wanna quickly Google that, it could be it could prove like, it could put some flavor to that to that point. So the way AWS has that, they basically made a JavaScript library that you can use to code, like, configuration. And then what it does under the hood is it effectively converts it into, a CloudFormation template, I wanna say, and then applies it immediately. Right? So under the hoot, you don't really notice the difference, but it's effectively a 1, 2 jump. Right? So it converts it into their proprietary thing in the middle first and then just applies that as is. What am I searching for? Sorry. CDK, the cloud development kit. If you wanna pull up the GitHub repo for that, maybe I have a link somewhere. I'm just curious if they have some some examples somewhere. It's been a minute since I've played with this, but it's an interesting, idea. There was a Directus community library, a little while back that that tried doing a similar thing, but it would run it against the API endpoints. It wasn't as flexible yet because we didn't have we don't have, you know, GreenStep. Is this branch? You know. If you wanna do AWS before that, because I think CD case is all different. Yeah. There we go. There's a link in the chat as Open it up. Here we go. Here we go. Here we go. Is it gonna go? There it goes. So this is an interesting reference for people that wanna look it up. At home, it's basically, you know what was that? Distracted by the chat immediately. Using something like CDK would mean that changes would need to be replicated from the UI to the generation scripts. Changes would need to be replicated. That's a great point. Yeah. How does that go both ways? Right? Because if you have that one format in the middle, directors can recreate that format in the middle. The directors wouldn't be able to recreate arbitrary JavaScript, basically. Right? So when you opt into something like that, I think it becomes a one way street by definition because we cannot figure out what parts of your JS file are, or your code because they also have some other languages, but you get the idea. And we don't know what parts of that file are auto generatable and what parts are, human created. Right? There's so there's no way to auto generate that back into a manually created file. So at that point, that's a great point, but it's the it really becomes, you know, it becomes a a one way street at that point. Some of the data community did a sort of proof of concept library to do this for, which is very interesting. So if you wanna pull up the direct to community schema builder kit repo, I just sent a link in the chat there. It was very much inspired by a similar idea where you have a JavaScript file that you use to sort of define. It's almost, you know, a declaration file rather than, you know, JavaScript, but it is still just JavaScript that runs from top to bottom. But you could define your schema and how it's applied as, you know, individual build steps, in a JavaScript. So this is where it gets real heavy on the code part of the codeless configuration. Right? And not so much just the, moving stuff around. So in in terms of big picture stuff, I really see this as the final step of whatever these changes are that we're discussing. We'll have to start with what is that new format in the middle, how is it generated, how is it used, and then see this as a way to sort of generate it into that format and then apply it automatically. Right? But, yeah, that that JavaScript syntax is an interesting interesting idea. So, yeah, I see some folk typing. This is one of those very typical director's projects where there is about 600,000 different opinions on the ideal way of doing this. And I think we we saw it in the chat immediately. Right? Shout out to a person that was like, isn't it not just the database template? Why why bother? Right? Which I can totally get behind that, but then there's a 180 or so of votes or something like that on the on the discussion. So apparently, you know, that is not an opinion that's shared. Yeah. What what's interesting is, right, because, technically, the most basic example would be something like a database migration, generally, in the beginning, you know, for the configuration as code, for example. So there's another, a similar project director, the CMS like project that I've checked out and see how they did it. And, so they handled this a little bit differently. They don't have, like, a DSL type, you know, language that defines your infrastructure or whatever. But, they went the route of as soon as a person or user, via the UI creates some type of change in the tables or the collections or however you want to call it, the instance automatically generates a migration file locally for that specific change. And, there's then a mode in the instance where you can disable any, any ability for other users to change the actual instance. So you can actually just rely on the migration files, which is an approach that you could take, you know, because a migration file then could technically do anything you'd like, you know, with with regards to, you know, the collections, the fields, whatever, even inserting, items. But, then, you know, because we're a directors, we want to that's a little too easy for us because we would like to include some type of things like, alright. How about you, locally, you develop some type of new feature, new new table, new collection, new fields. And then in order for that to work in the way that you want it to work, you need an item. You know? You need to include a new new row, data row, or an asset. This is the thing now. Because, you know, assets are not inside of the database. So we want to include assets, for example, or maybe this, of course. Nothing is, you know, set in stone, but, you know, including assets, for example. So you want to make some changes, and you need to include some assets for your changes to be even, you know, useful. So you would then have to, you know, do your changes, test it locally, include everything with the correct file name, with the correct row, whatever, or other metadata of an asset, for example. But then on production, you would have to replicate that again. So you get this back to back to step 1. Right? So this is kinda it kinda sucks, with a migration part. So even then, if we want to include this, then we get back to the issue at hand that we were talking about. Right? We want to have a process that could export something, and you can recreate that between instances and so forth and so forth. I would, I just wanted to mention that for the others in the chat because, it's not just about, you know, just adding a field because that's basically, you know, that's a basic thing, which we could solve. And I think the Director Schema Builder kit is basically that. Right? You you generate some type of syntax, which generates some type of migration. But you have to keep in mind then, of course, right, different database vendors, we have to abstract that. Because, for example, you know, in SQLite, if I remember correctly please correct me. If I remember correctly, like, you can't alter a table and introduce, like, a foreign key. You're forced to drop the table, actually, and recreate it in order to add a foreign key, for example. Other databases can do that as CLI can. So there's lots of different honest there, I'm pretty too sure that the last minor release of SQLite didn't come out too long ago. They finally do have that alter table sort of baked in. Although then, you, of course, you have the you have the side effects that it depends on your native build of SQLite on your machine, which may or may not have had. So generally speaking, historically, you've been absolutely right. It's been a nightmare and a half to to do that. Lots of fun. So I just wanted to make sure that, people in chat, you know, it's it's not about just adding a thing. It's it's a little more involved than then. And, you know, including assets and, like, the other sent. Right? Maybe there's proprietary information or whatever, and you're not allowed to leave it on your hard drive. Maybe you want to zip it, encrypt it, compress it. There's lots and lots and lots of different steps that we have to, kick off there. So alright. Oh, we got some in the chat interaction. Cool. Cool. Cool. So alright. Top to bottom. First question. What speaks against using JSON or YAML files? Instead of JavaScript, this way the changes in the webpack could also be synced back to the files easily. So for what it's worth, the the, formats that we're talking about being generated from directives would most likely be, you know, in some sort of structured format. Not quite sure if that's JSON or YAML yet or if we have to find some sort of optimized, file format to do that. Because the the the risk of JSON and YAML exports, once you start including data, you know, we no longer know how much data you wanna include. Like if we're treating this as you could use this for backup restore we could talk about a large large amount of data right at which point we need to have a very optimized structural format that may or may not be usable in that point. Right? Connor, remind me, we found like an Apache file format that could be interesting for this. I think what was it called? Parquet or something. Right? It was called Parquet. Parquet. Yeah. That would be an interesting file format for something like that. Or potentially using a SQLite database as as the exported file. Like, that's a completely different, direction. But But you get the idea. We need to have some sort of optimized compressed file format because the export file could get really large. Now, it might be an option for the way, you know, you save one of those to just save it in a sort of raw mode. Right? Where it doesn't save it compressed, at which point it's just it could be human readable YAML or or JSON, including, you know, the ability to properly, source control it. On the migration note, I think you answered that before, Daniel, like, exactly right. If you're doing auto generated migrations, it's really only for the database schema part. Like, we can't really know on your behalf if you consider, insights dashboards part of configuration or flows or something. Right? So it's it's gonna be it's gonna be tricky because people different people have different export requirements. And if you go from dev to prod, all bets are off. Right? You you never quite know what the the idea is. Creating internal libraries fiber to schema works with native access to directives rather than the API using integrations and help creating a lot of complex repetitive action. I can imagine. Yeah. Because you can write a little JavaScript for loop and just block. You have 10,000 collections. Right? But, you know, the lack of two way integration between the UI does cause issues which is the unfortunate side effect of using, you know, a programming language rather than a declarative language like yaml or JSON, for doing schema modifications like that. You're gonna lose that two way integration. That being said, you know, if Directus has a don't allow me to change the schema environment variable flag, whatever, you could make that, you could do that on purpose. Right? For a production instance, for example, I can totally imagine that you disable any sort of schema modifications just for security reasons, and and availability reasons and only allow those changes to happen through whatever system we're we're cooking up here. Right? I think default value filtering could help make the YAML auth more manageable. Fully agree. You know, we should only store the stuff that we need to know, and storing default values feels like a waste of waste of space. Then Azure is working on some sort of YAML based metadata authoring, announcing Azure data delivery net auth sales. What marketing email? Very curious. Haven't I haven't heard of that one before. If you wanna keep the GitOps thing, it should really be a text format. Good point from Tim. Which may or may not be answered from by Dominic here. If you split up between schema and content in different formats. Right? Maybe the configuration piece is all human readable file formats. But if you have a data export that is maybe there's a file size threshold. Right? If you have a very large CSV export maybe there's just a smart point where it's like, oh, you're trying to save 10,000 rows We're gonna flip it automatically into a compressed, non readable format so you get the best of both worlds. Right? Potentially. Cool. Alright. Cool. We tried. I it's something similar. Yeah. Just to, you know, chime in to regards to what you just said. So with the, like, yeah, like, yeah, we have to split that up preferably, you know, or at least, you know, required to have this as text based so you can, you know, use it in version control, whatever. And, for, like, including items, for example, you know, a ZIP is is a nice thing that you could use, but then, of course, you know, maybe this includes then items that are not, like like an old version, for example, and, you want to insert something in in where a field doesn't exist anymore, and and lots and lots and lots of other, you know, stuff. And, you can then, of course, if we then have all of the different points that we want to persist, right, like flows, permissions, the general config as it is, you could include this then also in the export with the items, so you can do both at the same time or see if it differs and then cancel the thing. But, yeah, it's, it's a fun thing. You know? There's lots and lots of things that could go wrong. There's so many. No no matter what, we wanna make sure that the output file is a single distributable. Right? We do wanna make sure because we on the one hand, we're saying we have to split it up into multiple files in order to make it efficient and easy to work with, But, at the same time, we also wanna make sure that you have a singular thing, singular file that you can send over, to somebody else. Right? Either through the API, so you just have a single download or a single upload, or as a file, maybe packaged through, you know, the marketplace. Shout out. Wink wink. Notch notch. Or as, just to email it to somebody for a like air. Right? Put it in a GitHub issue as a zip. So there needs to be some sort of both. Right? But I could also imagine that, you know, the API, lets you download it as just a zip, right, that you can just double click to open if you're on macOS or do whatever else it takes on other platforms, to unzip it. Looking at you, Daniel, I'm sure there's a 2 step process. For those who, out out of the loop, he's, this is the year of Linux on the desktop, evangelist within the team. Yes. This year. Here it is. This is the year. Mark my words. And this year is the year of the Linux desktop. This year. Yes. But long story short, we it's it's we're we're in that weird in between that we need both. Right? We need both the single file and multiple files. So we'll most likely have to come out with some sort of zip, gzip, something like that in between. Cool. Prisma migrations are an interesting way of doing things. They have a custom format, which is more concise than directed YAML, and then some CLI tools that create actual SQL migrations in the syncs the environment. Yeah. Great example. Right. Good example. They have they basically do with that shadow shadow database, if I'm not mistaken, that's how they keep track of those migrations step by step by step. Yep. Right? And then with the the CLI tool, it can compare your custom migration format with what they already have tracked so far. And then, you know, apply apply the diff based on that. Still has the similar I think a similar one way issue, though. Right? Because the Prisma migrations that you write manually, I don't think they can sort of update those from the others from the other side, so to speak, that a two way binding. Good point, though. Good point. Alright. Could remember. I had a problem with in the past. So, sadly, I can't remember it right now, but it was very painful. So, it's not a 100%. Yeah. It's not a 100 perfect thing. Nice. I've seen people you know, there's there's lots of other, what is it called? Drizzle drizzle right now? Oh, there's a couple of RMS like that. Yeah. Yeah. So there's there's lots of sauces that we could, you know, yoink some code from, but be inspired. Let's say be inspired. Yeah. I don't think we've been yoinked. I don't think we've legitimately ever yoinked code before. Borrow. Borrow. See. Strategies is interesting there. But the the the main difference and then you you touched on it perfectly before. The main difference there is the just keeping track of the database schema versus having that ability to just sort of manual template in between moving stuff between dev and prod that is not just schema, but also data and how to deal with that, which at any point needs to be, something you can do manually. Right? It doesn't have to be manual all the time, but it needs to be something you can do manually Right? Cool. Well, with all that being said, looking at the clock here Connor, back to you. We've discussed quite a lot of the research up until this point, some of the requirements. You and I have also sort of been daydreaming about potential ways to implement this moving forwards. You wanna quickly touch on sort of the different, different phases and different sort of parts that we wanna touch as part of this bigger bigger effort? Yeah. So the first thing we wanna do is we wanna figure out how this is gonna look, the distributable, the diffing. What is that file structure? What's the file format? You know, what are the requirements for that? Do we have encryption? Do we have multiple files? Do we have one file? Do we have whatever? Like, we've already discussed here. First thing is defining what that'll look like in totality, covering all use cases so that as we progress through the the phases of this project and making it happen, we can keep that spec that we have for those files in mind, and we can make sure that we cover everything whenever we could work through the different phases. But the that is phase 0 to finding out that spec, figuring it out what it looks like. Phase 2 or phase 1 would be upgrading the schema service foundationally right now. So making sure we have all the bug fixes with the schema service taken care of, you know, adding the new different, you know, different strategies that to it that might be needed. You know? Do we have, you know, add in those exports, export filters, you know, only export this stuff, you know, that stuff. So getting that schema service, adding some more features to it. And then the next phase is working on data importing. And so adding in the different features that would be needed to make the data importing work, You know, dry running, imported data, importing strategies. You know, do we have a import strategy where right now everything just gets up sorted? Do we wanna have an import strategy where if you import the data, it drops all the current date data in the table and re imports everything fresh? You know, adding those different options. Some other just things, ideas that have been thrown in the mix is, you know, if you're moving between one instance to another instance and you don't wanna bring any of your IDs, your primary keys, your foreign keys, you know, adding some type of way to anonymize that as it goes from one instance to another. So the new instance makes new IDs for everything. Or adding, you know, the ability to for, like, templates, you know, if I wanna bring in a template for project management and have a due date in one of the items, but I made the template a year ago, then the due date's gonna be from a year ago. So, you know, being able to have dynamic data that gets brought in on import. And so, you know, oh, I want you to import this as a date, but make it for in 2 hours after the import is when it's set. And then the next phase after that, working on data importing is getting into putting it all together. So we've worked on the spec for the thing. We worked on the schema service. We worked on data importing. Now we need to bring it all together into this this new overhaul configuration, multimodal thing, and have it work on bring all those pieces together and make it work, basically. And then after that, once we've gotten the specs spec'd out, the schema service upgraded, the data importing upgraded, we brought it all together, and it now works foundationally. Now we need to figure out how do we use this to implement these different use cases? So for templating, you know, what do we build inside of the data studio admin app from a user interface and API perspective to make templating work? What do we do from the configuration as code to use the stuff that we've already built? So how do we implement the feature set that we want in the configuration as code use case. You know, are there any other use cases? You know, back up being and restoring an instance. Do we have the feature set that we wanna use for that? And then that should, in theory, wrap up the project after that, after we figure out and implement those use cases. So there's quite a few steps here. Quite a lot of different items and it's going to take a long time. But at the end, we should have something pretty cool. Ideally. Yeah. And I think I think Daniel's facial expressions told told the whole story. No. But that'll I mean, it'll make sense to me. It sounds it sounds like a large overwhelming amount of stuff as it usually does. But by breaking it up, you know, step by step like this, we actually have a pretty solid idea, you know, start to finish of of what is involved to getting that across the finish line. And, yeah, as per usual, you know, there's a lot of, what's the word? Direct this magic going on under the hood to to tie it all together. But I'm always just very excited and glad that we're able to sort of re envision this as we have one underlying core foundational engine that can power all of those different use cases. Right? Rather than trying to tack on a new sort of templating piece and a new sort of code first configuration piece and a new sort of other piece. Right? That just increases the tech that makes it hard to maintain. It makes it those different flavors incompatible. You know, somebody will ask, how do we do a template as code? And we're like, well, you can't because code is not for templating. You know, that kind of stuff. It really reminds me of how we built flows. Right? That we had the hook extension first. And if you configure a new flow, you're effectively just building together a hook extension, and it's all the exact same underlying, underlying logic and event based system, which is also why, you know, flows you can do it say the same stuff as you can in a hook. Right? Well, same events. I mean, you can, of course, in a hook, you can code it yourself. So I'm very glad that we're making this, you know, a foundational upgrade to the schema snapshotting engine rather than trying to make it yet a new thing cool cool cool well that all being said I see we're at time here. Let me just quickly briefly peek at the chat. Do we missed anything? Pascal mentioned dynamic collection of field names will be cool. EG importing third party templates so you can choose your own target names. Good point. I think a similar goes for the conflict resolution piece that we talked about. Right? If you're trying to import won't be complex. Very, very, very true. But rest assured, it will be complex, I'm sure. Cool. With all that being said, this will be, life on Drax. Io/tv/requestreview in about a week week's time. Also, last episode, you can find there too. Should be if you're watching this on DirectTV, it's probably somewhere over there, there, there, or down there. Is this the point where we say, like, and subscribe? No. We don't have that yet. It's exactly we thank you all for joining.","published",[149,159,168],{"people_id":150},{"id":151,"first_name":152,"last_name":153,"avatar":154,"bio":155,"links":156},"23ebcf2c-4374-4f5c-8198-f8ad497fd856","Rijk","van Zanten","7ef9652f-3835-432c-a43a-c5fe13001f31","CTO of Directus",[157],{"url":135,"service":158},"website",{"people_id":160},{"id":161,"first_name":162,"last_name":163,"avatar":164,"bio":165,"links":166},"0d906492-75f0-45d9-abf7-ab779bf1ed08","Jonathan","Wagner","5062e4df-a198-4b40-af47-42362d3c0551","Sales Engineering Manager at Directus",[167],{"url":138,"service":158},{"people_id":169},{"id":170,"first_name":171,"last_name":172,"avatar":173,"bio":174,"links":175},"07ec688d-251d-4efe-bc17-73848402d43b","Daniel","Biegler","8897b70f-c524-460a-8990-58cc5c3be886","Engineer at Directus",[176],{"url":141,"service":158},[],{"id":179,"number":180,"year":181,"episodes":182,"show":195},"6aa046f1-bd53-4510-9af0-c0f3daaf4415",1,"2024",[183,122,184,185,186,187,188,189,190,191,192,193,194],"daed2c08-703a-43d6-ac97-aacac61be4c0","0b5f4343-1494-455b-b41a-25811c151242","b2b01569-d8c6-49a7-adaa-429fe84f204f","b63afbe1-6418-4e9e-b1da-4890979789f0","69ad81e8-5e1d-4b85-9fa9-3b767a3a3478","5c9c888c-f527-4608-a2f7-56f156d00980","243daa59-3772-4ebe-b212-c2a09a4a0b71","d66c1e46-cc57-49fe-a914-2e440bbc1576","12c8f72d-22fa-4ffa-a9d1-57047216fd1a","8896c934-aa2c-43b6-9342-8275682ab8b2","84c7b3ac-fd85-4539-8f39-3247118bcbf2","044b7c89-aaec-43b2-9d6d-6743a0fb5afd",{"title":196,"tile":197},"Request Review","73687d01-3734-4c28-aef7-e6fa8db4cf1e",{"id":184,"slug":199,"season":179,"vimeo_id":200,"description":201,"tile":202,"length":203,"resources":204,"people":207,"episode_number":211,"published":212,"title":213,"video_transcript_html":214,"video_transcript_text":215,"content":8,"seo":8,"status":147,"episode_people":216,"recommendations":220},"flows-log","912263898","In this recording of our live event on February 8 2024, Rijk, Jonathan, and Daniel discuss improvements to our no-code automation builder - Flows.","f89ae85c-4df4-4b9e-86db-8ca73f5980ba",55,[205],{"name":130,"url":206},"https://github.com/directus/directus/discussions/15870",[208,209,210],{"name":134,"url":135},{"name":137,"url":138},{"name":140,"url":141},3,"2024-02-15","Improvements to Flows Debugging","\u003Cp>Speaker 0: Alright. Yeah. So, Jonathan, you already, briefly introduced that seeing all the hiccups, I don't know if we're gonna cut that. But just to restart it, we're gonna be talking about flows today. We're really gonna be starting with this 15 8 70 about improving the activity panel.\u003C/p>\u003Cp>But, realistically, as far as I'm concerned, it's sort of a smaller piece of a bigger discussion around upgrades to flows. Right? So for those out of the loop, we introduced that flow system as a way to do event based, actions and operations, kinda like you could with hooks, or custom endpoints, but doing it in a no code type of way. Right? So you configure when you want your flow to trigger and then step by step configure individual operations, individual nodes of sort of this this path, that you wanna execute.\u003C/p>\u003Cp>The original version of that was really much designed as a, start with the basics, see what people wanna use it for, see where what what the best improvements are gonna be. So we shipped it a little bit lightweight, you know, with a handful of operations and have been more and more over time. But it has been becoming increasingly obvious that there's, a lot of improvements that would be really nice to have, especially when it comes to debugging, sort of helping configuring some of these pieces. So with that being said\u003C/p>\u003Cp>Speaker 1: Yes.\u003C/p>\u003Cp>Speaker 0: Discussion. So for Daniel, who's flying blind here, we're looking at the one about the logs first. Right? So if you're currently running a flow, optionally if you have it enabled, it's enabled by default. It will keep track of the data that sort of went through the various operations, and then saves it to, a logs tab that you can see on the right hand side of the flow.\u003C/p>\u003Cp>This is basically the primary way at the moment where you can debug what is happening under the hood, because otherwise, you know, there it's it's the way where you can see what data came actually through the trigger and how you've modified the data points in between. So over the Jonathan, you're you're showing it out what that looks like now on your own instance. What are some of the points in this initial discussion that triggered this this conversation? You've got\u003C/p>\u003Cp>Speaker 2: a lot of the ease and access to that information, as well as the ability to control, like, durations. Some of the problems we run into with flows, especially if someone once they get them into production, if they still have the activity and logging turned on, you can fill up your activity revisions tables really quickly. And there's you know, the mechanisms for cleaning that up are, you know, another flow, or direct database access or other kinds of things where you've gotta manage your activity and revision logs and so forth. Common recommendation that I make is once you've done your testing and you've got your flows functioning is to disable the activity and logging. But there are cases where you may want that and logging just for audit trackability on actions and things that people are doing with flows.\u003C/p>\u003Cp>So, I think some of that is just the general trigger and management of that. Other things that, tend to cause struggle for people in flows is the way that you access this instead of being able to say, look at the data from the log directly in something that I'm working on. There's been a again, there's a number of ways that we can kind of shake and look at this. But the general thing is this kinda lives over here. I can't edit this while I'm looking at this, so the the interactions with logs and the work that you're doing is kind of a it's a bit of a hindrance when you're doing the development phase of flow development.\u003C/p>\u003Cp>So it's just a more a nuisance than anything else. But the the inability to say edit something here, I have to actually come here to check. So if I'm looking for variables or data or what are the things looking like in here, what's my payload? Now I've gotta remember this data structure or copy this data somewhere else because I wanna actually do something with it. I wanna interact with it.\u003C/p>\u003Cp>I wanna use those variables. So it's kind of a it's a weird transition kind of back and forth. Some of the systems that we we got in some inbound activity on some other tickets and feedback that we got from some clients on the enterprise side, was you know, they showed showed us some other systems that have similar no code kinds of flow capabilities. And logs, instead of living in, like, a side panel in a weird way, kind of live with the operations themselves. So there's just some general ideas and thoughts around how that interaction in the UX kind of works.\u003C/p>\u003Cp>Data wise, I don't think anybody's complaining about the information that shows up in here. That tends to be you know, it's fairly easy to work with and deal with. I think it's more the kind of user interactions with data.\u003C/p>\u003Cp>Speaker 1: Yeah. For me, personally, I've ran into this when I tried to set up a flow which triggers another flow, which then leads you to, you know, switching between those 2. And then you have to check that again, but then you forgot on the first flow, forgot something, and you have to switch back. And, you know, so people do run into this, and, I've experienced this myself. So, generally, if I if I experience this, the general user will probably also experience this.\u003C/p>\u003Cp>So this is a very valid point, in my opinion, and, we should we should improve this a little bit. You're muted, Jonathan. Sorry. You're muted.\u003C/p>\u003Cp>Speaker 0: You look very passionate, though. I'll give you that. But\u003C/p>\u003Cp>Speaker 2: Sorry. I'm not gonna mute. So so what that does is, you know and that's why we saw this very same thing as we started doing some internal research on just this ticket. We rapidly diverged into we've got 4 or 5 other kinds of spec, and that's why we it's kinda leading into this call today. We were talking about the fact that there's some general overall flows improvements that I think we'd like to see across the boards.\u003C/p>\u003Cp>But the logging and activity, you know, that's that kinda triggered this action was really about that the way that those logs interact and how you have to it's kind of a separation of what is a common function.\u003C/p>\u003Cp>Speaker 1: Yeah. Definitely. Totally agree. And, like the other comment on the chat said, switching is painful. Yes.\u003C/p>\u003Cp>An auto refresh for logs would go a very long way because I I do think that these two things are very, service the same niche, service the same pain. Because you you want to have up to date logs and see what is different, what do I have to work with, what can I work with, and not being able to have that on auto refresh, for example? Is the same thing as context switching back and forth, and you want to refresh and whatever. So I think these two pieces are very similar and and and try to work on the same pain, basically. So, good point from the chat.\u003C/p>\u003Cp>Thank you very much.\u003C/p>\u003Cp>Speaker 0: So there's a lot of lot of, I saw the word divergent on the screen. Just made me think there's a lot of divergent ideas happening at the same time. Right? We're talking about a lot of smaller optimizations like the auto refresh or some sort of way to make that context switching a little bit less painful. If we take a deeper look at this discussion, though, that sort of triggered everything, are there any particular pain points about the way we describe or show those logs that is currently sort of, something that should be improved?\u003C/p>\u003Cp>Speaker 2: I'm kinda skimming through this guy.\u003C/p>\u003Cp>Speaker 1: See some people typing in the chat. Please let us know if you have experience in using flows and would like to, contribute. We're here for you.\u003C/p>\u003Cp>Speaker 2: I think on the Trigger log\u003C/p>\u003Cp>Speaker 0: is the hardest to understand.\u003C/p>\u003Cp>Speaker 2: I'm still on. So on that log side, the other key thing that's being pointed out here is there are differences in the inbound payloads based on whether you're doing a create, update, or delete operation. You do have some variation in the trigger payload bodies. And, again, I don't know that it's a bad thing, but it is one of the things that's called out as a there's differences in the pathing, say, to the collection or the item, based on what type of event triggered the action. Yep.\u003C/p>\u003Cp>Speaker 0: Yeah. Very good one. That that makes a lot of sense, and it's it's it's explainable from a technical perspective, but also makes sense where it's where where the pain point's coming from. So earlier on, when we just did the hooks initially, the decision was made to differentiate between create a single thing and create multiple things, which in turn means that sometimes you get a single ID as a sort of, string or a number, and sometimes you get an array of ID strings or a number, which is basically just a bigger discussion around what does that hook payload look like. Right?\u003C/p>\u003Cp>Because everything is based on the same internal, event system. So, you know, a flow is triggered based on the same hook that you get from, a hook extension, for example. It's all the same thing. Then, the difference between, you know, trigger dot keys, trigger dot body dot keys, etcetera, sometimes it's payload, sometimes it's not, That really depends on the type of trigger, which, again, you know, doesn't necessarily make it make it better, but it is, you know, explainable where it's coming from, where. If you're having an endpoint, you know, with a a trigger request, now you're dealing with a sort of user payload that was submitted that could be anything.\u003C/p>\u003Cp>Right? If you're dealing with a hook, it's a pre pre known format for what our hooks fire. But, you know, it's there's a difference. There's gonna be a difference, and that is that's definitely tricky. Somebody said, if hooks supported loops, we probably wouldn't need the differentiation for 1 versus multiple.\u003C/p>\u003Cp>Hook supported loops. I'm not entirely sure what you mean by that because a hook is just a bit of JavaScript, so you could loop over whatever you want. Right? Oh, flows. Oh.\u003C/p>\u003Cp>Oh, I see. Gotcha. Gotcha. Gotcha. Gotcha.\u003C/p>\u003Cp>Yeah. So we're basically saying, you know, if you have a way to just say do this flow against every item in the triggered whatever trigger keys, then we can basically just drop the one hook and make everything an array all the time, sort of get rid of some of that confusion, which for the record Yeah. Makes a ton of sense to me. I mean, every every insert into the database could be 1 or multiple things. And if you do one thing, it's just an area of one thing.\u003C/p>\u003Cp>Right? It's basically it's it's easy to explain. So I I do agree with that sort of general sentiment. Even though for the record, that would be a quite a big breaking change and totally wreck everybody's existing flows and extensions. So TBD TBD.\u003C/p>\u003Cp>As mentioned, the extension shed will be very possible to create a repeat operation for all elements in an array extension with exposing a single function from the flows manager. Yeah. Yeah. No. Absolutely.\u003C/p>\u003Cp>Yeah. That's very true. Very true. Cool. There is one more, thing in the chat here from our very own Brian saying remembering the key names for operations is a pretty big headache for me.\u003C/p>\u003Cp>Why are the keys only showing on hover? Where can I not copy them? Which has been a thing all over the app. And just trying to remember what that key name is. That's a very good point.\u003C/p>\u003Cp>And I think, you know, overall in the app, we've sort of on the side of making things sort of, what's the right word? User friendly is such an empty thing, but, like, look pretty for nontechnical users. Right? So a lot of stuff gets, what we call title formatted. Family friendly.\u003C/p>\u003Cp>Yeah. Exactly. We we we title format a lot of that stuff, so it looks prettier in the UI. But totally. Yeah.\u003C/p>\u003Cp>For things like flow logs, you know, which is inherently a very technical thing, flows is only available for admin users, which are generally speaking, a little bit more of the on the technical side. Those should most likely just be the technical keys. Right? Render them in monospace and just lean into it. Because why why show them as a title formatted version and then have it only show on hover that you can then not copy paste because it's a tooltip.\u003C/p>\u003Cp>Right? It that that makes a ton of sense, and that is just a perfect sort of tiny little tiny little tweak that's a huge quality of life improvement. Jonathan, I hope that was in the notes somewhere now.\u003C/p>\u003Cp>Speaker 2: Yep. I'm taking some notes in in a separate section, over in the the actual internal notion doc that we've got running on this guy. So I'm trying to capture pretty much it.\u003C/p>\u003Cp>Speaker 0: Another good point here too is, like, why even have a pretty name if you just have the key? You know? Good point. Good point. I think having some sort of description is a nice, you know, nice to have where at least you can write, like, a mini description, but you wouldn't really use the name for that anyways necessarily.\u003C/p>\u003Cp>Cool. Okay. Just looking at the discussion, though, because I see we're sort of, what, 1 eighths of the way on the scroll on the page. So I'm kinda curious to see if there's any other points in this particular discussion that we haven't really touched on. That's easy to forget.\u003C/p>\u003Cp>So these ones we just looked at.\u003C/p>\u003Cp>Speaker 1: I think, the suggestion by our own\u003C/p>\u003Cp>Speaker 2: Oh, the the JSON object wrapping. Object wrapping was the other big thing. Again, having this in the side panel, data here, you can start to see if you get long things. It it actually goes off screen and ends up, you know, and you I don't even think it enables a scroll. I'm not sure you can even get to long data that's being displayed in the log there.\u003C/p>\u003Cp>So some form of wrap, at least, if we're gonna continue in this panel. I think, ultimately, I think we'd like to maybe move this out of the panel anyway. It'll end up somewhere else in the UX. But something for us to keep in mind is that JSON wrapping, and we have that captured as well in a I\u003C/p>\u003Cp>Speaker 0: was just gonna say that it's there it it's I can already also see there's no way to search or filter through logs. Right? That is another, you know, in. The only thing we show about the log is just a time stamp and then a rel as a time stamp. But I'd I'd say an obvious next step is to sort of move that out of the sidebar into just a proper, you know, layout like we do with other things all over the place.\u003C/p>\u003Cp>So you get the searching, you get the filtering, and we can have a proper detailed view, or draw our although a detail page is probably gonna be nicer for that. Save a little bit more space, for things like that overflowing scroll and just presenting it nicer. Although, then, you know, we create a new problem, which is now the logs live outside of the flow where you configure them. So at that point, you know, we also wanna make sure that we have some sort of way where you can render that maybe as a split view, maybe you can sort of 50 50 between the flow that you're creating and then seeing the logs of that flow with this layout. Because I do believe that keeping them in context or at least having some sort of link back and forth, from, you know, where you're configuring the the law, the flow and configuring the log is gonna be important.\u003C/p>\u003Cp>Speaker 1: From\u003C/p>\u003Cp>Speaker 0: the chat, having logs and operations might be helpful for that. Yes. Yes. Yes and no. It's it's I'm I'm, to me, the logs on operations is an addition, not a replacement.\u003C/p>\u003Cp>I think personally it's very valuable that you can see, you know, the full, execution path in a log. You can see, you know, at 2 pm today we started with this, we started with it, that one failed, and then we did this, and then here's the data that went through it and what we concluded with. I do think it's important to have that sort of consolidated together, but the the logs and operations, I do agree, would have really be a really nice addition to that where you can just look like, okay. In the last couple of runs, here's the day that it came in and went out of this particular box in your flow. Right?\u003C/p>\u003Cp>I don't know if that's gonna be a deep link or maybe just an info.\u003C/p>\u003Cp>Speaker 2: Almost feels like you could have, like, a split panel down here that had the normal, like, search layout capabilities of searching through the logs, could be an interesting use case for this where you'd Mhmm. Similar to Visual Studio and other kinds of console logs where when you're doing this kind of development, you've got logging information you can see down here. Because I also think some of the other nice things with flows would be able to have, like, step through. Right? Being able to step through and see data at certain points, what's going on as you're debugging and working on the flow itself.\u003C/p>\u003Cp>But then as you say, having an actual, you know, log panel or maybe there's a, like, in a sub ten under flows where you can go to the logs and just see logs full screen, be able to search. Because once my flow is operating, I think the the DERF, pointed out that they're they like logs turned on so when users report issues hours later, they can go back and search the logs. And in that case, I'm not looking at the flow. I'm looking at what's the data, what's the flow, what happened, what errors, what things do I see. So\u003C/p>\u003Cp>Speaker 1: cool.\u003C/p>\u003Cp>Speaker 0: Because the nice thing by putting the again, this for for those new to the stream, this is very much a session of divergently thinking. Right? We're just gonna go brainstorm a bunch, like, what will be the ideal state and then take it back towards the end to, like, okay. What is an actual realistic next step? So I can also imagine, you know, closing your eyes thinking about it, if we have a proper detail page, like a full full screen view for the logs for one particular run.\u003C/p>\u003Cp>Right? We could actually render sort of a smaller, graphical representation of your flow as you have configured it, in in here, and then just show you. You can sort of replay it, so to speak. So we can just show you step 1, and then that's the data that came in and out based on the logs that you're looking at. Right?\u003C/p>\u003Cp>So instead of just having a long list top to bottom, you could effectively just get a visual replay, of that particular flow operation. I think that could be an interesting one too. Because then you really get to visualize the the execution of that flow because you're effectively trying to follow a path through your flow with the log. But right now, the log is always gonna be a a sort of,\u003C/p>\u003Cp>Speaker 2: what's the\u003C/p>\u003Cp>Speaker 0: right word, a one dimensional list, top to bottom. Right? Chronological. But, you're thinking about your flow, potentially, in more of a graphical tree. Right?\u003C/p>\u003Cp>Where it's like you have option b, and then you branch off, etcetera, which is not really, represented in the logs in any sort of way right now. From the chat, somebody you could almost use the exact same UI as the flow editing page for that. Yeah. No. Yeah.\u003C/p>\u003Cp>Yeah. Exactly. Maybe a little bit smaller so you have the whole thing, you know, in view at the same time, and we just can highlight whichever box we're currently showing you. Yeah. I think that could be a very interesting visual representation.\u003C/p>\u003Cp>It becomes very interactive at that point. Right? Where it's almost like, you know, the logs will just be represented with some sort of timeline bar where it's like, okay. You read operation 1 out of 16 steps, and then you can just go to the next one. Then the visual representation up top just sort of, like, highlights which box we're on.\u003C/p>\u003Cp>Right? And then just go to the next one, go to the next one, go to the next one. I still think we would have to have some sort of way where it's just, like, show me the whole thing as a JSON blob. Don't make me go through this this pretty thing, but I'm pretty hopeful that that could help, really help debug what's going on here. Speaking of which, why is the flow edit page the default?\u003C/p>\u003Cp>The view page seems kinda pointless. Yeah. Agreed. Done. So easy it can be sometimes.\u003C/p>\u003Cp>I I think the the honest answer is that kinda came from flows being initially designed as using the exact same UI and UX and order of operations as dashboards, where that is, very different, obviously, where dashboard is definitely read read only by default, and then only if you wanna change something to your panels for the order of the dashboard or that kind of stuff that you really go edit it. But in this particular case, I agree because it's like, what is really the difference between read and editing it? The only difference for read is that you can't, you know, click a button, but there's nothing that's really, you know, different between the two states. Right? So just getting rid of that read only view, I mean, it it makes sense to me, honestly.\u003C/p>\u003Cp>Food for thought. It's not not something we're just gonna we'll have to double check if that all makes sense, but just you know?\u003C/p>\u003Cp>Speaker 1: Well, let's say let's play with that thought a little bit. So let's say we make a new page, detailed page, which features everything that we want. Would we like to then be more stringent on how it looks? So maybe take away the ability to point, to to place nodes wherever you want so we can, you know, for example, make it, smaller for the detail page because we probably don't have the same amount of space as of right now. Is that something that we would like to do?\u003C/p>\u003Cp>I don't know. Like, how important is that even?\u003C/p>\u003Cp>Speaker 0: I think for the flow detail page, for the if you're talking about the flow detail page, it's not important. It's it's really more of a visual aid. This is like if you're mentally, you know, visioning what your flow looked like when you created it, you can see it sort of, like, go through the go through the motion. So it's really more meant as a graphical element that is more for, it's kinda like a map. It's kinda what I see it like.\u003C/p>\u003Cp>Right? It's it's a nice to have, not really a requirement. What I was kinda thinking we could potentially get away with though is instead of rendering the full boxes with everything in it, which just be the name or just an empty box for that matter. What we could do, though, it's I don't know if you've ever ever played with this, but if you have a dashboard set up Jonathan, do you happen to have a dashboard in this this demo instance? Plenty.\u003C/p>\u003Cp>You gotta love it. Dashboards and sort of the underlying panels and the the viewport stuff. Yeah. Exactly. It has a button that just sort of zoom out the fit.\u003C/p>\u003Cp>And also a full screen button for those who don't. Pro tip for you. So you can full screen and make it fit on your your, fit on your screen. But we could use that exact same thing for doing just sort of a mini representation of, of your flow at the top of this flow's detail log page. Right?\u003C/p>\u003Cp>Because you don't need to see all the exact details. It's mostly just as a graphical map for going through your flow. This could be a very\u003C/p>\u003Cp>Speaker 2: interesting trick. Similar similar tools I've seen, you can actually zoom in or out. You can actually zoom your view so you can see more of your flow or all of your flow. And then when you double click or click on something, then you get back into the item view or the detailed view. So back in our flows example here.\u003C/p>\u003Cp>Right? You'd be able to, you know, have a zoom option or be able to scroll in or out and zoom your view similar to a map style the way we do with the geospatial kinds of things. So very similar to that would be fun.\u003C/p>\u003Cp>Speaker 1: I wish I could see the screen right now, but, I thought there's something\u003C/p>\u003Cp>Speaker 0: Oh, right. About exhibit. I forgot.\u003C/p>\u003Cp>Speaker 1: For example, yeah, if if we decide to, you know, like, restructure the visuals or want to provide a different type of view, for example, like, we don't have to have boxes. We could also have, like, circles, for example, which could be smaller, take out the space. Maybe they're horizontal or something. I'm not sure if you showed this, right now. I'm sorry for that.\u003C/p>\u003Cp>But\u003C/p>\u003Cp>Speaker 0: Oh, no. No. No. No. It's I just I just love the the very bit of a sidebar.\u003C/p>\u003Cp>I just absolutely love the the artistic way you brought that just now. Maybe instead of rectangles, they could be circles. Yes. But yeah. No.\u003C/p>\u003Cp>You're absolutely right. Because we the nice thing is we know the exact dimensions of each box. Right? So we could theoretically, for a graphical overview, render each box at a half height and offset all of the, x and y positions by, you know, the number of boxes high To sort of recreate the diagram in a more compact view.\u003C/p>\u003Cp>Speaker 1: For sure.\u003C/p>\u003Cp>Speaker 0: Yeah. Good point. So let's let's noodle on this idea for logs on the actual operation box. Right? So would that be a, sort of additional button, I guess?\u003C/p>\u003Cp>Because I'm also sort of operating under this. It's edit mode by default all the time. So I could imagine that maybe next to the edit button, there's just some sort of logs link.\u003C/p>\u003Cp>Speaker 2: And now\u003C/p>\u003Cp>Speaker 0: that I'm thinking about it even more.\u003C/p>\u003Cp>Speaker 2: Pre filter the logs to the particular one you're looking at, all that kind of fun stuff.\u003C/p>\u003Cp>Speaker 0: Yeah. Because now that I'm thinking about it, we save the logs as one row with a bunch of JSON in it for the whole flow execution. Right? That's the, that's the log item that we have. So there's no real way to just say, give me all of the logs for just the update data operation, because you're looking about all of the, you know, all of the, all of the data from the whole flow, and then we have to sort of filter it back down.\u003C/p>\u003Cp>But, you know, we could consider, although the amount of data is gonna get out of control real quick, but we could consider saving, you know, a log row in the database for each operation step instead of for the whole flow. But we gotta make sure that we have some sort of automated retention setting and and a bunch of indexes, indices for that, to not completely wreck your database. Because if we're now creating 5 new records per flow instead of 1, then you're talking about millions and millions of data points, which, you know, depending on your database might not matter.\u003C/p>\u003Cp>Speaker 1: This has been a point in the discussion. The person that started then was already interested in some type of way to for example, similar to the activities, issue that we had, recently where, you know, big or large instances have trouble with, I don't know, I don't know, 2,000,000 activity entries and some type of schedule that clears logs or, for example, you only want to retain those who are at most, I don't know, 30 days old, for example. Some type of rule maybe which could, lighten the load a little bit would be helpful, I think.\u003C/p>\u003Cp>Speaker 0: Oh, a 100%. Yeah. Yeah. The 100%. The the main reason we haven't had that before, I haven't done that yet, is because we didn't have support for this use yet.\u003C/p>\u003Cp>Right? And that's really a requirement. Because if you wanna delete with a filter on a timestamp and you don't have an index on that timestamp, now it might take your database, you know, minutes to just go through a table of that size. So that's that's also where that sort of sprint came through to, like, support indices and now collaborating with the the contributor there to to get that across the finish line. Because then once we have proper support for indices because because I do wanna make sure that, you know, whatever we do for the system stuff needs to be available to the end user.\u003C/p>\u003Cp>Right? So once we have support for indices, we can then enable indices for the system tables for, like, the timestamp in activity and and same for flow logs and stuff. And then the second piece of that is gonna be a setting that says, what do you want your, you know, low log retention to be? And it should be an environment variable, I guess, or maybe it's a per flow option. T b t b d on that.\u003C/p>\u003Cp>Right? Maybe there's an environment variable for the the maximum number you can set, and then in the app, there's a drop down, whatever. We'll have to figure it out. But, yeah, to your point, if you can configure, I only wanna keep 7 days worth of flow logs for this flow. I feel like that's a must have for any sort of upgrade that we're gonna be doing the flows, because it's so easy to just now accidentally end up with a couple of 100.\u003C/p>\u003Cp>Actually, that reminds me, I think, Kevin the other day did a fantastic little flow that he sent over where he was using it for, for beddlesnakes. But I also saw his flow logs in the sidebar already be in the 100, and I was like, how how long have you been running this right now? Right? You're gonna have 1,000 tens of 1,000 of those, so we need to have some sort of retention for that, which I could imagine, you know, if you have a flow that you know you're running a lot and you want it to be lowers, think it has to be per flow.\u003C/p>\u003Cp>Speaker 1: Food for thought. Food for thought for sure.\u003C/p>\u003Cp>Speaker 0: So then when we're thinking about, you know, one other thing I know about the debugging of, flow specifically is that right now to test anything, you have to trigger the actual thing that triggers this. Right? So, as of right now, if, Jonathan wanted to test this ready for review flow in any way, he would have to save this, quit, go to his articles collection, and then manually trigger it. Right? Oh, it's\u003C/p>\u003Cp>Speaker 2: so painful. It is so painful to debug these things, because you have to do just that. Right? I have to go I have to go save, then I have to go to our to my systems over here. I have to go to articles.\u003C/p>\u003Cp>I mean, at this point, normally, I've got an you're already kind of nabbed here, but now I've gotta open an article, and then I've gotta go through the actions of whatever I'm testing. So ready for review case. I have to push the button, then we go back, and then we're gonna go back to flows. We go back to ready for review. Now I have to go to logs, and now I can look at logs and see what's going on, test flow or not.\u003C/p>\u003Cp>So you're in here, you're traveling through, and you're looking at your payloads, and at your options wherever you are in the debug process, then you're closing out. Now you're going back to edit. So many mouse\u003C/p>\u003Cp>Speaker 0: It's a pain is what I'm hearing. It's a pain in the butt. Flow is\u003C/p>\u003Cp>Speaker 2: awesome, but it is it is painful to do the administration.\u003C/p>\u003Cp>Speaker 0: That's why we're here.\u003C/p>\u003Cp>Speaker 2: That's why we're here.\u003C/p>\u003Cp>Speaker 1: Yeah. So one thing I think just wanna add another point from from him. Yeah. Sorry.\u003C/p>\u003Cp>Speaker 0: Go. Go. Go. Go. No.\u003C/p>\u003Cp>Please. Please. Yeah. Yeah.\u003C/p>\u003Cp>Speaker 1: Because, this this then touches the issue of alright, so let's you you want to create a flow. You want to try it out. You have to try it out in order to actually make it work. But then comes the thing, alright, so let's say I have an existing flow, and it's running. It should continue on running, but I want to, I want to edit it without changing the actual running flow, which would technically be something like, alright.\u003C/p>\u003Cp>Let's duplicate this flow so I can play around with it. And then, after I'm done with my modification, then I switch them or, basically, disable the previous one and enable the next one. So some type of let's let's call it, like, very, very lightly versioning. Yes. Oh, the chat.\u003C/p>\u003Cp>The chat is exactly it.\u003C/p>\u003Cp>Speaker 0: Once they\u003C/p>\u003Cp>Speaker 1: have the\u003C/p>\u003Cp>Speaker 0: whole tag.\u003C/p>\u003Cp>Speaker 1: Right. So so if, yeah, if you have a couple of, like, important very important flows running in production systems, you don't want to, you know, edit them willy nilly. So that's the that's the another thing, which is can be can be painful because as of right now, there's technically no duplicate button, which is, not as nice as it could be.\u003C/p>\u003Cp>Speaker 0: And, it's it's a difficult one, isn't it? Because I'm also thinking about some of the triggers. Would that make sense? Because if you make a new version of a an endpoint, right, and an endpoint has a UUID to run it. But then if you create a duplicate, now the endpoint is different.\u003C/p>\u003Cp>So then if you wanna make that your production one, now you have a different endpoint. Right? So it's like the the duplicate and then kill the old one flow pun intended of updating flows might not work for every trigger. Also, if you create a new version of a flow that uses, like, an event hook for, like, an item save on articles or something, like, in this example. Yeah.\u003C/p>\u003Cp>It would have to be some sort of dedicated versioning because if you create a duplicate, then now you're firing both of your flows. Right?\u003C/p>\u003Cp>Speaker 1: You know, a very, like, naive solution to that event problem would be to have the duplicated flow immediately be set to disabled. So if you duplicate duplicate something, the duplicate one is disabled, so it doesn't fire. And then you can play around with it, change something, and then enable it yourself. Yeah.\u003C/p>\u003Cp>Speaker 0: Somebody in the chat that or somebody's plural in the chat, we're also saying, you know, you can obviously have a local dev instance or a dev copy where you could configure a new flow and then sort of import it at once so you don't, you know, mess anything up on production. Very true. Very true. Wanna make sure, you know, we keep we can Yeah. All the use ASM license.\u003C/p>\u003Cp>Speaker 1: This this then goes hand in hand with the next thing that I wanted to bring up with with the same thing. So let's say you have your dev instance, and then you want to sync your synchronize your flow to the production instance. Alright? There's no version control, basically. If you don't use, like, an actual extension, which you can put into your Git, version control, you would have to manually duplicate or use the API or something to to get the flow over or synchronize between your two instances.\u003C/p>\u003Cp>Mhmm.\u003C/p>\u003Cp>Speaker 0: Well, luckily, we talked about that particular problem 2 weeks ago in the last episode.\u003C/p>\u003Cp>Speaker 1: That's exactly right.\u003C/p>\u003Cp>Speaker 0: Like and subscribe. Exactly. The link right below the like button. Is that what they say?\u003C/p>\u003Cp>Speaker 1: Yeah. I'm gonna have the sidebar here.\u003C/p>\u003Cp>Speaker 0: Exactly. Circling back, though, because we we have, I think, 2 or 3 different, UX points with, like, a couple of different solutions. So one thing that I think is a must have for sort of this flows upgrade project that we're putting together here is that there needs to be a button on the trigger where you can manually trigger that trigger. The difficulty will be what data do we trigger it with, and how can we help you make that data realistic? Right?\u003C/p>\u003Cp>That is gonna be the difficulty there, which is kind of what we haven't done before. Because right now, with this manual trigger on an article, you know, the data format for that, like we talked about with the chat a little bit earlier, it's very specific to that particular article and that particular, data model for the article if it's like a filter hook or something. So if you have a manual play button, so to speak, we need to have some sort of way where you can sort of fake what that event looks like. We can pre pre generate a couple of ones based on your instance, I guess. Because, like, we know for the manual trigger that you get the primary key of an article.\u003C/p>\u003Cp>Right? That's what you would trigger with. So when you hit a play button, I imagine that we would have to show some sort of modal or a dialogue that allows you to put in a JSON object, I guess, with what that sort of, fake data looks like. And for some of the triggers, I think we can prepare that cause we can we can pre fill, you know, a fake article manual run cause we know the format, collection, keys, etcetera. For some of them, you don't need to do anything at all.\u003C/p>\u003Cp>Like a cron thing, you can just play because there's no real, you know, trigger data coming in, maybe a timestamp, but I'm not a 100% sure. For an endpoint, though, there there's no way for us to know what that looks like. Right? Because the payload is user edit, so we don't really know do with that.\u003C/p>\u003Cp>Speaker 2: So so some of the things I've seen with other tools, then this goes this even goes back 20 plus years ago using WebLogic's BEA workflow engines. You could actually take data from previous runs, so from the logs essentially. You could actually generate your payloads dynamically from that, or have say, I wanna capture this data, and I wanna actually reuse that as the test mechanism and essentially create your test data from an existing run. And then, ultimately, you could even edit at that point, you could edit the payload. So if you wanted to change some of the data attribution for subsequent runs, you could actually just quickly edit.\u003C/p>\u003Cp>So as part of the you know, that kind of play operation, you would actually get some dialogue capability and say, I wanna use either an existing or, in some way, create or save your payload and then fire that in. And in that way, you know, we don't have to be trying to generate it. We can just say, you can either run it normally, go and trigger the action. You know. Another way that I've done some of it is, like, have an incognito window with log you know, I I don't have to actually do what I just showed you, which is exit my flow.\u003C/p>\u003Cp>I can actually do the testing from another user. You can there there are some ways you can simplify or streamline that just a little bit. And you may be testing that permission set anyway. Then you may have permissions and restrictions, and you may be testing that as part of your application anyway. You'd have a test user with those permission.\u003C/p>\u003Cp>But being able to capture that information from an existing log or flow, run would be super cool and helpful. And that's some of what we've captured some of that already.\u003C/p>\u003Cp>Speaker 0: We have the logs. I mean, we have all of that data to your point.\u003C/p>\u003Cp>Speaker 2: Like,\u003C/p>\u003Cp>Speaker 0: if if there was a previous run, we'd know exactly, you know, what what information was there anyway. So we could definitely use that to to prefill it. Is this sort of manual trigger for well, manual trigger is a bad bad name because we already have a thing called manual trigger. But it's this sort of play button. Let's call it that.\u003C/p>\u003Cp>Is that something you'd wanna do for the whole flow, like, on the trigger level, or is that something you'd also wanna be able to do per operation itself? Because somebody before was like, you know, I used the run script operation a lot. I can also imagine that it would be really useful then to just just be able to say, okay. Just replay my run script a couple of times. But what I was just about to say showed up in the chat.\u003C/p>\u003Cp>Shout out to the Dev once more. Operations depend on a lot of context from what happened before. Right? Yep.\u003C/p>\u003Cp>Speaker 2: That would be more like that step through capability, right, where you you can step through and, you know, in debug mode, essentially pause. Right? The you know, you have your your break point. And at that point, you stop. You can actually analyze and look at the data.\u003C/p>\u003Cp>So, I mean, this does become very much ID style kind because that's what this really is. We're we're doing ID development. The idea is you would have these kinds of breakpoint capabilities to be able to stop, look at what data and parameters, be able to look back through the current context, what does it look like. Am I getting what I expect? Okay.\u003C/p>\u003Cp>Okay. Then step through debug. And it gets complex because we've got this mix of operations, and then you got run script that has, you know, code. Right? You know?\u003C/p>\u003Cp>Now do I have another debug capability with stop points and break points inside the job? You know?\u003C/p>\u003Cp>Speaker 0: Yeah.\u003C/p>\u003Cp>Speaker 2: Debugging that the TypeScript JavaScript in there directly. It gets very complex in that sense. But I think just the ability to be able to have a I wanna be able break point at the run script, be able to step to the next operation, then check the data and see I'll be able to multiple breakpoint options. We could get more creative down the line. I think if we can just put it at the operation level, that could it would be kinda cool if you could, you know, essentially step back, edit the data, or do something to the input, and then feed it back through, step back, step forward in in the operations themselves.\u003C/p>\u003Cp>Like, it's more complex\u003C/p>\u003Cp>Speaker 0: than And another thing with all of this, of course, is that, these operations may very well have side of on purpose because you might ping a different endpoint. You might save something's database, whatever. Right? So if you're manually playing, you know, with Flow or even individual operations for debugging purposes or whatever else, those side effects will still be in effect. Right?\u003C/p>\u003Cp>And if you're trying to debug a whole flow, you know, operation 2 might be dependent on a successful response from operation number 1. Like, number 1, save something to the database. Number 2, reads it, you know, now that it has been saved or something. I'm just trying to come up with an example. Because I was thinking, you know, it it be important to think through some sort of dry run as well where you can just like, okay.\u003C/p>\u003Cp>This would have now saved to the database. Right? And then, oh, this would have now made a request to the API, but we didn't actually do that. We didn't want to make a Stripe subscription as part of our little testing thing right now, right, to just test if everything works. So that'll be interest interesting.\u003C/p>\u003Cp>Some sort of dry running, thing behind it. I I do think it's important to think through at least because of the side effects. But to again, shout out to the derp. Can't really do a universal drive run because you never know what, you know, arbitrary stuff is being used.\u003C/p>\u003Cp>Speaker 2: Yeah. You'd almost have to have, like, a successful run or at least a set of data and payload options that you could edit up to say, you know, this is what I'm kind of expecting as I process through my input outputs.\u003C/p>\u003Cp>Speaker 1: Mhmm.\u003C/p>\u003Cp>Speaker 2: So So you'd have to have input output capabilities. What do you expect? And then as\u003C/p>\u003Cp>Speaker 0: you say,\u003C/p>\u003Cp>Speaker 2: dry run mode, not save, but often, you know, maybe the change that I'm making, I then getting that and then doing something. So I'm sending it to that I'm sending a webhook out data payload that comes back, you know, depending on how complex you make a flow. Mhmm. My general recommendation is if flows get that complex, go write a\u003C/p>\u003Cp>Speaker 0: hook. But it's Well\u003C/p>\u003Cp>Speaker 1: Well it\u003C/p>\u003Cp>Speaker 0: it there is gonna be a point.\u003C/p>\u003Cp>Speaker 2: It's a balancing act. Right? There there are I I like flows for admin kinds of functions where state management user control. When I get into really complex conditional lock, do doing this in these boxes so much faster to write TypeScript, be done with a hook on the backside. That's my Yeah.\u003C/p>\u003Cp>Speaker 1: The the similar to yeah. What I wanted to say is this sounds, like, very similar to some type of bash script or something. You know, some some type of scripting, where as soon as you need conditions or something, it's probably better to make it a program. Because, who likes to write bash? Basically, you know, it it gets it gets gnarly really quickly.\u003C/p>\u003Cp>So as soon as you need sophisticated stuff, some type of conditions that branch into something and do something with arrays or something, it's probably better to actually use a programming language. And,\u003C/p>\u003Cp>Speaker 0: I, yeah, I don't know. It really depends on a lot of things because one one is like, do you even know TypeScript or JavaScript in the first place? I think that's an important one. I think by, you know, doing it manually as as code, you also have a lot more opportunities to break it bad. You know, make it insecure, just make it an optimized break stuff tremendously hard.\u003C/p>\u003Cp>And also in, the chat somebody pointed out, you know, with flows, you do get the the logs and sort of the ability to explore the steps and what went wrong. So it's it's an interesting take, Jonathan, because what we're saying is, like, as as of today, you know, when it gets complicated, you wanna switch to code because the flows, UX, and UI just doesn't isn't good enough yet for complicated stuff. Right? So Right.\u003C/p>\u003Cp>Speaker 2: Yeah. Yeah. It is.\u003C/p>\u003Cp>Speaker 0: But I I'm hopeful.\u003C/p>\u003Cp>Speaker 2: If if we make this kind of what we're talking about, if we get to that point in these next few iterations on maybe it does become I don't actually need hooks as often. Right? I could be you know, hooks could just be when I need external library.\u003C/p>\u003Cp>Speaker 0: Yeah. Exactly.\u003C/p>\u003Cp>Speaker 2: Allow within flow at the moment.\u003C/p>\u003Cp>Speaker 0: I'm just I'm very I'm very hopeful that by by some of these seemingly small but very big changes, like adding the manual trigger so you can at least just try it out, I think we can actually get this to a point where\u003C/p>\u003Cp>Speaker 2: HOOX\u003C/p>\u003Cp>Speaker 0: is gonna be the more complicated one to do, you know, because it's like, yes, you have a black box and do whatever, but you don't get all of the debugging niceties and and all of the comments. Not that it's a battle. I mean, at the end of the day, it's just pick whatever you want, what's worked best for you. But I think that we can reverse that take, honestly, that flows could be I think that might be my personal challenge. Make flows the default for Jonathan.\u003C/p>\u003Cp>That's my measure of success.\u003C/p>\u003Cp>Speaker 2: Writes the worst TypeScript he's ever seen.\u003C/p>\u003Cp>Speaker 0: If he says I prefer flows over hooks, it means we did a good job.\u003C/p>\u003Cp>Speaker 2: Perfect. I love it.\u003C/p>\u003Cp>Speaker 0: Project Jonathan is what we call it. Cool. Okay.\u003C/p>\u003Cp>Speaker 2: If you can keep me from writing code, it's always a good.\u003C/p>\u003Cp>Speaker 0: It's always a good idea. So there's there's definitely a couple of unresolved You know, as per usual, I with the eye on the clock. As per usual, we're gonna be wrapping up. We've been taking a lot of notes during all of this, and it's, again, very divergently thinking. We're gonna be compiling all of these notes and ideas from everything that we just talked about and everything on the chat.\u003C/p>\u003Cp>Thank you for that. Into, you know, a proper RFC document and figure out what does this project upgrade flows, x y z, you know. What does that look like? What do we see as the must have? What can we get done in a sort of initial sprint?\u003C/p>\u003Cp>There have been, I think, like, 4 or 5 different discussions that are all sort of asking for the same thing, which is, like, improve Flows debugging. But they're all various sort of smaller bits and pieces of a, to me, bigger Flows 2 point o type of upgrade. So we're gonna be trying to compile those and sort of merge them into one sort of flows project, if that makes sense. Just as a quick summary, I think just the the upgrades to the logs that we talked about, you know, making them, like, explorable. I guess that's the right the right word.\u003C/p>\u003Cp>Giving them its own page, giving it more space, making it easier to step through, you know, the execution of a flow. Definitely a big must have. And the ability to trigger a flow from where you're editing it with some sort of, you know, preloaded payload from a previous run or something you can manually adjust so you can actually try out what the hell it is that you're building without having to go all over the app. I think it's gonna be a very, very important thing to have. Last question from from the chat here before we wrap up.\u003C/p>\u003Cp>Someone had said that many of these ideas could be implemented by the community in a custom module? No. Yes. Community can build whatever they want as a custom module at the end of the day. That is very true.\u003C/p>\u003Cp>And with the, you know, upcoming release of, for marketplace project, I'm also very curious to see, you know, what people will do with this type of stuff. Right? Somebody could, definitely, make a flow flows log module and just make it super custom or super proprietary or super flexible, whatever they want Whatever they want, really. Cool. Okay.\u003C/p>\u003Cp>There was one more that snuck in with an upvote. One final thing. Can we have access to the flow manager from the flows service so we can do more things with flows from extensions? May may may. No problems.\u003C/p>\u003Cp>So so It depends. It depends. Now I'll I'll, it's oddly tucked away compared to the extension manager, which should also similarly be tucked away, actually. Anyways, no. The the the reason for the hesitation is more around updates that I kinda wanna do to those services in the first place.\u003C/p>\u003Cp>Right? So I know I wanna do some sweeping upgrades to what those services look like and how they work in the first place. So I'm hesitant to to add new stuff onto the services service that we know might change already. Right? So I I don't wanna introduce something new that's gonna change then fast after, if that makes sense.\u003C/p>\u003Cp>That being said, yes, I do wanna make sure that extensions do have access to all of those types of internal things, which is also a reason why we've been moving a lot more stuff into individual libraries. Right? So I don't know. For for those who have been watching the repo like a hawk, we just moved sort of the environment variable extraction and handling different library with the same sort of idea, right, where an extension can just import the Directus ENV library and then use the same sort of ENV handling that, Directus does itself. We're we're basically doing that across the board, and the services is one of those things that I wanna do like that.\u003C/p>\u003Cp>And I can definitely imagine there being eventually being, you know, a Directus flows library that contains, you know, this flows manager state, so that extensions just can import it just like the direct as API would with TypeScript typing. I I heard you. Community, I heard you. With proper TypeScript typing and everything as well. So that's that's the only reason why I'm a little hesitant to promise anything better, as of right now.\u003C/p>\u003Cp>Any cool. That marks 11 sharp on the clock on my end. I think this was a very interesting session so far. This is a little bit more divergently rambling, than doing an existing discussion, but I think we've touched on a couple of very, very important points. Any any other closing thoughts?\u003C/p>\u003Cp>Speaker 2: If I could remember the dad joke from Bryant this morning, I would tell it, but I suck at remembering jokes. So\u003C/p>\u003Cp>Speaker 0: Well, tune in next time for for\u003C/p>\u003Cp>Speaker 2: John's phone dad joke.\u003C/p>\u003Cp>Speaker 0: With all that being said, thank you to the audience. Thank you for watching. If you're watching this on Directus TV, Directus dot a 0/tv, Hi. Be sure to watch the previous versions of this video down below. And we'll see you guys in 2 weeks, I think.\u003C/p>\u003Cp>Or are we skipping that one? Because it's sleep week, I think. Maybe oh, TBD. There will be one.\u003C/p>\u003Cp>Speaker 2: I think our next one's in March, because the I think or no. No. It's actually the other way around. We have one more month. We're skipping the first one in March.\u003C/p>\u003Cp>That was the\u003C/p>\u003Cp>Speaker 0: See you guys in 2 weeks then.\u003C/p>\u003Cp>Speaker 2: I remember now. My poor old brain catching up.\u003C/p>\u003Cp>Speaker 0: It's all in the event tab. Exactly. Keep your eye on it.\u003C/p>\u003Cp>Speaker 2: It's all in the event tab.\u003C/p>\u003Cp>Speaker 0: Thanks, everybody.\u003C/p>\u003Cp>Speaker 2: Kevin's keeping us honest. Cheers, everyone. Bye.\u003C/p>\u003Cp>Speaker 0: Bye bye.\u003C/p>","Alright. Yeah. So, Jonathan, you already, briefly introduced that seeing all the hiccups, I don't know if we're gonna cut that. But just to restart it, we're gonna be talking about flows today. We're really gonna be starting with this 15 8 70 about improving the activity panel. But, realistically, as far as I'm concerned, it's sort of a smaller piece of a bigger discussion around upgrades to flows. Right? So for those out of the loop, we introduced that flow system as a way to do event based, actions and operations, kinda like you could with hooks, or custom endpoints, but doing it in a no code type of way. Right? So you configure when you want your flow to trigger and then step by step configure individual operations, individual nodes of sort of this this path, that you wanna execute. The original version of that was really much designed as a, start with the basics, see what people wanna use it for, see where what what the best improvements are gonna be. So we shipped it a little bit lightweight, you know, with a handful of operations and have been more and more over time. But it has been becoming increasingly obvious that there's, a lot of improvements that would be really nice to have, especially when it comes to debugging, sort of helping configuring some of these pieces. So with that being said Yes. Discussion. So for Daniel, who's flying blind here, we're looking at the one about the logs first. Right? So if you're currently running a flow, optionally if you have it enabled, it's enabled by default. It will keep track of the data that sort of went through the various operations, and then saves it to, a logs tab that you can see on the right hand side of the flow. This is basically the primary way at the moment where you can debug what is happening under the hood, because otherwise, you know, there it's it's the way where you can see what data came actually through the trigger and how you've modified the data points in between. So over the Jonathan, you're you're showing it out what that looks like now on your own instance. What are some of the points in this initial discussion that triggered this this conversation? You've got a lot of the ease and access to that information, as well as the ability to control, like, durations. Some of the problems we run into with flows, especially if someone once they get them into production, if they still have the activity and logging turned on, you can fill up your activity revisions tables really quickly. And there's you know, the mechanisms for cleaning that up are, you know, another flow, or direct database access or other kinds of things where you've gotta manage your activity and revision logs and so forth. Common recommendation that I make is once you've done your testing and you've got your flows functioning is to disable the activity and logging. But there are cases where you may want that and logging just for audit trackability on actions and things that people are doing with flows. So, I think some of that is just the general trigger and management of that. Other things that, tend to cause struggle for people in flows is the way that you access this instead of being able to say, look at the data from the log directly in something that I'm working on. There's been a again, there's a number of ways that we can kind of shake and look at this. But the general thing is this kinda lives over here. I can't edit this while I'm looking at this, so the the interactions with logs and the work that you're doing is kind of a it's a bit of a hindrance when you're doing the development phase of flow development. So it's just a more a nuisance than anything else. But the the inability to say edit something here, I have to actually come here to check. So if I'm looking for variables or data or what are the things looking like in here, what's my payload? Now I've gotta remember this data structure or copy this data somewhere else because I wanna actually do something with it. I wanna interact with it. I wanna use those variables. So it's kind of a it's a weird transition kind of back and forth. Some of the systems that we we got in some inbound activity on some other tickets and feedback that we got from some clients on the enterprise side, was you know, they showed showed us some other systems that have similar no code kinds of flow capabilities. And logs, instead of living in, like, a side panel in a weird way, kind of live with the operations themselves. So there's just some general ideas and thoughts around how that interaction in the UX kind of works. Data wise, I don't think anybody's complaining about the information that shows up in here. That tends to be you know, it's fairly easy to work with and deal with. I think it's more the kind of user interactions with data. Yeah. For me, personally, I've ran into this when I tried to set up a flow which triggers another flow, which then leads you to, you know, switching between those 2. And then you have to check that again, but then you forgot on the first flow, forgot something, and you have to switch back. And, you know, so people do run into this, and, I've experienced this myself. So, generally, if I if I experience this, the general user will probably also experience this. So this is a very valid point, in my opinion, and, we should we should improve this a little bit. You're muted, Jonathan. Sorry. You're muted. You look very passionate, though. I'll give you that. But Sorry. I'm not gonna mute. So so what that does is, you know and that's why we saw this very same thing as we started doing some internal research on just this ticket. We rapidly diverged into we've got 4 or 5 other kinds of spec, and that's why we it's kinda leading into this call today. We were talking about the fact that there's some general overall flows improvements that I think we'd like to see across the boards. But the logging and activity, you know, that's that kinda triggered this action was really about that the way that those logs interact and how you have to it's kind of a separation of what is a common function. Yeah. Definitely. Totally agree. And, like the other comment on the chat said, switching is painful. Yes. An auto refresh for logs would go a very long way because I I do think that these two things are very, service the same niche, service the same pain. Because you you want to have up to date logs and see what is different, what do I have to work with, what can I work with, and not being able to have that on auto refresh, for example? Is the same thing as context switching back and forth, and you want to refresh and whatever. So I think these two pieces are very similar and and and try to work on the same pain, basically. So, good point from the chat. Thank you very much. So there's a lot of lot of, I saw the word divergent on the screen. Just made me think there's a lot of divergent ideas happening at the same time. Right? We're talking about a lot of smaller optimizations like the auto refresh or some sort of way to make that context switching a little bit less painful. If we take a deeper look at this discussion, though, that sort of triggered everything, are there any particular pain points about the way we describe or show those logs that is currently sort of, something that should be improved? I'm kinda skimming through this guy. See some people typing in the chat. Please let us know if you have experience in using flows and would like to, contribute. We're here for you. I think on the Trigger log is the hardest to understand. I'm still on. So on that log side, the other key thing that's being pointed out here is there are differences in the inbound payloads based on whether you're doing a create, update, or delete operation. You do have some variation in the trigger payload bodies. And, again, I don't know that it's a bad thing, but it is one of the things that's called out as a there's differences in the pathing, say, to the collection or the item, based on what type of event triggered the action. Yep. Yeah. Very good one. That that makes a lot of sense, and it's it's it's explainable from a technical perspective, but also makes sense where it's where where the pain point's coming from. So earlier on, when we just did the hooks initially, the decision was made to differentiate between create a single thing and create multiple things, which in turn means that sometimes you get a single ID as a sort of, string or a number, and sometimes you get an array of ID strings or a number, which is basically just a bigger discussion around what does that hook payload look like. Right? Because everything is based on the same internal, event system. So, you know, a flow is triggered based on the same hook that you get from, a hook extension, for example. It's all the same thing. Then, the difference between, you know, trigger dot keys, trigger dot body dot keys, etcetera, sometimes it's payload, sometimes it's not, That really depends on the type of trigger, which, again, you know, doesn't necessarily make it make it better, but it is, you know, explainable where it's coming from, where. If you're having an endpoint, you know, with a a trigger request, now you're dealing with a sort of user payload that was submitted that could be anything. Right? If you're dealing with a hook, it's a pre pre known format for what our hooks fire. But, you know, it's there's a difference. There's gonna be a difference, and that is that's definitely tricky. Somebody said, if hooks supported loops, we probably wouldn't need the differentiation for 1 versus multiple. Hook supported loops. I'm not entirely sure what you mean by that because a hook is just a bit of JavaScript, so you could loop over whatever you want. Right? Oh, flows. Oh. Oh, I see. Gotcha. Gotcha. Gotcha. Gotcha. Yeah. So we're basically saying, you know, if you have a way to just say do this flow against every item in the triggered whatever trigger keys, then we can basically just drop the one hook and make everything an array all the time, sort of get rid of some of that confusion, which for the record Yeah. Makes a ton of sense to me. I mean, every every insert into the database could be 1 or multiple things. And if you do one thing, it's just an area of one thing. Right? It's basically it's it's easy to explain. So I I do agree with that sort of general sentiment. Even though for the record, that would be a quite a big breaking change and totally wreck everybody's existing flows and extensions. So TBD TBD. As mentioned, the extension shed will be very possible to create a repeat operation for all elements in an array extension with exposing a single function from the flows manager. Yeah. Yeah. No. Absolutely. Yeah. That's very true. Very true. Cool. There is one more, thing in the chat here from our very own Brian saying remembering the key names for operations is a pretty big headache for me. Why are the keys only showing on hover? Where can I not copy them? Which has been a thing all over the app. And just trying to remember what that key name is. That's a very good point. And I think, you know, overall in the app, we've sort of on the side of making things sort of, what's the right word? User friendly is such an empty thing, but, like, look pretty for nontechnical users. Right? So a lot of stuff gets, what we call title formatted. Family friendly. Yeah. Exactly. We we we title format a lot of that stuff, so it looks prettier in the UI. But totally. Yeah. For things like flow logs, you know, which is inherently a very technical thing, flows is only available for admin users, which are generally speaking, a little bit more of the on the technical side. Those should most likely just be the technical keys. Right? Render them in monospace and just lean into it. Because why why show them as a title formatted version and then have it only show on hover that you can then not copy paste because it's a tooltip. Right? It that that makes a ton of sense, and that is just a perfect sort of tiny little tiny little tweak that's a huge quality of life improvement. Jonathan, I hope that was in the notes somewhere now. Yep. I'm taking some notes in in a separate section, over in the the actual internal notion doc that we've got running on this guy. So I'm trying to capture pretty much it. Another good point here too is, like, why even have a pretty name if you just have the key? You know? Good point. Good point. I think having some sort of description is a nice, you know, nice to have where at least you can write, like, a mini description, but you wouldn't really use the name for that anyways necessarily. Cool. Okay. Just looking at the discussion, though, because I see we're sort of, what, 1 eighths of the way on the scroll on the page. So I'm kinda curious to see if there's any other points in this particular discussion that we haven't really touched on. That's easy to forget. So these ones we just looked at. I think, the suggestion by our own Oh, the the JSON object wrapping. Object wrapping was the other big thing. Again, having this in the side panel, data here, you can start to see if you get long things. It it actually goes off screen and ends up, you know, and you I don't even think it enables a scroll. I'm not sure you can even get to long data that's being displayed in the log there. So some form of wrap, at least, if we're gonna continue in this panel. I think, ultimately, I think we'd like to maybe move this out of the panel anyway. It'll end up somewhere else in the UX. But something for us to keep in mind is that JSON wrapping, and we have that captured as well in a I was just gonna say that it's there it it's I can already also see there's no way to search or filter through logs. Right? That is another, you know, in. The only thing we show about the log is just a time stamp and then a rel as a time stamp. But I'd I'd say an obvious next step is to sort of move that out of the sidebar into just a proper, you know, layout like we do with other things all over the place. So you get the searching, you get the filtering, and we can have a proper detailed view, or draw our although a detail page is probably gonna be nicer for that. Save a little bit more space, for things like that overflowing scroll and just presenting it nicer. Although, then, you know, we create a new problem, which is now the logs live outside of the flow where you configure them. So at that point, you know, we also wanna make sure that we have some sort of way where you can render that maybe as a split view, maybe you can sort of 50 50 between the flow that you're creating and then seeing the logs of that flow with this layout. Because I do believe that keeping them in context or at least having some sort of link back and forth, from, you know, where you're configuring the the law, the flow and configuring the log is gonna be important. From the chat, having logs and operations might be helpful for that. Yes. Yes. Yes and no. It's it's I'm I'm, to me, the logs on operations is an addition, not a replacement. I think personally it's very valuable that you can see, you know, the full, execution path in a log. You can see, you know, at 2 pm today we started with this, we started with it, that one failed, and then we did this, and then here's the data that went through it and what we concluded with. I do think it's important to have that sort of consolidated together, but the the logs and operations, I do agree, would have really be a really nice addition to that where you can just look like, okay. In the last couple of runs, here's the day that it came in and went out of this particular box in your flow. Right? I don't know if that's gonna be a deep link or maybe just an info. Almost feels like you could have, like, a split panel down here that had the normal, like, search layout capabilities of searching through the logs, could be an interesting use case for this where you'd Mhmm. Similar to Visual Studio and other kinds of console logs where when you're doing this kind of development, you've got logging information you can see down here. Because I also think some of the other nice things with flows would be able to have, like, step through. Right? Being able to step through and see data at certain points, what's going on as you're debugging and working on the flow itself. But then as you say, having an actual, you know, log panel or maybe there's a, like, in a sub ten under flows where you can go to the logs and just see logs full screen, be able to search. Because once my flow is operating, I think the the DERF, pointed out that they're they like logs turned on so when users report issues hours later, they can go back and search the logs. And in that case, I'm not looking at the flow. I'm looking at what's the data, what's the flow, what happened, what errors, what things do I see. So cool. Because the nice thing by putting the again, this for for those new to the stream, this is very much a session of divergently thinking. Right? We're just gonna go brainstorm a bunch, like, what will be the ideal state and then take it back towards the end to, like, okay. What is an actual realistic next step? So I can also imagine, you know, closing your eyes thinking about it, if we have a proper detail page, like a full full screen view for the logs for one particular run. Right? We could actually render sort of a smaller, graphical representation of your flow as you have configured it, in in here, and then just show you. You can sort of replay it, so to speak. So we can just show you step 1, and then that's the data that came in and out based on the logs that you're looking at. Right? So instead of just having a long list top to bottom, you could effectively just get a visual replay, of that particular flow operation. I think that could be an interesting one too. Because then you really get to visualize the the execution of that flow because you're effectively trying to follow a path through your flow with the log. But right now, the log is always gonna be a a sort of, what's the right word, a one dimensional list, top to bottom. Right? Chronological. But, you're thinking about your flow, potentially, in more of a graphical tree. Right? Where it's like you have option b, and then you branch off, etcetera, which is not really, represented in the logs in any sort of way right now. From the chat, somebody you could almost use the exact same UI as the flow editing page for that. Yeah. No. Yeah. Yeah. Exactly. Maybe a little bit smaller so you have the whole thing, you know, in view at the same time, and we just can highlight whichever box we're currently showing you. Yeah. I think that could be a very interesting visual representation. It becomes very interactive at that point. Right? Where it's almost like, you know, the logs will just be represented with some sort of timeline bar where it's like, okay. You read operation 1 out of 16 steps, and then you can just go to the next one. Then the visual representation up top just sort of, like, highlights which box we're on. Right? And then just go to the next one, go to the next one, go to the next one. I still think we would have to have some sort of way where it's just, like, show me the whole thing as a JSON blob. Don't make me go through this this pretty thing, but I'm pretty hopeful that that could help, really help debug what's going on here. Speaking of which, why is the flow edit page the default? The view page seems kinda pointless. Yeah. Agreed. Done. So easy it can be sometimes. I I think the the honest answer is that kinda came from flows being initially designed as using the exact same UI and UX and order of operations as dashboards, where that is, very different, obviously, where dashboard is definitely read read only by default, and then only if you wanna change something to your panels for the order of the dashboard or that kind of stuff that you really go edit it. But in this particular case, I agree because it's like, what is really the difference between read and editing it? The only difference for read is that you can't, you know, click a button, but there's nothing that's really, you know, different between the two states. Right? So just getting rid of that read only view, I mean, it it makes sense to me, honestly. Food for thought. It's not not something we're just gonna we'll have to double check if that all makes sense, but just you know? Well, let's say let's play with that thought a little bit. So let's say we make a new page, detailed page, which features everything that we want. Would we like to then be more stringent on how it looks? So maybe take away the ability to point, to to place nodes wherever you want so we can, you know, for example, make it, smaller for the detail page because we probably don't have the same amount of space as of right now. Is that something that we would like to do? I don't know. Like, how important is that even? I think for the flow detail page, for the if you're talking about the flow detail page, it's not important. It's it's really more of a visual aid. This is like if you're mentally, you know, visioning what your flow looked like when you created it, you can see it sort of, like, go through the go through the motion. So it's really more meant as a graphical element that is more for, it's kinda like a map. It's kinda what I see it like. Right? It's it's a nice to have, not really a requirement. What I was kinda thinking we could potentially get away with though is instead of rendering the full boxes with everything in it, which just be the name or just an empty box for that matter. What we could do, though, it's I don't know if you've ever ever played with this, but if you have a dashboard set up Jonathan, do you happen to have a dashboard in this this demo instance? Plenty. You gotta love it. Dashboards and sort of the underlying panels and the the viewport stuff. Yeah. Exactly. It has a button that just sort of zoom out the fit. And also a full screen button for those who don't. Pro tip for you. So you can full screen and make it fit on your your, fit on your screen. But we could use that exact same thing for doing just sort of a mini representation of, of your flow at the top of this flow's detail log page. Right? Because you don't need to see all the exact details. It's mostly just as a graphical map for going through your flow. This could be a very interesting trick. Similar similar tools I've seen, you can actually zoom in or out. You can actually zoom your view so you can see more of your flow or all of your flow. And then when you double click or click on something, then you get back into the item view or the detailed view. So back in our flows example here. Right? You'd be able to, you know, have a zoom option or be able to scroll in or out and zoom your view similar to a map style the way we do with the geospatial kinds of things. So very similar to that would be fun. I wish I could see the screen right now, but, I thought there's something Oh, right. About exhibit. I forgot. For example, yeah, if if we decide to, you know, like, restructure the visuals or want to provide a different type of view, for example, like, we don't have to have boxes. We could also have, like, circles, for example, which could be smaller, take out the space. Maybe they're horizontal or something. I'm not sure if you showed this, right now. I'm sorry for that. But Oh, no. No. No. No. It's I just I just love the the very bit of a sidebar. I just absolutely love the the artistic way you brought that just now. Maybe instead of rectangles, they could be circles. Yes. But yeah. No. You're absolutely right. Because we the nice thing is we know the exact dimensions of each box. Right? So we could theoretically, for a graphical overview, render each box at a half height and offset all of the, x and y positions by, you know, the number of boxes high To sort of recreate the diagram in a more compact view. For sure. Yeah. Good point. So let's let's noodle on this idea for logs on the actual operation box. Right? So would that be a, sort of additional button, I guess? Because I'm also sort of operating under this. It's edit mode by default all the time. So I could imagine that maybe next to the edit button, there's just some sort of logs link. And now that I'm thinking about it even more. Pre filter the logs to the particular one you're looking at, all that kind of fun stuff. Yeah. Because now that I'm thinking about it, we save the logs as one row with a bunch of JSON in it for the whole flow execution. Right? That's the, that's the log item that we have. So there's no real way to just say, give me all of the logs for just the update data operation, because you're looking about all of the, you know, all of the, all of the data from the whole flow, and then we have to sort of filter it back down. But, you know, we could consider, although the amount of data is gonna get out of control real quick, but we could consider saving, you know, a log row in the database for each operation step instead of for the whole flow. But we gotta make sure that we have some sort of automated retention setting and and a bunch of indexes, indices for that, to not completely wreck your database. Because if we're now creating 5 new records per flow instead of 1, then you're talking about millions and millions of data points, which, you know, depending on your database might not matter. This has been a point in the discussion. The person that started then was already interested in some type of way to for example, similar to the activities, issue that we had, recently where, you know, big or large instances have trouble with, I don't know, I don't know, 2,000,000 activity entries and some type of schedule that clears logs or, for example, you only want to retain those who are at most, I don't know, 30 days old, for example. Some type of rule maybe which could, lighten the load a little bit would be helpful, I think. Oh, a 100%. Yeah. Yeah. The 100%. The the main reason we haven't had that before, I haven't done that yet, is because we didn't have support for this use yet. Right? And that's really a requirement. Because if you wanna delete with a filter on a timestamp and you don't have an index on that timestamp, now it might take your database, you know, minutes to just go through a table of that size. So that's that's also where that sort of sprint came through to, like, support indices and now collaborating with the the contributor there to to get that across the finish line. Because then once we have proper support for indices because because I do wanna make sure that, you know, whatever we do for the system stuff needs to be available to the end user. Right? So once we have support for indices, we can then enable indices for the system tables for, like, the timestamp in activity and and same for flow logs and stuff. And then the second piece of that is gonna be a setting that says, what do you want your, you know, low log retention to be? And it should be an environment variable, I guess, or maybe it's a per flow option. T b t b d on that. Right? Maybe there's an environment variable for the the maximum number you can set, and then in the app, there's a drop down, whatever. We'll have to figure it out. But, yeah, to your point, if you can configure, I only wanna keep 7 days worth of flow logs for this flow. I feel like that's a must have for any sort of upgrade that we're gonna be doing the flows, because it's so easy to just now accidentally end up with a couple of 100. Actually, that reminds me, I think, Kevin the other day did a fantastic little flow that he sent over where he was using it for, for beddlesnakes. But I also saw his flow logs in the sidebar already be in the 100, and I was like, how how long have you been running this right now? Right? You're gonna have 1,000 tens of 1,000 of those, so we need to have some sort of retention for that, which I could imagine, you know, if you have a flow that you know you're running a lot and you want it to be lowers, think it has to be per flow. Food for thought. Food for thought for sure. So then when we're thinking about, you know, one other thing I know about the debugging of, flow specifically is that right now to test anything, you have to trigger the actual thing that triggers this. Right? So, as of right now, if, Jonathan wanted to test this ready for review flow in any way, he would have to save this, quit, go to his articles collection, and then manually trigger it. Right? Oh, it's so painful. It is so painful to debug these things, because you have to do just that. Right? I have to go I have to go save, then I have to go to our to my systems over here. I have to go to articles. I mean, at this point, normally, I've got an you're already kind of nabbed here, but now I've gotta open an article, and then I've gotta go through the actions of whatever I'm testing. So ready for review case. I have to push the button, then we go back, and then we're gonna go back to flows. We go back to ready for review. Now I have to go to logs, and now I can look at logs and see what's going on, test flow or not. So you're in here, you're traveling through, and you're looking at your payloads, and at your options wherever you are in the debug process, then you're closing out. Now you're going back to edit. So many mouse It's a pain is what I'm hearing. It's a pain in the butt. Flow is awesome, but it is it is painful to do the administration. That's why we're here. That's why we're here. Yeah. So one thing I think just wanna add another point from from him. Yeah. Sorry. Go. Go. Go. Go. No. Please. Please. Yeah. Yeah. Because, this this then touches the issue of alright, so let's you you want to create a flow. You want to try it out. You have to try it out in order to actually make it work. But then comes the thing, alright, so let's say I have an existing flow, and it's running. It should continue on running, but I want to, I want to edit it without changing the actual running flow, which would technically be something like, alright. Let's duplicate this flow so I can play around with it. And then, after I'm done with my modification, then I switch them or, basically, disable the previous one and enable the next one. So some type of let's let's call it, like, very, very lightly versioning. Yes. Oh, the chat. The chat is exactly it. Once they have the whole tag. Right. So so if, yeah, if you have a couple of, like, important very important flows running in production systems, you don't want to, you know, edit them willy nilly. So that's the that's the another thing, which is can be can be painful because as of right now, there's technically no duplicate button, which is, not as nice as it could be. And, it's it's a difficult one, isn't it? Because I'm also thinking about some of the triggers. Would that make sense? Because if you make a new version of a an endpoint, right, and an endpoint has a UUID to run it. But then if you create a duplicate, now the endpoint is different. So then if you wanna make that your production one, now you have a different endpoint. Right? So it's like the the duplicate and then kill the old one flow pun intended of updating flows might not work for every trigger. Also, if you create a new version of a flow that uses, like, an event hook for, like, an item save on articles or something, like, in this example. Yeah. It would have to be some sort of dedicated versioning because if you create a duplicate, then now you're firing both of your flows. Right? You know, a very, like, naive solution to that event problem would be to have the duplicated flow immediately be set to disabled. So if you duplicate duplicate something, the duplicate one is disabled, so it doesn't fire. And then you can play around with it, change something, and then enable it yourself. Yeah. Somebody in the chat that or somebody's plural in the chat, we're also saying, you know, you can obviously have a local dev instance or a dev copy where you could configure a new flow and then sort of import it at once so you don't, you know, mess anything up on production. Very true. Very true. Wanna make sure, you know, we keep we can Yeah. All the use ASM license. This this then goes hand in hand with the next thing that I wanted to bring up with with the same thing. So let's say you have your dev instance, and then you want to sync your synchronize your flow to the production instance. Alright? There's no version control, basically. If you don't use, like, an actual extension, which you can put into your Git, version control, you would have to manually duplicate or use the API or something to to get the flow over or synchronize between your two instances. Mhmm. Well, luckily, we talked about that particular problem 2 weeks ago in the last episode. That's exactly right. Like and subscribe. Exactly. The link right below the like button. Is that what they say? Yeah. I'm gonna have the sidebar here. Exactly. Circling back, though, because we we have, I think, 2 or 3 different, UX points with, like, a couple of different solutions. So one thing that I think is a must have for sort of this flows upgrade project that we're putting together here is that there needs to be a button on the trigger where you can manually trigger that trigger. The difficulty will be what data do we trigger it with, and how can we help you make that data realistic? Right? That is gonna be the difficulty there, which is kind of what we haven't done before. Because right now, with this manual trigger on an article, you know, the data format for that, like we talked about with the chat a little bit earlier, it's very specific to that particular article and that particular, data model for the article if it's like a filter hook or something. So if you have a manual play button, so to speak, we need to have some sort of way where you can sort of fake what that event looks like. We can pre pre generate a couple of ones based on your instance, I guess. Because, like, we know for the manual trigger that you get the primary key of an article. Right? That's what you would trigger with. So when you hit a play button, I imagine that we would have to show some sort of modal or a dialogue that allows you to put in a JSON object, I guess, with what that sort of, fake data looks like. And for some of the triggers, I think we can prepare that cause we can we can pre fill, you know, a fake article manual run cause we know the format, collection, keys, etcetera. For some of them, you don't need to do anything at all. Like a cron thing, you can just play because there's no real, you know, trigger data coming in, maybe a timestamp, but I'm not a 100% sure. For an endpoint, though, there there's no way for us to know what that looks like. Right? Because the payload is user edit, so we don't really know do with that. So so some of the things I've seen with other tools, then this goes this even goes back 20 plus years ago using WebLogic's BEA workflow engines. You could actually take data from previous runs, so from the logs essentially. You could actually generate your payloads dynamically from that, or have say, I wanna capture this data, and I wanna actually reuse that as the test mechanism and essentially create your test data from an existing run. And then, ultimately, you could even edit at that point, you could edit the payload. So if you wanted to change some of the data attribution for subsequent runs, you could actually just quickly edit. So as part of the you know, that kind of play operation, you would actually get some dialogue capability and say, I wanna use either an existing or, in some way, create or save your payload and then fire that in. And in that way, you know, we don't have to be trying to generate it. We can just say, you can either run it normally, go and trigger the action. You know. Another way that I've done some of it is, like, have an incognito window with log you know, I I don't have to actually do what I just showed you, which is exit my flow. I can actually do the testing from another user. You can there there are some ways you can simplify or streamline that just a little bit. And you may be testing that permission set anyway. Then you may have permissions and restrictions, and you may be testing that as part of your application anyway. You'd have a test user with those permission. But being able to capture that information from an existing log or flow, run would be super cool and helpful. And that's some of what we've captured some of that already. We have the logs. I mean, we have all of that data to your point. Like, if if there was a previous run, we'd know exactly, you know, what what information was there anyway. So we could definitely use that to to prefill it. Is this sort of manual trigger for well, manual trigger is a bad bad name because we already have a thing called manual trigger. But it's this sort of play button. Let's call it that. Is that something you'd wanna do for the whole flow, like, on the trigger level, or is that something you'd also wanna be able to do per operation itself? Because somebody before was like, you know, I used the run script operation a lot. I can also imagine that it would be really useful then to just just be able to say, okay. Just replay my run script a couple of times. But what I was just about to say showed up in the chat. Shout out to the Dev once more. Operations depend on a lot of context from what happened before. Right? Yep. That would be more like that step through capability, right, where you you can step through and, you know, in debug mode, essentially pause. Right? The you know, you have your your break point. And at that point, you stop. You can actually analyze and look at the data. So, I mean, this does become very much ID style kind because that's what this really is. We're we're doing ID development. The idea is you would have these kinds of breakpoint capabilities to be able to stop, look at what data and parameters, be able to look back through the current context, what does it look like. Am I getting what I expect? Okay. Okay. Then step through debug. And it gets complex because we've got this mix of operations, and then you got run script that has, you know, code. Right? You know? Now do I have another debug capability with stop points and break points inside the job? You know? Yeah. Debugging that the TypeScript JavaScript in there directly. It gets very complex in that sense. But I think just the ability to be able to have a I wanna be able break point at the run script, be able to step to the next operation, then check the data and see I'll be able to multiple breakpoint options. We could get more creative down the line. I think if we can just put it at the operation level, that could it would be kinda cool if you could, you know, essentially step back, edit the data, or do something to the input, and then feed it back through, step back, step forward in in the operations themselves. Like, it's more complex than And another thing with all of this, of course, is that, these operations may very well have side of on purpose because you might ping a different endpoint. You might save something's database, whatever. Right? So if you're manually playing, you know, with Flow or even individual operations for debugging purposes or whatever else, those side effects will still be in effect. Right? And if you're trying to debug a whole flow, you know, operation 2 might be dependent on a successful response from operation number 1. Like, number 1, save something to the database. Number 2, reads it, you know, now that it has been saved or something. I'm just trying to come up with an example. Because I was thinking, you know, it it be important to think through some sort of dry run as well where you can just like, okay. This would have now saved to the database. Right? And then, oh, this would have now made a request to the API, but we didn't actually do that. We didn't want to make a Stripe subscription as part of our little testing thing right now, right, to just test if everything works. So that'll be interest interesting. Some sort of dry running, thing behind it. I I do think it's important to think through at least because of the side effects. But to again, shout out to the derp. Can't really do a universal drive run because you never know what, you know, arbitrary stuff is being used. Yeah. You'd almost have to have, like, a successful run or at least a set of data and payload options that you could edit up to say, you know, this is what I'm kind of expecting as I process through my input outputs. Mhmm. So So you'd have to have input output capabilities. What do you expect? And then as you say, dry run mode, not save, but often, you know, maybe the change that I'm making, I then getting that and then doing something. So I'm sending it to that I'm sending a webhook out data payload that comes back, you know, depending on how complex you make a flow. Mhmm. My general recommendation is if flows get that complex, go write a hook. But it's Well Well it it there is gonna be a point. It's a balancing act. Right? There there are I I like flows for admin kinds of functions where state management user control. When I get into really complex conditional lock, do doing this in these boxes so much faster to write TypeScript, be done with a hook on the backside. That's my Yeah. The the similar to yeah. What I wanted to say is this sounds, like, very similar to some type of bash script or something. You know, some some type of scripting, where as soon as you need conditions or something, it's probably better to make it a program. Because, who likes to write bash? Basically, you know, it it gets it gets gnarly really quickly. So as soon as you need sophisticated stuff, some type of conditions that branch into something and do something with arrays or something, it's probably better to actually use a programming language. And, I, yeah, I don't know. It really depends on a lot of things because one one is like, do you even know TypeScript or JavaScript in the first place? I think that's an important one. I think by, you know, doing it manually as as code, you also have a lot more opportunities to break it bad. You know, make it insecure, just make it an optimized break stuff tremendously hard. And also in, the chat somebody pointed out, you know, with flows, you do get the the logs and sort of the ability to explore the steps and what went wrong. So it's it's an interesting take, Jonathan, because what we're saying is, like, as as of today, you know, when it gets complicated, you wanna switch to code because the flows, UX, and UI just doesn't isn't good enough yet for complicated stuff. Right? So Right. Yeah. Yeah. It is. But I I'm hopeful. If if we make this kind of what we're talking about, if we get to that point in these next few iterations on maybe it does become I don't actually need hooks as often. Right? I could be you know, hooks could just be when I need external library. Yeah. Exactly. Allow within flow at the moment. I'm just I'm very I'm very hopeful that by by some of these seemingly small but very big changes, like adding the manual trigger so you can at least just try it out, I think we can actually get this to a point where HOOX is gonna be the more complicated one to do, you know, because it's like, yes, you have a black box and do whatever, but you don't get all of the debugging niceties and and all of the comments. Not that it's a battle. I mean, at the end of the day, it's just pick whatever you want, what's worked best for you. But I think that we can reverse that take, honestly, that flows could be I think that might be my personal challenge. Make flows the default for Jonathan. That's my measure of success. Writes the worst TypeScript he's ever seen. If he says I prefer flows over hooks, it means we did a good job. Perfect. I love it. Project Jonathan is what we call it. Cool. Okay. If you can keep me from writing code, it's always a good. It's always a good idea. So there's there's definitely a couple of unresolved You know, as per usual, I with the eye on the clock. As per usual, we're gonna be wrapping up. We've been taking a lot of notes during all of this, and it's, again, very divergently thinking. We're gonna be compiling all of these notes and ideas from everything that we just talked about and everything on the chat. Thank you for that. Into, you know, a proper RFC document and figure out what does this project upgrade flows, x y z, you know. What does that look like? What do we see as the must have? What can we get done in a sort of initial sprint? There have been, I think, like, 4 or 5 different discussions that are all sort of asking for the same thing, which is, like, improve Flows debugging. But they're all various sort of smaller bits and pieces of a, to me, bigger Flows 2 point o type of upgrade. So we're gonna be trying to compile those and sort of merge them into one sort of flows project, if that makes sense. Just as a quick summary, I think just the the upgrades to the logs that we talked about, you know, making them, like, explorable. I guess that's the right the right word. Giving them its own page, giving it more space, making it easier to step through, you know, the execution of a flow. Definitely a big must have. And the ability to trigger a flow from where you're editing it with some sort of, you know, preloaded payload from a previous run or something you can manually adjust so you can actually try out what the hell it is that you're building without having to go all over the app. I think it's gonna be a very, very important thing to have. Last question from from the chat here before we wrap up. Someone had said that many of these ideas could be implemented by the community in a custom module? No. Yes. Community can build whatever they want as a custom module at the end of the day. That is very true. And with the, you know, upcoming release of, for marketplace project, I'm also very curious to see, you know, what people will do with this type of stuff. Right? Somebody could, definitely, make a flow flows log module and just make it super custom or super proprietary or super flexible, whatever they want Whatever they want, really. Cool. Okay. There was one more that snuck in with an upvote. One final thing. Can we have access to the flow manager from the flows service so we can do more things with flows from extensions? May may may. No problems. So so It depends. It depends. Now I'll I'll, it's oddly tucked away compared to the extension manager, which should also similarly be tucked away, actually. Anyways, no. The the the reason for the hesitation is more around updates that I kinda wanna do to those services in the first place. Right? So I know I wanna do some sweeping upgrades to what those services look like and how they work in the first place. So I'm hesitant to to add new stuff onto the services service that we know might change already. Right? So I I don't wanna introduce something new that's gonna change then fast after, if that makes sense. That being said, yes, I do wanna make sure that extensions do have access to all of those types of internal things, which is also a reason why we've been moving a lot more stuff into individual libraries. Right? So I don't know. For for those who have been watching the repo like a hawk, we just moved sort of the environment variable extraction and handling different library with the same sort of idea, right, where an extension can just import the Directus ENV library and then use the same sort of ENV handling that, Directus does itself. We're we're basically doing that across the board, and the services is one of those things that I wanna do like that. And I can definitely imagine there being eventually being, you know, a Directus flows library that contains, you know, this flows manager state, so that extensions just can import it just like the direct as API would with TypeScript typing. I I heard you. Community, I heard you. With proper TypeScript typing and everything as well. So that's that's the only reason why I'm a little hesitant to promise anything better, as of right now. Any cool. That marks 11 sharp on the clock on my end. I think this was a very interesting session so far. This is a little bit more divergently rambling, than doing an existing discussion, but I think we've touched on a couple of very, very important points. Any any other closing thoughts? If I could remember the dad joke from Bryant this morning, I would tell it, but I suck at remembering jokes. So Well, tune in next time for for John's phone dad joke. With all that being said, thank you to the audience. Thank you for watching. If you're watching this on Directus TV, Directus dot a 0/tv, Hi. Be sure to watch the previous versions of this video down below. And we'll see you guys in 2 weeks, I think. Or are we skipping that one? Because it's sleep week, I think. Maybe oh, TBD. There will be one. I think our next one's in March, because the I think or no. No. It's actually the other way around. We have one more month. We're skipping the first one in March. That was the See you guys in 2 weeks then. I remember now. My poor old brain catching up. It's all in the event tab. Exactly. Keep your eye on it. It's all in the event tab. Thanks, everybody. Kevin's keeping us honest. Cheers, everyone. Bye. Bye bye.",[217,218,219],"adfbc5b5-8825-4bfc-91b7-2702779b8af9","e9fd3c4b-6d03-4791-b250-59c07d49f7a4","6cf228e3-12c4-472f-bc3a-a9cfe6f16cb3",[],{"reps":222},[223,279],{"name":224,"sdr":8,"link":225,"countries":226,"states":228},"John Daniels","https://meet.directus.io/meetings/john2144/john-contact-form-meeting",[227],"United States",[229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,256,257,258,259,260,261,262,263,264,265,266,267,268,269,270,271,272,273,274,275,276,277,278],"Michigan","Indiana","Ohio","West Virginia","Kentucky","Virginia","Tennessee","North Carolina","South Carolina","Georgia","Florida","Alabama","Mississippi","New York","MI","IN","OH","WV","KY","VA","TN","NC","SC","GA","FL","AL","MS","NY","Connecticut","CT","Delaware","DE","Maine","ME","Maryland","MD","Massachusetts","MA","New Hampshire","NH","New Jersey","NJ","Pennsylvania","PA","Rhode Island","RI","Vermont","VT","Washington DC","DC",{"name":280,"link":281,"countries":282},"Michelle Riber","https://meetings.hubspot.com/mriber",[283,284,285,286,287,288,289,290,291,292,293,294,295,296,297,298,299,300,301,302,303,304,305,306,307,308,309,310,311,312,313,314,315,316,317,318,319,320,321,322,323,324,325,326,327,328,329,330,331,332,333,334,335,336,337,338,339,340,341,342,343,344,345,346,347,348,349,350,351,352,353,354,355,356,357,358,359,360,361,362,363,364,365,366,367,368,369,370,371,372,373,374,375,376,377,378,379,380,381,382,383,384,385,386,387,388,389,390,391,392,393,394,395,396,397,398,399,400,401,402,403,404,405,406,407,408,409,410,411,412,413,414,415,416,417,418,419,420,421,422,423,424,425,426,427,428,429,430,431,432,433,434,435,436,437,438,439,440,441,442,443,444,445,446,447,448,449,450,451,452,453,454,455,456,457,458,459,460,461,462,463,464,465,466,467,468,469,470,260,471,472],"Albania","ALB","Algeria","DZA","Andorra","AND","Angola","AGO","Austria","AUT","Belgium","BEL","Benin","BEN","Bosnia and Herzegovina","BIH","Botswana","BWA","Bulgaria","BGR","Burkina Faso","BFA","Burundi","BDI","Cameroon","CMR","Cape Verde","CPV","Central African Republic","CAF","Chad","TCD","Comoros","COM","Côte d'Ivoire","CIV","Croatia","HRV","Czech Republic","CZE","Democratic Republic of Congo","COD","Denmark","DNK","Djibouti","DJI","Egypt","EGY","Equatorial Guinea","GNQ","Eritrea","ERI","Estonia","EST","Eswatini","SWZ","Ethiopia","ETH","Finland","FIN","France","FRA","Gabon","GAB","Gambia","GMB","Ghana","GHA","Greece","GRC","Guinea","GIN","Guinea-Bissau","GNB","Hungary","HUN","Iceland","ISL","Ireland","IRL","Italy","ITA","Kenya","KEN","Latvia","LVA","Lesotho","LSO","Liberia","LBR","Libya","LBY","Liechtenstein","LIE","Lithuania","LTU","Luxembourg","LUX","Madagascar","MDG","Malawi","MWI","Mali","MLI","Malta","MLT","Mauritania","MRT","Mauritius","MUS","Moldova","MDA","Monaco","MCO","Montenegro","MNE","Morocco","MAR","Mozambique","MOZ","Namibia","NAM","Niger","NER","Nigeria","NGA","North Macedonia","MKD","Norway","NOR","Poland","POL","Portugal","PRT","Republic of Congo","COG","Romania","ROU","Rwanda","RWA","San Marino","SMR","São Tomé and Príncipe","STP","Senegal","SEN","Serbia","SRB","Seychelles","SYC","Sierra Leone","SLE","Slovakia","SVK","Slovenia","SVN","Somalia","SOM","South Africa","ZAF","South Sudan","SSD","Spain","ESP","Sudan","SDN","Sweden","SWE","Tanzania","TZA","Togo","TGO","Tunisia","TUN","Uganda","UGA","United Kingdom","GBR","Vatican City","VAT","Zambia","ZMB","Zimbabwe","ZWE","UK","Germany","Netherlands","Switzerland","CH","NL",1773850429230]