[{"data":1,"prerenderedAt":450},["ShallowReactive",2],{"footer-primary":3,"footer-secondary":93,"footer-description":119,"learning-things-i-love-to-hate-edge-computing":121,"learning-things-i-love-to-hate-edge-computing-next":178,"sales-reps":198},{"items":4},[5,29,49,69],{"id":6,"title":7,"url":8,"page":8,"children":9},"522e608a-77b0-4333-820d-d4f44be2ade1","Solutions",null,[10,15,20,25],{"id":11,"title":12,"url":8,"page":13},"fcafe85a-a798-4710-9e7a-776fe413aae5","Headless CMS",{"permalink":14},"/solutions/headless-cms",{"id":16,"title":17,"url":8,"page":18},"79972923-93cf-4777-9e32-5c9b0315fc10","Backend-as-a-Service",{"permalink":19},"/solutions/backend-as-a-service",{"id":21,"title":22,"url":8,"page":23},"0fa8d0c1-7b64-4f6f-939d-d7fdb99fc407","Product Information",{"permalink":24},"/solutions/product-information-management",{"id":26,"title":27,"url":28,"page":8},"63946d54-6052-4780-8ff4-91f5a9931dcc","100+ Things to Build","https://directus.io/blog/100-tools-apps-and-platforms-you-can-build-with-directus",{"id":30,"title":31,"url":8,"page":8,"children":32},"8ab4f9b1-f3e2-44d6-919b-011d91fe072f","Resources",[33,37,41,45],{"id":34,"title":35,"url":36,"page":8},"f951fb84-8777-4b84-9e91-996fe9d25483","Documentation","https://docs.directus.io",{"id":38,"title":39,"url":40,"page":8},"366febc7-a538-4c08-a326-e6204957f1e3","Guides","https://docs.directus.io/guides/",{"id":42,"title":43,"url":44,"page":8},"aeb9128e-1c5f-417f-863c-2449416433cd","Community","https://directus.chat",{"id":46,"title":47,"url":48,"page":8},"da1c2ed8-0a77-49b0-a903-49c56cb07de5","Release Notes","https://github.com/directus/directus/releases",{"id":50,"title":51,"url":8,"page":8,"children":52},"d61fae8c-7502-494a-822f-19ecff3d0256","Support",[53,57,61,65],{"id":54,"title":55,"url":56,"page":8},"8c43c781-7ebd-475f-a931-747e293c0a88","Issue Tracker","https://github.com/directus/directus/issues",{"id":58,"title":59,"url":60,"page":8},"d77bb78e-cf7b-4e01-932a-514414ba49d3","Feature 
Requests","https://github.com/directus/directus/discussions?discussions_q=is:open+sort:top",{"id":62,"title":63,"url":64,"page":8},"4346be2b-2c53-476e-b53b-becacec626a6","Community Chat","https://discord.com/channels/725371605378924594/741317677397704757",{"id":66,"title":67,"url":68,"page":8},"26c115d2-49f7-4edc-935e-d37d427fb89d","Cloud Dashboard","https://directus.cloud",{"id":70,"title":71,"url":8,"page":8,"children":72},"49141403-4f20-44ac-8453-25ace1265812","Organization",[73,78,84,88],{"id":74,"title":75,"url":76,"page":77},"1f36ea92-8a5e-47c8-914c-9822a8b9538a","About","/about",{"permalink":76},{"id":79,"title":80,"url":81,"page":82},"b84bf525-5471-4b14-a93c-225f6c386005","Careers","#",{"permalink":83},"/careers",{"id":85,"title":86,"url":87,"page":8},"86aabc3a-433d-434b-9efa-ad1d34be0a34","Brand Assets","https://drive.google.com/drive/folders/1lBOTba4RaA5ikqOn8Ewo4RYzD0XcymG9?usp=sharing",{"id":89,"title":90,"url":8,"page":91},"8d2fa1e3-198e-4405-81e1-2ceb858bc237","Contact",{"permalink":92},"/contact",{"items":94},[95,101,107,113],{"id":96,"title":97,"url":8,"page":98,"children":100},"8a1b7bfa-429d-4ffc-a650-2a5fdcf356da","Cloud Policies",{"permalink":99},"/cloud-policies",[],{"id":102,"title":103,"url":81,"page":104,"children":106},"bea848ef-828f-4306-8017-6b00ec5d4a0c","License",{"permalink":105},"/bsl",[],{"id":108,"title":109,"url":81,"page":110,"children":112},"4e914f47-4bee-42b7-b445-3119ee4196ef","Terms",{"permalink":111},"/terms",[],{"id":114,"title":115,"url":81,"page":116,"children":118},"ea69eda6-d317-4981-8421-fcabb1826bfd","Privacy",{"permalink":117},"/privacy",[],{"description":120},"\u003Cp>A composable backend to build your Headless CMS, BaaS, and 
more.&nbsp;\u003C/p>",{"id":122,"slug":123,"vimeo_id":124,"description":125,"tile":126,"length":127,"resources":128,"people":132,"episode_number":139,"published":140,"title":141,"video_transcript_html":142,"video_transcript_text":143,"content":8,"status":144,"episode_people":145,"recommendations":166,"season":167,"seo":8},"120171fc-b80d-4940-ba66-a16ea67a1aba","edge-computing","893709475","Kevin is joined by Pandelis to properly understand edge computing, and how they evolve from CDNs and distributed compute.","725ad1de-038d-4832-8344-e5d782c10974",60,[129],{"name":130,"url":131},"Pandelis' Twitter Thread","https://twitter.com/PandelisZ/status/1726957806820164032",[133,136],{"name":134,"url":135},"Pandelis Zembashis","https://twitter.com/PandelisZ",{"name":137,"url":138},"Kevin Lewis","https://directus.io/team/kevin-lewis",2,"2024-01-02","Edge Computing with Pandelis","\u003Cp>Speaker 0: Hello, and welcome to learning things I love to hate. This is a show where I basically stop putting off learning things that I have avoided for one reason or another, inviting in my friends and experts around the topics that I'm trying to learn. And today's topic is edge computing, a topic which has got a little spicy in web circles in the last few weeks. I'm inviting my lovely dear friend, Pandellus, here today. Pandellus, would you like to introduce yourself?\u003C/p>\u003Cp>Speaker 1: Yeah. Hi, Kevin. Nice to see you again. Yeah. I'm I'm Pandellus.\u003C/p>\u003Cp>I've been an engineer for the last few years working in web, working in in deploying a lot of the things that I claim to write. I've worked in web agencies where delivering content to users was super, super important. Back in the jQuery CDN days where when edge computing was basically only CDNs into kind of our modern edge computing world now where we can put entire runtimes, entire databases onto the edge. There's all things I've experimented with at various companies. 
So I I'm so happy when I saw your tweet.\u003C/p>\u003Cp>It's something I've been experimenting with a lot. And, as someone as a user of the web, I think it's something super important for for everyone to to to to to know about and experiment with.\u003C/p>\u003Cp>Speaker 0: Awesome. And, and yeah. So I tweeted out a few days before this recording. Can anyone just, like, explain edge computing to me? Because I really don't understand it.\u003C/p>\u003Cp>And you came in with a wonderful set of illustrations, which actually will just edit in here right now. These are the illustrations, to kind of explain a little bit about how Edge works. So I thought, hey, I'm just gonna invite you onto this show. This was already a topic I was planning on talking about. You preluded, I suppose, some of the things I wanted to discuss today.\u003C/p>\u003Cp>So just to just to open with this, I wanted to explain kind of where my knowledge is now, and hope that you might be able to take me and anyone watching who's in a similar spot through that journey of understanding more. And perhaps if we have some time at the end, we can do a little demo of this abstract concept. So what I know of edge computing, I think, really is around content delivery network, CDNs. We can maybe start a little bit about talking about this. And I know that the concept of what can be stored, you know, and replicated in multiple regions and serve to people based on where they are in the world has just grown more complex.\u003C/p>\u003Cp>We we're doing that with more things. And I suppose my biggest concern and the reason I haven't really done it yet is, 1, the idea that, you somehow need to replicate things really rapidly in lots of regions. And 2, unrelated, it just seems really complex, and I'm not sure how how much the complexity is worth, what might be a payoff that doesn't matter. I know there are some performance hardcore people who are like, of course, it matters. 
But, yeah, that's kind of my starting point.\u003C/p>\u003Cp>So maybe if you take us back, talk a little bit about CDNs and what they are and kinda how that's evolved to where we are now, that would be really good.\u003C/p>\u003Cp>Speaker 1: Yeah. Of course. Yeah. No. I mean, as engineers, we like to make our lives harder for sure.\u003C/p>\u003Cp>It's it's it's we we we do sometimes like to like to overengineer, and it and it's all as with everything in engineering, there's a million ways to do them. Not all of them are right, and a lot of them are right. So your mileage may vary with your needs of edge whatever. A lot of people don't even need the CDN, but it comes down to your users. Right?\u003C/p>\u003Cp>It always comes down to your users and the experience that you're trying to deliver. So let's let's start with CDNs, and what is the problem that that that CDNs solve? So if we go back to the mid 2000, when jQuery and, you know, serving libraries over the wire were were were all their age, we would, you know, blindly go to, CDN stack or whatever those CDN websites were and grab URLs for for jQuery and put them in our sites. Before build systems, before your web packs, before the the the modern JavaScript ecosystem back in days, CDNs were all their age because we wanted to deliver large JavaScript bundles to the user quickly. Internet was slower, and still is slow.\u003C/p>\u003Cp>Or this is h t p one days, right, before streaming h before multi, you know, multiple h t p two streams, and all that type of stuff. So it was very important or we people saw a lot of gain in having these large JavaScript bundles close to the user, meaning the end users could consume that JavaScript faster. And in the end, what that means is when they get their HTML through, it becomes interactive faster.\u003C/p>\u003Cp>Speaker 0: Could I ask you a question? Could I pause it? 
So when you're saying near to where people are, just to clarify, we mean that this one jQuery bundle or whatever is stored on multiple servers throughout the world. I request it via one URL, and then the application which will serve up that file will find the place nearest to where I'm making the request and serve it. And then the additional benefit of CDNs, I suppose, is if I visit 10 sites that all are trying to load the same large bundle, this might be more CDN specific, and I think it also might be a relic of the past, but it\u003C/p>\u003Cp>Speaker 1: it would not It is a relic of the past. That's that's that's one of the topics I was gonna touch on. In in the early days, it was when there was less CDNs around, we we could make the ex we could make the excuse that, okay, by using a shared CDN or, say, a common example was also Google Fonts. Mhmm. By using Google Fonts, by using a common CDN, it means that there's a higher likelihood that a user would already have those assets on their computer.\u003C/p>\u003Cp>Cached.\u003C/p>\u003Cp>Speaker 0: Okay.\u003C/p>\u003Cp>Speaker 1: Exactly. So it's a further cache. Ultimately, the the most edge location is the user's computer. So caching is always a tiering system and CDNs are a type of cache, and the ultimate caching is local. When you have a local cache, that's that's the ultimate edge.\u003C/p>\u003Cp>And really with edge compute, with edge with with the topic of delivering on the edge, what we're trying to achieve is native like performance, and the most edge you can get is being on device. So, arguably, the the most edge computing delivery method today is an app. Apps are on your device, and there's no more edge than the the the device that you're touching. And everything that we're doing on the web or or in edge compute land is to try and achieve native like performance.\u003C/p>\u003Cp>Speaker 0: Cool. 
That makes sense.\u003C/p>\u003Cp>Speaker 1: So, yeah, those are those are sort of CDNs, and that is kind of the the most basic form of putting things on on the edge that we had. So that and that was, and there was a legitimate benefit to to to to CDNs. And what we've tried to do in or what has happened in in the modern era is CDNs themselves have become more and more capable, to the point where they can actually run and execute code. 2 of the most popular and, CDNs around are are CloudFront from AWS and, Cloudflare. And Cloudflare are one of the pioneers of of edge computing as well, and their CDN offering just over time naturally has become more has gained features, has had feature creep, and eventually, we wanted to do more complicated things over time.\u003C/p>\u003Cp>Right? We we may wanna deliver a slightly different version of a bundle for certain users. Or if it gets hit with a with a certain URL parameter, we might wanna bust the cache or or run some sort of, run something on that server that's delivering it, maybe do an AB type test, where certain users got certain bundles or whatnot. So at\u003C/p>\u003Cp>Speaker 0: some point transformations as well?\u003C/p>\u003Cp>Speaker 1: File transformations became yep. Image image CDNs. So moving from kind of just JS bundles, image CDNs are, again, super popular because, on super useful because, the the 2 largest assets that get delivered are the bundle and the images. And images also very bandwidth intensive and benefit largely from being closer to the user. Sure.\u003C/p>\u003Cp>And we're gonna we're gonna say closer to the user a lot throughout this conversation. And what that what that practically means is that you are spending less time in flight over cables. Your server could be very far away. And the further away it is, the longer it will take to navigate, you know, the the, the the sea of cables that live under the sea, to get to your user. 
So the closer you can be, the less hops we can make, the the the the the better for for the end user, but also for our bank, CDNs, specifically, not necessarily edge compute, but CDNs, specifically, also benefit benefit us in that we send less data out because it's also a caching mechanism.\u003C/p>\u003Cp>Edge compute, not so much when we when we get when we get down to it, because one of those goals is to have as fresh content as possible. So when you're when you're when you're caching, you're limited by the freshness that you can deliver. So that that's that's sort of that that's sort of the niche that CDNs sold for a while. And, yeah, more and more doing more and more things on the edge or closer to the user or doing more compute at the CDN level means that we could do these kind of transformation things that you just mentioned with images. Right?\u003C/p>\u003Cp>So say I wanted an image 30% smaller, we could deliver 1 all the way back from our original server, or we could use the kind of already existing cached asset and do some computation on it and then also cache that. And that's this kind of level of level of tiering that that that being closer to the user kind of offers us is is we don't wanna keep hitting our main server, and wanna keep delivering faster things to the user because, yeah, it's all about speed at the end with with with the goal of edge.\u003C/p>\u003Cp>Speaker 0: Cool. So now we're in a space where it's not just about storing assets, whether that's bundles, images, you know, media, whatever. It's actually about running computational tasks at the edge. So I suppose, naturally, that moves us on to maybe what what is edge compute edge computing? What is it capable of now?\u003C/p>\u003Cp>And then I suppose we're going back to my initial concerns or skepticisms. 1, how do you keep all of these nodes? I don't know if they're called nodes. 
I'm assuming they're called nodes in in sync, especially when you gave an example of, you know, doing transformations at the edge and then caching that. Well, is the cache just at a node level?\u003C/p>\u003Cp>Do they propagate throughout a network? Stuff like this, I'm not too sure about. And then it sounds really complex. How how does this actually shake out to being something that's tolerable to developers?\u003C/p>\u003Cp>Speaker 1: Yeah. Edge and the and this this edge cache or caching cash busting, in CDN land, it has always been a pain, especially in the days where I was working as at a web agency. So, basically, all all all that we deliver over the wire is is assets and and JavaScript and images, and it was definitely always always a battle of, okay, well, what is actually being delivered to the user and cache busting it and and and figuring out, oh, okay. Oh, they're on an older version of the of the site, and they're seeing old content and it and us getting rang up by our clients being like, the other site that we just published it. It's not gone live, and that's having to explain.\u003C/p>\u003Cp>Okay. You're gonna wait for the cache to propagate. It's it's gonna it's gonna get to the your user eventually. No. But we've we've launched this because, some context here.\u003C/p>\u003Cp>I used to work in, theater websites, where we had very peak peaky demand spiky demand and very, and certain requirements around, you know, if a certain show went live, they want it live now so that people can book it now.\u003C/p>\u003Cp>Speaker 0: Which is not an unreasonable expectation if I hit publish that I mean, that's even the thing with, like, static website building. There's always a build period, and some people don't get that that build period, you know, is can be quite material.\u003C/p>\u003Cp>Speaker 1: Yep. 
And that is also that's a huge challenge now with edge compute as well, but and also at the time with CDNs as well because we are we're still trying to we're managing demand, and we're managing speed. And the most fast asset we can deliver to someone is something that's cashed. So there's always a challenge on how you distribute or revalidate that cash. So in the CDA, it's so it's definitely I'm not gonna say it's anything that that's that's easy or perfect.\u003C/p>\u003Cp>It's a challenge at every level no matter how simple, even at the simplicity of image and and bundle cache, but also to the level of, of full HTML page cache. Right? So during during go during, say, ticket sale go lives or or certain events, we may sort of cache entire web pages, and that becomes incredibly complicated to deliver to many, many users because we then have the challenge of say, okay. What happens when the tickets now run out? And everyone should be seeing a, you know, sold out banner.\u003C/p>\u003Cp>And, depending on where you last were when that cache hit that node Yeah. And the the the way the way cash is generally work is, you know, it's a pass through cache, meaning, the CDN is the thing that will ask your server, make the request to your server for content, receive it, stream it back to the user, and also save a version on on that server. So depending on who access it at what time, there will be a cache with a certain time to live of 5, 15 minutes a minute. So meaning someone accessed it at some point and has a version maybe 5 minutes old that is sitting there. So and that and, using DNS, using domain name resolution, Based on where you're coming from, it will pick that server and that cache from that server.\u003C/p>\u003Cp>So depending on where you are around the world, if no one's hit it in a in a few minutes, you may get stale content. Stale content, yes, huge challenge. 
And publishing new content, also a huge challenge because you will spike server resource, meaning it's you don't want to always necessarily be be live, because being live means you are sending a lot of data out.\u003C/p>\u003Cp>Speaker 0: Sure. Sure.\u003C/p>\u003Cp>Speaker 1: It's one thing that we was was common to do is or is like, okay. If I update, you know, if I update this article in WordPress or update this article in in whatever c CMS that I'm using to trigger like a full invalidation. Right? Like Directus. Thank you.\u003C/p>\u003Cp>Speaker 0: I\u003C/p>\u003Cp>Speaker 1: have used directors at at an organization as well. Don't be worried. In in in in in whatever CMS that you are using to just have, like, a blanket, like, asterisk invalidate everything.\u003C/p>\u003Cp>Speaker 0: Yeah. Yeah.\u003C/p>\u003Cp>Speaker 1: Which is a perfectly valid strategy for keeping things up to date if that's, you know, your your, your your your requirement. Yeah. Your But there and there's there's also the requirement then of, well, keeping things up and keeping things, keeping things efficient as well. C CDNs and edge computing are also about efficiencies. We talked a bit about sending requests to your server and that caching level being also a protection mechanism of you not actually hitting your server all of the time.\u003C/p>\u003Cp>So, yes, it's very complicated, and it's kind of still on the engineer's side to figure that out. There are better frameworks to help with this. And as we get closer to real edge computing, there's less whole page caching that we have to do. And this is where we we I kind of wanna transition into well, okay. Well well, why?\u003C/p>\u003Cp>Well, why edge compute? Edge compute means we can run more things closer to the user, meaning, hopefully, we need to cache less. 
And if we cache less, we could be more live or or we could be we we can have, less individual things that we cash, and the broader interactivity of the website or smaller subsections of it can be more individually individually controlled as to what level of, of staleness we accept within a web.\u003C/p>\u003Cp>Speaker 0: But it's that trade off, isn't it, against, the or, you know, having non stale content ultimately delivered to users and the efficiency gained by being close to them.\u003C/p>\u003Cp>Speaker 1: Exactly. That's it's that's the that's the balance that we're trying to strike. And, really, CDN caching, and at the level of caching whole HTML pages is kind of like is, you know, is the nuclear option. Right? If you can afford to have if if you're a a, you know, a news site, a very, very static type content website where you can you can do things over JavaScript on the client, or, the the content that you're delivering is is very static, then you can afford whole page caching.\u003C/p>\u003Cp>But as as we get close as we get nearer the graph of, right, highly static versus highly interactive or highly, like, live content, like a ticketing experience where you before you go through the booking experience, you need to know you kind of wanna know, okay. Am I going into this? Am I gonna have to make it a whole account and then be told, oh, by the way, it's sold out as it is.\u003C/p>\u003Cp>Speaker 0: Or even or even more, like, personalized experiences like a social media network where we're not gonna full page cache every one of the profiles and every post and every comment that exists. No. No. No. So there's some element of we need to get fresh fresh data as part of this load.\u003C/p>\u003Cp>Speaker 1: Exactly. I mean, how many times are you, you know, just scrolling scrolling up to the top to get your fresh tweets all the time. Right? 
It's it's, there's a certain amount of stillness a user will accept, and there's also a certain amount of stillness that, the us as engineers or us as whatever company entity that we are will accept with with end users. And Yeah.\u003C/p>\u003Cp>And, and that's that's that's the balance that we're trying to strike. And with edge computing, we are trying to deliver more things or cash listings, and that's achievable in how's that? How's how how do you describe this now? It's a very challenging to describe because we already have compute in in one location. And, moving it closer, just moving it doesn't fulfill, like, kind of the whole stack of requirements that we have, which is we need to deliver quickly.\u003C/p>\u003Cp>We need to be efficient with our resource. We don't wanna just, you know, put things on the edge for the sake of it being on the edge and it costing an arm and a leg, which is one of the concerns you sort of raised as well, which is a a a perfectly normal, you know, objection to this.\u003C/p>\u003Cp>Speaker 0: I didn't even think about the cost. I was thinking purely complexity. But, yeah, of course, there's cost. You're replicating nodes, basically. And the more that happens on those nodes, the more expensive it is.\u003C/p>\u003Cp>I'd not even considered that, but, of course, that's part of it.\u003C/p>\u003Cp>Speaker 1: Yep. And a lot of the the a lot of the current generation, edge compute are serverless pricing models, meaning you are paying for the, there's a there's a couple of pricing models in in this. It's called wall clock time and, and CPU time. One being CPUs, the actual CPU cycles that you consume, meaning you have to write also efficient code to execute. I'll do.\u003C/p>\u003Cp>Otherwise, if you're writing code that is doing weird thing, and there's all sorts of limitations, that exist in terms of what you can do because the run times are also different. When we have a real server, we have any run time that we want. 
We have any c libraries that we want. We have any language that we want. And a lot of the modern, edge compute locations or edge compute run times because they're also designed to run very, very fast on and not in necessarily, I'll say in air quotes, real service like, real they're not actually real servers.\u003C/p>\u003Cp>They are like a subset of the resources that a that these compute platforms have. So so, again, a couple of the popular ones I mentioned, CloudFlare and and CloudFront. Cloudfront is fronted by, Lambders, but not real Lambders. They're actually a subset of the Lambda runtime, that can only run certain limited amount of JavaScript.\u003C/p>\u003Cp>Speaker 0: What? In an effort to be less computationally expensive?\u003C/p>\u003Cp>Speaker 1: That, but also they've developed from this need of okay. I just need to transform an image very slightly. And over time, we've just kind of put trying to put more and more things in it. Cloudflare is is is a bit more is a lot more advanced. And also the CloudFront one, since I've used it back then, it has got a lot more complicated as well and has gained a lot more features.\u003C/p>\u003Cp>CloudFront have again, it's a limited runtime. It's not node. It is a node compatible runtime, but you don't have access to say every single node library or you don't have access maybe even to the entire node modules, or the entire NPM registry.\u003C/p>\u003Cp>Speaker 0: Oh, is this, is this isolates where it's like pure it's a pure JavaScript runtime with, like, some exposed additional functionality? Because we use those inside of directors, inside of our automation builder. But I didn't know that a lot of the JavaScript creature comforts, things even like console log, things like set time out and things like this, they're not they're not part of Node JS. They're not part of, sorry, the core JavaScript spec. 
They're part of Node and the browser implementation of JavaScript.\u003C/p>\u003Cp>And you take for granted when these are the 2 really predominant runtimes that we use. So maybe it's something like that where they're using these isolate, environments with whatever additional functionality is exposed by the vendor.\u003C/p>\u003Cp>Speaker 1: Yep. Yep. I I think isolates her was inspired by some of the some of the work that cloud flooded or or may even be a direct, a direct thing from from from Cloudflare. Yeah. Is it a direct descendant?\u003C/p>\u003Cp>But, but, yeah, it's it's exactly that of of we're no longer in the browser. We're very much on a highly efficient, sub resource, that exists far out on on the edges of of these compute regions. And if we if we were to bring up a map, and I'll share share some assets after as well of we we know the general, you know, EU west 2 being London and, US east 2 from AWS being, you know, in Ohio somewhere and your EU West 1 or your or Google's, you know, EU Central 1 in Norway. These are all locations that they have massive, massive, huge data centers with further sub availability zones being, you know, eu west 1ab, e u west 1 c, etcetera. So huge, huge, you know, warehouse sized data centers with tons and tons of servers that have tons and tons of services on them.\u003C/p>\u003Cp>When you additionally look at the map of of, what are the cloud front regions, which are regions where AWS or some other, you know Vendor. Vendor may have, caching locations for, which which are much the smallest subset of machines that they that they control that in partner data centers or in what are called crosslink or interlinked locations where, you know, many vendors, have their servers and, you know, pass cables all over to each other. That's sort of how how the Internet works in that. There's a whole bunch of data centers where eventually cables interlink with each other's data centers. 
But vendors will have also service in these locations, and these locations are, you know, a lot pricier, a lot closer to the user, and also run much less much much more controlled environments.\u003C/p>\u003Cp>And these are these are these are what we actually refer to. This is what we really, really refer to when we talk about edge locations. We can be close to the user in terms of real locations also in terms of okay. Practically, you sort of want say, if you have a server in you e in US in the West Coast of the US in California, you would also really like a u a a server sort of in Europe ish to serve most of Europe. And then the next optimization is to put them super, super, super close to exactly where, you know, their ISP will interlink and hop over into AWS's domain.\u003C/p>\u003Cp>And that that right there is where we where we actually talk about edge compute.\u003C/p>\u003Cp>Speaker 0: Interesting.\u003C/p>\u003Cp>Speaker 1: So we're putting them beyond server farms going all the way down to kind of the interchanges. And that's all, yeah, we kind of we we put up with lesser resource at our disposal, because also we're sharing with a lot more users or, like, the servers have to serve, you know, many other users, things like that. And, also, because we're so close to the edge and we're doing many other things like fetching and, you know, sending things over the wire as we're streaming. We're we're we're constrained to much more, much faster response times. So Cloudflow, for example, I think you you have to do whatever you whatever compute you do on that edge has to be within, like, half half a second or something or or a second, and you can't do anything more than that, and you get instantly cut off.\u003C/p>\u003Cp>Speaker 0: That's fascinating. So this is really interesting because I thought edge computing was a concept that anyone could apply. It's just the idea that, hey. You know what? 
I'm gonna spin up resources on these different, you know, servers and do some complex, you know, routing of those.\u003C/p>\u003Cp>No. It really is provided by vendors who are kind of more at that backbone layer of infrastructure. And it's uniquely enabled by being really close to what you're calling these interchanges. That's really interesting and not something I quite understood because it's never a marketing material because it's it's really getting in the weeds.\u003C/p>\u003Cp>Speaker 1: Yep. Edge computing, unfortunately, is a very monopolistic type, very, type of operation because it's not anything that we can get involved with. I I cannot I cannot go and run around and just give people, just put some pies around in server files.\u003C/p>\u003Cp>Speaker 0: Just that. Also, nice sticker. That is a retro sticker.\u003C/p>\u003Cp>Speaker 1: Of mine. Yeah. Because there's there's even more stickers in Slack. I can't afford to go and put these servers around, really, really close to users because I do not own that infrastructure, and I would never have access to that infrastructure because you need a lot a lot of money to be put into these type of locations. Like, you need to be running a network.\u003C/p>\u003Cp>But what you were describing there for just a moment as well of, yeah, just being able to put services kind of vaguely close to users, that is distributed computing, which is different from edge computing. It's both that's an versions of distributed computing are also very important to our goals of of, serving content to users really, really quickly. And this should be a computing and edge computing kind of go a little bit hand in hand as well. Because in in this day and age, at least today, we do have, and you raised at at the head of the show as well, your talk about databases or or data as well. 
This day and age, we sort of have, an inkling of kind of like the early CDN days.\u003C/p>\u003Cp>We have an inkling of being able to put databases on the edge. But, again, because there's such limited sub resourced run times, it's very, very difficult. So the current generation of databases still lie on the distributed computing layer, which means we our our content can be really, really close to the user. And, hopefully, the content that that little edge computing box is requesting is is kind of like our users are over here, and then the the users of our database being the edge computing nodes, we wanna also be close to those users. So, we need to we need to both balance, where we put the user's content and also where you put the edge computing nodes content.\u003C/p>\u003Cp>Yeah. Yeah. And more often than that, you know, at the end of the day, a website is kind of putting together a whole bunch of database queries. Right? Most websites.\u003C/p>\u003Cp>So if if we can also put the data that is needed to construct that web page close to where it is being constructed, then we have further performance gain. If we can get it if we can get them collocated right next to each other, that's fantastic. And one of the arguments against edge computing always like and you've also mentioned it as well. Why go through all the hassle to need to have this, now we have less environment. Now we have we can't use all my nice libraries, and I have to I have to struggle with, you know, doing things in under 500 milliseconds and whatever.\u003C/p>\u003Cp>Why do I have to go through all these struggles when it's good enough if they can just access my already distributed you know, I've already put a node in Europe, and I've already put a node in the US. That's good enough. And it comes down to what is the quality that what is the quality experience that we are aiming for? Are we aiming for near native, or, is, you know, is good enough good enough? 
I am a firm believer in it's the world wide web, and it's not the US web.\u003C/p>\u003Cp>And there was a time, Guillermo Rauch, kind of pioneers a little bit now with with Vercel, and their efforts in in edge compute or or delivering good UX as well to developers for for engineering on these new new run times. I've read the tweet of him once of in the early days when he was setting up Vercel or, whatever it was called back in the day. Zeit. Zeit. Zeit. zeit.co.\u003C/p>\u003Cp>Whenever he's setting up zeit.co and he first went to the s went first went to San Francisco, he's like, oh my god. The Internet's so fast over here because that's just where that's just where your your data center starts is in the US. And then good luck when you finally if you decide to put a server somewhere in Europe and we get access to some slightly faster websites. And And at the end of the day, we are talking slightly faster here and there in a lot of cases because our undersea cables between, you know, Europe and the US are pretty fast. If I was to put a data center in Oregon, and we can do we can do this this is one thing we can do.\u003C/p>\u003Cp>We could do a little, ping test as these sites that can ping across all the data centers. If you if if you do a ping test into Ohio, it's about 250 to 300 milliseconds of round trip time. Vaguely okay for most for most use cases. It depend\u003C/p>\u003Cp>Speaker 0: it depends how resource intensive your application is, I suppose. If you're starting to build something that's really, really bandwidth intensive, multimedia site, actually, it starts to matter a lot more.\u003C/p>\u003Cp>Speaker 1: Yep. Multimedia if just even just a normal website. That's that's that's kind of fine for Europe. Right? But, again, we're not the worldwide web of US and UK.\u003C/p>\u003Cp>We're the world wide web. 
And imagine having being in Australia or being in South Africa or being in India or being in China, and having 800 milliseconds to 1.2 seconds of round trip time. And that is round trip time meaning on literally anything that you do. So first, there's and in modern web applications, that is alright. First load HTML.\u003C/p>\u003Cp>1.2 seconds just to get the first request going with streaming HTML, then it hits some JavaScript that he has to go fetch. And that's another 1.2 seconds of doing that. And that's that's just the round trip time, not only you then have to be downloading through that entire time. So it's it really compounds the more that's going on. And when we're aiming for native like snappiness, the goal the goal is native like snappiness with edge.\u003C/p>\u003Cp>And if the the the point is we need to get things really, really close to the user for that initial load. Humans, there's a psychological, element to slowness or to to delivering fast experiences. And if we can show things quickly, it doesn't matter as much how long it actually takes to then deliver things as long as something's happening. There's actually an interesting UX that exists today in AI because, there's this new UX pattern now of streaming text Oh, yeah. Which we didn't have a year ago.\u003C/p>\u003Cp>Speaker 0: The the data physically doesn't exist when that when I start getting a response. It's yeah.\u003C/p>\u003Cp>Speaker 1: And you you you imagine if if if we didn't have that u that UX only serves the purpose of AI being too slow today. Imagine us having to wait a full 10 seconds for us to get the full kind of page back of the AI response. That it that that would be an extremely frustrating human UX experience. But just by the fact that it's actually coming in character by character, you think, oh, shit. That was really fast.\u003C/p>\u003Cp>Because you you actually you're reading you and you're consuming. And that is the UX that edge computing is trying to tackle of. 
And having having a risk response back instantly and having your content just there straight away.\u003C/p>\u003Cp>Speaker 0: Could I ask a question? When you were talking about the, the concern I had around the effort that needs to go in, the effort you described was all about writing performant code in a more, in a more limited environment. But I ask, what is the actual work involved in setting up edge applications beyond just writing code that will run? Do I have to manage where stuff is, or do I basically just, like, fling it up to a vendor and they take care of the of the distribution of that? Like, is there is there work required to physically make it edge code that will run on the edge beyond the environment and beyond the, the performance requirements?\u003C/p>\u003Cp>Speaker 1: There is a gradient. There there there is there is a gradient of of effort that can be spent here. And every sing every single day, there are new there is more and more tooling that makes it easier. As a baseline, anything you do in AWS will be hard because they are a platform. Right?\u003C/p>\u003Cp>They're they're just they they are they are infrastructure of a service. They're not the platform, and they're not the tooling. They are they have other services on top, like, you know, your, whatever they call their their stuff.\u003C/p>\u003Cp>Speaker 0: Amp Amplify.\u003C/p>\u003Cp>Speaker 1: Amplify. AWS Amplify are sort of some of those toolings that do distribute compute. Google Cloud, also, it's gonna be hard, but they have other services built on top. They have things like Cloud Spanner is their distributed database. Really, really fun if you read their white paper on on Cloud Spanner.\u003C/p>\u003Cp>It's it's a very googly white paper, and it's very very overcomplicated thing. And they talk about syncing satellites and time clocks in servers across the planet and how they use this network of satellites to have, like, microsecond time precision is very interesting read. 
Highly recommend. There's things like that. And there's Firebase on top, which also does distributed computing.\u003C/p>\u003Cp>And so there's, so there's frameworks built upon these things that that that make it easier. And ultimately, it comes down to it to frameworks or other platforms that make it easier as we go up the stack. Vercel also are a platform, but they make underneath you know, they they use AWS Lambdas. They use Cloudflare edge workers.\u003C/p>\u003Cp>Speaker 0: Mhmm.\u003C/p>\u003Cp>Speaker 1: And that's sort of where if if you've ever experienced working with Next, which is, again, kind of another superset on top of Vercel, because it just kinda uses all of Vercel features. When you are working in a an edge Next route, you have a limited amount you have a subset of things that you are able to sort of do from from the from the platform Sure. Because it's running it's no longer running on a Lambda, which is a full Node JS instance. It's actually running on a Cloudflare Worker, which is that isolate that you just described. So they are tooling.\u003C/p>\u003Cp>There is tooling that makes it easier. And then in other realms, in databases or in CDNs, there's more tooling there as well. There's there there's such tools like, PlanetScale is becoming very popular these days with its easy distributed computing of or easy distributed computing of databases. And there are other things that are trying to actually put databases on the edge, and the current kind of front runner for that are, SQLite, which actually Cloudflare again, another pioneer in this area. KV, key value store, is a product that is built on SQLite.\u003C/p>\u003Cp>Deno, have a KV, a key value store. Again, kind of built not on SQLite directly, but a fork of it that does this distributed computing. A whole bunch of startups doing SQLite type distributed databases. Edge actually, edge database. Not only distributed, but actual edge\u003C/p>\u003Cp>Speaker 0: On those edge nodes. 
The how does syncing oh, sorry. What I'm basically hearing is vendors more or less will take care of the of the distributing of your assets, whatever those assets are across nodes that they make available. That's not something I necessarily need to do. If I don't care about edge, if I just care about distributed, it's something I could do with great pain.\u003C/p>\u003Cp>Yep. I or I imagine there's tooling still. But a lot of these vendors will just take care of that. That's part of the offering. What about syncing stuff?\u003C/p>\u003Cp>Surely, there's some trade off around, you know, even if it's the data store is what I'll call it over a database. The data store, the cloud, you know, the cloud functions that run the edge functions, and all the assets and stuff like that. How do they all stay in sync? Obviously, when I push an explicit update, I imagine there is an explicit, you know, propagation of that of the updates. But just day to day, I\u003C/p>\u003Cp>Speaker 1: Mhmm.\u003C/p>\u003Cp>Speaker 0: I'm a user. I add an item to a data store that happens near me. How does it get to everything else? And is there a risk involved that things will start to fall out of sync?\u003C/p>\u003Cp>Speaker 1: Yep. Certainly. Now the for the KV stores, for the for the SQLite stores, I've really not read up enough about what's going on there. It's like mind blowing stuff that goes beyond my comprehension right now. So I have no idea how they sync because, also, SQLite is a binary data format.\u003C/p>\u003Cp>So it's it's mostly if anything, I believe it's kind of for, like, edge caching or some sort of be able to write small things. I don't I don't know how it gets back to your main server. Crazy crazy black magic over there.\u003C/p>\u003Cp>Speaker 0: Sorry. Can I pause you? Your main server. So, in all of this, there's still a main server running that\u003C/p>\u003Cp>Speaker 1: You could. You could. 
Or at least, you you you definitely could write you kind of have to flip how you how you develop. If you wanna be entirely edge, you certainly can. And a lot of use cases don't really fit into just being entirely edge.\u003C/p>\u003Cp>Mhmm. Even with, say, Vercel, for all their efforts in doing, distributed computing, or edge computing, when you actually log into your Vercel instance, there is a drop down that you could actually pick your main location that it that it puts. Interesting. It's, it still needs to run that Lambda somewhere in some AWS data center. So there's still a a a main place that you're actually doing real compute.\u003C/p>\u003Cp>You can definitely, if put with a lot of effort, build entirely edge, services, but there will be points at which you need to escape from that to build, to have the full, you know, a company of of real runtime. Sure. And any other any other service that you might need to inter interconnect with, like any, like, queuing system or whatever or and any any real app that is doing something more complicated than just serving a website is gonna need other resource that cannot be run on the edge. And, honestly, it doesn't necessarily even need to run on the edge because we can get you know, there there is, like, the liveness of the website and your interactivity. And then there may be other things that you need to do in terms of, you know, workers or sending batch emails or, you know, do doing other compute or, like, you know, on YouTube scale, like, you know, upload a video and you have to transcode it and all that.\u003C/p>\u003Cp>So there there there's there's always other things that you may need to do in a in a business to serve whatever use case that your business is serving, but the the, serving of a website can kinda be isolated down to, you know, this edge thing.\u003C/p>\u003Cp>Speaker 0: That's but that's fascinating. I never thought about that. Edge is like a is like one of the tools in a wider application. 
It's not I it's not application that's very traditional or perhaps distributed or whatever, but it runs properly. And then elements of this application are run on the edge that benefit from the, from the characteristics of running on the edge.\u003C/p>\u003Cp>That is something that has never quite I never knew that. That's really interesting. But is it fair to say that that core application has to coordinate the edge nodes? Is that what you're what you're thinking? Because you said because you were talking about stuff going back to your main application.\u003C/p>\u003Cp>Tangent.\u003C/p>\u003Cp>Speaker 1: You you you you could. And I and I think it's it's really clicking with you when, earlier when I was describing, you know, just delivering full page versus those small elements of it, and it's like and it's kind of exactly that. But in terms of coordination, you know, this this then gets into, you know, microservice distributed computing type deals of, like, okay. They they can be isolated in their own way and, more was more referring to when sending things back is kind of the data that the user may may have input. So user input or, you know, if they're chatting away, if they're interacting with if they purchased a ticket and writing that back to the source of truth that ultimately, there must be a source of truth somewhere.\u003C/p>\u003Cp>Ultimately, some database needs to know that, look, the tickets were a 100, and now they're 99 available. Someone needs to know and be written back to, and that's what I what I'm referring to when I say, okay. Eventually, get back to\u003C/p>\u003Cp>Speaker 0: Got it.\u003C/p>\u003Cp>Speaker 1: That that source.\u003C/p>\u003Cp>Speaker 0: Got it. Got it.\u003C/p>\u003Cp>Speaker 1: And there's many strategies that we can employ on that source of truth because we can we can have a distributed source of truth. 
Kinda this Cloud Spanner database from Google, which kinda came out, like, 8 years ago, and that that white paper was making its rounds in distributed computing land, was really, really exciting because it sold it was trying to solve the problem of multi node writers. It's a challenging databases today to have multiple, multiple writer nodes. So current current database technology relies on having a single writer with multiple read replicas. So you can you can you can replicate readers all day long because it's easy to, and sort of the the whole theme of this conversation has been it's really easy to replicate, caching content, so replicate reading content, but it's very, very hard to replicate writing or updating content or Absolutely.\u003C/p>\u003Cp>Invalidating. So it's always been very easy to do low latency writes with low latency read replicas where, you know, you might have, you know, those, you know, basic basically, the the ping between 2 servers be your latency from from read write replicas. So you may say and it and depending on your use cases, it may be acceptable that, okay, Europe is, say, 500 milliseconds behind US. So if you were to go to a website, technically, you would be seeing 500 millisecond, stale content, and that may be acceptable. But it may in terms of fairness of the Internet or the World Wide Web and not the UK, US Web, It's unfair to set to always put an advantage on, your your geographical, position.\u003C/p>\u003Cp>So, technically, or, and I think that there there was an article once upon a time around when the Cloud Spanner or one of the Cloud Spanner customers originally was Ticketmaster. And their particular use case was this sort of fairness topic of well, okay. Australia will always be 1.2 seconds behind everybody. So if they go to book a ticket and someone literally just, you know, 1.2 seconds closer to the server clicks it, they will always win. 
So it's unfair to put people at this kind of dispute disadvantage.\u003C/p>\u003Cp>So we're trying to build technologies that distribute these rights more evenly, and you can get kind of more fair distribution of of updates.\u003C/p>\u003Cp>Speaker 0: Yes. Ticketmaster, the bastion of fairness. Sorry. Please continue.\u003C/p>\u003Cp>Speaker 1: Well, in in in reality, what they actually want is, you know, more they can cash less and not have this this, queuing system, which which leads to more clicks, more purchases. Right? That's that's that's the end goal. That's the end goal with all of this. There's well documented stats about how Google goes about about web performance on the web.\u003C/p>\u003Cp>And, you know, if you're x milliseconds slower than a competitor, you know, you're more likely to drop off and just go to the next site that is faster. So while us as engineers, we wanna over complicate our life and do fun complicated engineering things, Us as business people, we wanna deliver things fast because it yields greater returns and is a better native experience and will yield happier users. That's ultimately the goal that we're trying to trying to solve, while also doing it more cheaply, hopefully.\u003C/p>\u003Cp>Speaker 0: Or at least over time. So a question for you then. In the current state of edge computing, what are people actually doing on\u003C/p>\u003Cp>Speaker 1: the edge beyond what, you know, what was once possible with, you know, just CDNs? What people what the most trendy thing now is we are rendering HTML on the edge. We are rendering websites closer to users. And what I mean by rendering websites is we are rendering more interactive content next to the user. 
So no longer are we constructing the whole webs web page, you know, 800 milliseconds away from the user and then delivering that over the wire.\u003C/p>\u003Cp>We are constructing it closer, meaning all the other subsequent calls that we might have to make to other services, are hopefully also calling services also closer to that edge region. So it's it's more and more, companies are putting more and more of their own services in more locations. Sure. So say, let's take an example of I wanna construct a web pay a Shopify web page. Shopify have a fantastic global API network.\u003C/p>\u003Cp>I argue that actually Shopify have the most distributed database on the market because actually, you know, when you update something in Shopify, it's already distributed to their network of of of re replicas around the world. If I was to construct the data there, and then, okay, I also call out to another service that I control, or I also call out to, like, another partner API. If we if we request that, make a request there, and it goes 800 milliseconds to wherever it was being constructed, and then that that does its course and it comes 800 milliseconds back, that's, you know, 600 a 1.6 second round trip or whatever. If we take that request and say put it right next to the user and go, okay. As soon as it comes in, I'm instantly streaming back.\u003C/p>\u003Cp>So it's close to the user, say, 20 milliseconds away. Generally, edge location is a sub 100 millisecond close to the user.\u003C/p>\u003Cp>Speaker 0: Good to know.\u003C/p>\u003Cp>Speaker 1: So they they hit it. Instantly, they get back, you know, they white page with loading things, and it goes, okay. Slows this. Okay. Now I need to grab some stuff from Shopify.\u003C/p>\u003Cp>I've done a request to Shopify back and forth. Hopefully, they're also at edge location, 20 milliseconds back and forth on my edge. I've got my Shopify content. Oh, I'm doing an AB test. 
I've hit my AB test service.com, and I've got back the result of what needs to happen.\u003C/p>\u003Cp>And, okay, I've loaded that content. And it's this it's this dynamicness that we wanna put closer and closer to the user because if we were\u003C/p>\u003Cp>Speaker 0: to And it and it compounds. Yep. All of these 800 millisecond round trips are grueling, but a bunch of 20. I mean, of course, you still wanna be mindful of how many you're doing, but that's the same in any web application. But we're talking orders of magnitude quicker realistically when you're loading any modern web application.\u003C/p>\u003Cp>Speaker 1: Exactly. Yep. So it's it's it's more it's more of this dynamicness because we could totally just off cache that entire page and delivered it to all the users equally. But what if one one user is logged in, one user has a basket where they, that that that we wanna also display and and render on the server? We could totally render it on the client, and this is where also this new modern trend of, you know, it was very it was perfectly fine to deliver entire huge bundle to the user and it be interactive and do API calls on the client side because the client side is also an edge location.\u003C/p>\u003Cp>Sure. But we pay for the upfront cost of them receiving that bundle. So if we can\u003C/p>\u003Cp>Speaker 0: That's interesting. Yep. So it's that trade off still of speed. We need to deliver a a reasonable experience as quickly as possible. Then, ideally, we want all subsequent requests to be as speedy as possible.\u003C/p>\u003Cp>But if, you know, if this is where we're starting to pay a little bit, that can be more acceptable dependent on user and kind of, developer experience or, like, you know, vendor experience requirements. Exactly. That's really interesting. So it's just this trade off of of requirements. 
I feel like this, you know, edge computing starting to gain some traction is bringing a whole new realm of, I don't know what to call that requirement gathering, like, trade off decisions for developers who didn't need to care as much about infrastructure before.\u003C/p>\u003Cp>And now suddenly, the same way front end developers now have to do a bunch of back end work with these kind of mixed hybrid frameworks that run everywhere. I feel like in the in the same way, anyone who does back end work is now having to care about infrastructure in a way where perhaps the the expectations in the past were less than. Yep. And then it's the job of these vendors who who run edge nodes to make that experience as contain as little pain as possible.\u003C/p>\u003Cp>Speaker 1: I think I think you've hit the nail right right on the head there is, we we've we've this this trend is the flip side of is is the next evolution of SPAs, of single page web apps. So that was our edge in the past, and that was our limps limited subset runtime. We could only run JavaScript, then we could only run web JavaScript in that runtime. And, what we're having now is that it's a limited run time, yes, but there's we have more languages, more things that we can run closer to the user, and it's we're delivering them. We're not using the user's resource.\u003C/p>\u003Cp>Right? So a a lot of SPAs or a lot of, mid 2000 in the Internet relied on, you know, fast devices or, you know, doing a lot of read writing of, you know, the DOM tree and all this, and then lower, lower quality devices with less CPU, like dumb phones or or or or basic Android devices had, you know, a worse experience on the web. And what we're trying to deliver is by doing doing the complexities of the HTML rendering and whatnot close to the user and delivering them, you know, the content that just needs to be rendered and device can become dumber again, and need to do less, you know, JavaScript execution on the client. 
Because, really, all we all we actually want at the end of the day is to deliver that HTML to the user. And that liveness, that interactivity used to come from or used to only be able to come from SPAs, client side JavaScript.\u003C/p>\u003Cp>And what we're seeing today is that we're moving more and more of that client JavaScript onto the server, but also quite close to the user to be kind of that native like experience.\u003C/p>\u003Cp>Speaker 0: And that and they're the parts they're the parts. It is we want to make the users do as little computation as possible while not, suffering based on their location. The fact that a load of that computation is going to happen elsewhere. And it's that specific pairing of of requirements, that makes edge com that makes distributed computing more powerful and even more so edge computing because they run right at those interchanges. That's that's great.\u003C/p>\u003Cp>Now, for some of the other episodes of, learning things I love to hate, we went into a demo, but I feel like we've covered a lot here. I don't necessarily think we need to we need to do it so much. Plus, it's a really abstract concept where, you know, I'm sure there are ways, but it isn't gonna be now it's running on the edge, and we can really show that to its fullest. But I think I understand a little more why why edge computing is interesting now. Some more or less basic concepts about how it works when it also might not always be right.\u003C/p>\u003Cp>And this new idea that, oh, no. It's just parts of your application you can delegate to the edge. Again, not something that is spoken about by the hype machine very often.\u003C/p>\u003Cp>Speaker 1: Yep. It's just edge edge\u003C/p>\u003Cp>Speaker 0: edge edge all the time.\u003C/p>\u003Cp>Speaker 1: Just parts. Just parts. Right? And we we we have the we have the same mistaken view of serverless computing, in the past where it was like, okay. Now everything must be serverless.\u003C/p>\u003Cp>Mhmm. No. 
It's a subset of use cases that are that that benefit from it, and that's the it's the same with the edge. It's not we don't wanna put we don't need to put the entire server close to the user. That's impractical.\u003C/p>\u003Cp>But elements of it definitely can. And, and, yeah, and I think the the the the the reason it's so abstract in this way is it's hard for us to experience what other people are experiencing when we're in our, you know, our our nice modern MacBook on gigabit Internet in countries with good Internet. So it's it's hard to put us put ourselves in those shoes. And, the way we used to do that with SPAs and stuff is literally have a crappy old Android phone for dev that you would, you know, every so often look at and use. And we can replicate that if try try the web on a, on a VPN every so often.\u003C/p>\u003Cp>Or if you're in a foreign country, try going to some of your favorite websites and just seeing how much slower they might feel. Because, yeah, it's it's\u003C/p>\u003Cp>Speaker 0: conjured up a memory of a it's slightly off topic, but you've just conjured up a memory of in London, Mozilla used to have an open community space, and in that, they had what they called the Mozilla Device Lab. I imagine there\u003C/p>\u003Cp>Speaker 1: were a few of them, and\u003C/p>\u003Cp>Speaker 0: it was just a bunch of different devices. Or, like, they run they run browsers which are, you know, really, really, really limited with this exact idea that you could be testing on all these different devices. But, yeah, now we need to consider and I suppose that's the other part here. What edge computing enables is less computation happening on a user device, so it's lowering the needs on the user device, and it is making that computation closer. So we're kinda leveling the playing field.\u003C/p>\u003Cp>It's this fairness, as you mentioned earlier, of you don't need the best hardware, and it doesn't necessarily matter where in the world you are. 
Both of those factors are handled by this new emergent technology.\u003C/p>\u003Cp>Speaker 1: Exactly. Exactly. And it's it's it's all it's all right as rain for us to talk about fairness, but at the end of the day is we want to have users around the world because we're hitting we're hitting more ability for us to sell to more users. And at the end of the day, that's really what we're trying to solve with with edge computing because get more and more users with more and more happy users because we can have users that are unhappy, but, really, we wanna have more and more happy users.\u003C/p>\u003Cp>Speaker 0: Yeah. Awesome. Thank you so much for joining me. This has been really, really interesting. I'm loving filming this series.\u003C/p>\u003Cp>I'm learning tons. And hopefully people who have joined us for the ride also, are learning loads. Just before we go, is there anything else you wanna share, point people to where to find you?\u003C/p>\u003Cp>Speaker 1: Sure. I mean, if you want if you wanna see more of, like, crazy drawings on my Twitter, I'm I'm at Pandeliszed, p a n d e l I s, zed, on Twitter. It's it's funny when I was drawing that in the office because we we kinda sit in a bit of a semicircle, and my colleagues turned around and were like, what? How are you doing? I'm like, oh, just drawing.\u003C/p>\u003Cp>Just drawing some stick figures. I was like, okay. Fine. But, yep, that's that's that's where my normal antics are or on Twitter. And, and yeah.\u003C/p>\u003Cp>No. I'm so so happy, that you had me, Kevin, and it was, I I saw your brain clicking at the end there. So I'm so glad that I could I could share that with you, and, hopefully, it's it's no longer anything that you that you hate.\u003C/p>\u003Cp>Speaker 0: It is not something that I hate. I have, a newfound like other episodes, a newfound appreciation and more importantly, an understanding of the qualities. Right? Which mean that it may be something I reach for when appropriate. 
Whereas before, I just lacked the knowledge.\u003C/p>\u003Cp>So I was never gonna reach for it. And, you know, in time, that could lead to me building things that aren't as good, as they could be. So, yeah, thank you ever so much. Thank you to everyone who has joined us, and, we will see you in another episode of learning things I love to hate. Bye.\u003C/p>","Hello, and welcome to learning things I love to hate. This is a show where I basically stop putting off learning things that I have avoided for one reason or another, inviting in my friends and experts around the topics that I'm trying to learn. And today's topic is edge computing, a topic which has got a little spicy in web circles in the last few weeks. I'm inviting my lovely dear friend, Pandellus, here today. Pandellus, would you like to introduce yourself? Yeah. Hi, Kevin. Nice to see you again. Yeah. I'm I'm Pandellus. I've been an engineer for the last few years working in web, working in in deploying a lot of the things that I claim to write. I've worked in web agencies where delivering content to users was super, super important. Back in the jQuery CDN days where when edge computing was basically only CDNs into kind of our modern edge computing world now where we can put entire runtimes, entire databases onto the edge. There's all things I've experimented with at various companies. So I I'm so happy when I saw your tweet. It's something I've been experimenting with a lot. And, as someone as a user of the web, I think it's something super important for for everyone to to to to to know about and experiment with. Awesome. And, and yeah. So I tweeted out a few days before this recording. Can anyone just, like, explain edge computing to me? Because I really don't understand it. And you came in with a wonderful set of illustrations, which actually will just edit in here right now. These are the illustrations, to kind of explain a little bit about how Edge works. 
So I thought, hey, I'm just gonna invite you onto this show. This was already a topic I was planning on talking about. You preluded, I suppose, some of the things I wanted to discuss today. So just to just to open with this, I wanted to explain kind of where my knowledge is now, and hope that you might be able to take me and anyone watching who's in a similar spot through that journey of understanding more. And perhaps if we have some time at the end, we can do a little demo of this abstract concept. So what I know of edge computing, I think, really is around content delivery network, CDNs. We can maybe start a little bit about talking about this. And I know that the concept of what can be stored, you know, and replicated in multiple regions and serve to people based on where they are in the world has just grown more complex. We we're doing that with more things. And I suppose my biggest concern and the reason I haven't really done it yet is, 1, the idea that, you somehow need to replicate things really rapidly in lots of regions. And 2, unrelated, it just seems really complex, and I'm not sure how how much the complexity is worth, what might be a payoff that doesn't matter. I know there are some performance hardcore people who are like, of course, it matters. But, yeah, that's kind of my starting point. So maybe if you take us back, talk a little bit about CDNs and what they are and kinda how that's evolved to where we are now, that would be really good. Yeah. Of course. Yeah. No. I mean, as engineers, we like to make our lives harder for sure. It's it's it's we we we do sometimes like to like to overengineer, and it and it's all as with everything in engineering, there's a million ways to do them. Not all of them are right, and a lot of them are right. So your mileage may vary with your needs of edge whatever. A lot of people don't even need the CDN, but it comes down to your users. Right? 
It always comes down to your users and the experience that you're trying to deliver. So let's let's start with CDNs, and what is the problem that that that CDNs solve? So if we go back to the mid 2000, when jQuery and, you know, serving libraries over the wire were were were all their age, we would, you know, blindly go to, CDN stack or whatever those CDN websites were and grab URLs for for jQuery and put them in our sites. Before build systems, before your web packs, before the the the modern JavaScript ecosystem back in days, CDNs were all their age because we wanted to deliver large JavaScript bundles to the user quickly. Internet was slower, and still is slow. Or this is h t p one days, right, before streaming h before multi, you know, multiple h t p two streams, and all that type of stuff. So it was very important or we people saw a lot of gain in having these large JavaScript bundles close to the user, meaning the end users could consume that JavaScript faster. And in the end, what that means is when they get their HTML through, it becomes interactive faster. Could I ask you a question? Could I pause it? So when you're saying near to where people are, just to clarify, we mean that this one jQuery bundle or whatever is stored on multiple servers throughout the world. I request it via one URL, and then the application which will serve up that file will find the place nearest to where I'm making the request and serve it. And then the additional benefit of CDNs, I suppose, is if I visit 10 sites that all are trying to load the same large bundle, this might be more CDN specific, and I think it also might be a relic of the past, but it it would not It is a relic of the past. That's that's that's one of the topics I was gonna touch on. In in the early days, it was when there was less CDNs around, we we could make the ex we could make the excuse that, okay, by using a shared CDN or, say, a common example was also Google Fonts. Mhmm. 
By using Google Fonts, by using a common CDN, it means that there's a higher likelihood that a user would already have those assets on their computer. Cached. Okay. Exactly. So it's a further cache. Ultimately, the the most edge location is the user's computer. So caching is always a tiering system and CDNs are a type of cache, and the ultimate caching is local. When you have a local cache, that's that's the ultimate edge. And really with edge compute, with edge with with the topic of delivering on the edge, what we're trying to achieve is native like performance, and the most edge you can get is being on device. So, arguably, the the most edge computing delivery method today is an app. Apps are on your device, and there's no more edge than the the the device that you're touching. And everything that we're doing on the web or or in edge compute land is to try and achieve native like performance. Cool. That makes sense. So, yeah, those are those are sort of CDNs, and that is kind of the the most basic form of putting things on on the edge that we had. So that and that was, and there was a legitimate benefit to to to to CDNs. And what we've tried to do in or what has happened in in the modern era is CDNs themselves have become more and more capable, to the point where they can actually run and execute code. 2 of the most popular and, CDNs around are are CloudFront from AWS and, Cloudflare. And Cloudflare are one of the pioneers of of edge computing as well, and their CDN offering just over time naturally has become more has gained features, has had feature creep, and eventually, we wanted to do more complicated things over time. Right? We we may wanna deliver a slightly different version of a bundle for certain users. Or if it gets hit with a with a certain URL parameter, we might wanna bust the cache or or run some sort of, run something on that server that's delivering it, maybe do an AB type test, where certain users got certain bundles or whatnot. 
So at some point transformations as well? File transformations became yep. Image image CDNs. So moving from kind of just JS bundles, image CDNs are, again, super popular because, on super useful because, the the 2 largest assets that get delivered are the bundle and the images. And images also very bandwidth intensive and benefit largely from being closer to the user. Sure. And we're gonna we're gonna say closer to the user a lot throughout this conversation. And what that what that practically means is that you are spending less time in flight over cables. Your server could be very far away. And the further away it is, the longer it will take to navigate, you know, the the, the the sea of cables that live under the sea, to get to your user. So the closer you can be, the less hops we can make, the the the the the better for for the end user, but also for our bank, CDNs, specifically, not necessarily edge compute, but CDNs, specifically, also benefit benefit us in that we send less data out because it's also a caching mechanism. Edge compute, not so much when we when we get when we get down to it, because one of those goals is to have as fresh content as possible. So when you're when you're when you're caching, you're limited by the freshness that you can deliver. So that that's that's sort of that that's sort of the niche that CDNs sold for a while. And, yeah, more and more doing more and more things on the edge or closer to the user or doing more compute at the CDN level means that we could do these kind of transformation things that you just mentioned with images. Right? So say I wanted an image 30% smaller, we could deliver 1 all the way back from our original server, or we could use the kind of already existing cached asset and do some computation on it and then also cache that. 
And that's this kind of level of level of tiering that that that being closer to the user kind of offers us is is we don't wanna keep hitting our main server, and wanna keep delivering faster things to the user because, yeah, it's all about speed at the end with with with the goal of edge. Cool. So now we're in a space where it's not just about storing assets, whether that's bundles, images, you know, media, whatever. It's actually about running computational tasks at the edge. So I suppose, naturally, that moves us on to maybe what what is edge compute edge computing? What is it capable of now? And then I suppose we're going back to my initial concerns or skepticisms. 1, how do you keep all of these nodes? I don't know if they're called nodes. I'm assuming they're called nodes in in sync, especially when you gave an example of, you know, doing transformations at the edge and then caching that. Well, is the cache just at a node level? Do they propagate throughout a network? Stuff like this, I'm not too sure about. And then it sounds really complex. How how does this actually shake out to being something that's tolerable to developers? Yeah. Edge and the and this this edge cache or caching cash busting, in CDN land, it has always been a pain, especially in the days where I was working as at a web agency. So, basically, all all all that we deliver over the wire is is assets and and JavaScript and images, and it was definitely always always a battle of, okay, well, what is actually being delivered to the user and cache busting it and and and figuring out, oh, okay. Oh, they're on an older version of the of the site, and they're seeing old content and it and us getting rang up by our clients being like, the other site that we just published it. It's not gone live, and that's having to explain. Okay. You're gonna wait for the cache to propagate. It's it's gonna it's gonna get to the your user eventually. No. But we've we've launched this because, some context here. 
I used to work in, theater websites, where we had very peak peaky demand spiky demand and very, and certain requirements around, you know, if a certain show went live, they want it live now so that people can book it now. Which is not an unreasonable expectation if I hit publish that I mean, that's even the thing with, like, static website building. There's always a build period, and some people don't get that that build period, you know, is can be quite material. Yep. And that is also that's a huge challenge now with edge compute as well, but and also at the time with CDNs as well because we are we're still trying to we're managing demand, and we're managing speed. And the most fast asset we can deliver to someone is something that's cashed. So there's always a challenge on how you distribute or revalidate that cash. So in the CDA, it's so it's definitely I'm not gonna say it's anything that that's that's easy or perfect. It's a challenge at every level no matter how simple, even at the simplicity of image and and bundle cache, but also to the level of, of full HTML page cache. Right? So during during go during, say, ticket sale go lives or or certain events, we may sort of cache entire web pages, and that becomes incredibly complicated to deliver to many, many users because we then have the challenge of say, okay. What happens when the tickets now run out? And everyone should be seeing a, you know, sold out banner. And, depending on where you last were when that cache hit that node Yeah. And the the the way the way cash is generally work is, you know, it's a pass through cache, meaning, the CDN is the thing that will ask your server, make the request to your server for content, receive it, stream it back to the user, and also save a version on on that server. So depending on who access it at what time, there will be a cache with a certain time to live of 5, 15 minutes a minute. 
So meaning someone accessed it at some point and has a version maybe 5 minutes old that is sitting there. So and that and, using DNS, using domain name resolution, Based on where you're coming from, it will pick that server and that cache from that server. So depending on where you are around the world, if no one's hit it in a in a few minutes, you may get stale content. Stale content, yes, huge challenge. And publishing new content, also a huge challenge because you will spike server resource, meaning it's you don't want to always necessarily be be live, because being live means you are sending a lot of data out. Sure. Sure. It's one thing that we was was common to do is or is like, okay. If I update, you know, if I update this article in WordPress or update this article in in whatever c CMS that I'm using to trigger like a full invalidation. Right? Like Directus. Thank you. I have used directors at at an organization as well. Don't be worried. In in in in in whatever CMS that you are using to just have, like, a blanket, like, asterisk invalidate everything. Yeah. Yeah. Which is a perfectly valid strategy for keeping things up to date if that's, you know, your your, your your your requirement. Yeah. Your But there and there's there's also the requirement then of, well, keeping things up and keeping things, keeping things efficient as well. C CDNs and edge computing are also about efficiencies. We talked a bit about sending requests to your server and that caching level being also a protection mechanism of you not actually hitting your server all of the time. So, yes, it's very complicated, and it's kind of still on the engineer's side to figure that out. There are better frameworks to help with this. And as we get closer to real edge computing, there's less whole page caching that we have to do. And this is where we we I kind of wanna transition into well, okay. Well well, why? Well, why edge compute? 
Edge compute means we can run more things closer to the user, meaning, hopefully, we need to cache less. And if we cache less, we could be more live or or we could be we we can have, less individual things that we cash, and the broader interactivity of the website or smaller subsections of it can be more individually individually controlled as to what level of, of staleness we accept within a web. But it's that trade off, isn't it, against, the or, you know, having non stale content ultimately delivered to users and the efficiency gained by being close to them. Exactly. That's it's that's the that's the balance that we're trying to strike. And, really, CDN caching, and at the level of caching whole HTML pages is kind of like is, you know, is the nuclear option. Right? If you can afford to have if if you're a a, you know, a news site, a very, very static type content website where you can you can do things over JavaScript on the client, or, the the content that you're delivering is is very static, then you can afford whole page caching. But as as we get close as we get nearer the graph of, right, highly static versus highly interactive or highly, like, live content, like a ticketing experience where you before you go through the booking experience, you need to know you kind of wanna know, okay. Am I going into this? Am I gonna have to make it a whole account and then be told, oh, by the way, it's sold out as it is. Or even or even more, like, personalized experiences like a social media network where we're not gonna full page cache every one of the profiles and every post and every comment that exists. No. No. No. So there's some element of we need to get fresh fresh data as part of this load. Exactly. I mean, how many times are you, you know, just scrolling scrolling up to the top to get your fresh tweets all the time. Right? 
It's it's, there's a certain amount of stillness a user will accept, and there's also a certain amount of stillness that, the us as engineers or us as whatever company entity that we are will accept with with end users. And Yeah. And, and that's that's that's the balance that we're trying to strike. And with edge computing, we are trying to deliver more things or cash listings, and that's achievable in how's that? How's how how do you describe this now? It's a very challenging to describe because we already have compute in in one location. And, moving it closer, just moving it doesn't fulfill, like, kind of the whole stack of requirements that we have, which is we need to deliver quickly. We need to be efficient with our resource. We don't wanna just, you know, put things on the edge for the sake of it being on the edge and it costing an arm and a leg, which is one of the concerns you sort of raised as well, which is a a a perfectly normal, you know, objection to this. I didn't even think about the cost. I was thinking purely complexity. But, yeah, of course, there's cost. You're replicating nodes, basically. And the more that happens on those nodes, the more expensive it is. I'd not even considered that, but, of course, that's part of it. Yep. And a lot of the the a lot of the current generation, edge compute are serverless pricing models, meaning you are paying for the, there's a there's a couple of pricing models in in this. It's called wall clock time and, and CPU time. One being CPUs, the actual CPU cycles that you consume, meaning you have to write also efficient code to execute. I'll do. Otherwise, if you're writing code that is doing weird thing, and there's all sorts of limitations, that exist in terms of what you can do because the run times are also different. When we have a real server, we have any run time that we want. We have any c libraries that we want. We have any language that we want. 
And a lot of the modern, edge compute locations or edge compute run times because they're also designed to run very, very fast on and not in necessarily, I'll say in air quotes, real service like, real they're not actually real servers. They are like a subset of the resources that a that these compute platforms have. So so, again, a couple of the popular ones I mentioned, CloudFlare and and CloudFront. Cloudfront is fronted by, Lambders, but not real Lambders. They're actually a subset of the Lambda runtime, that can only run certain limited amount of JavaScript. What? In an effort to be less computationally expensive? That, but also they've developed from this need of okay. I just need to transform an image very slightly. And over time, we've just kind of put trying to put more and more things in it. Cloudflare is is is a bit more is a lot more advanced. And also the CloudFront one, since I've used it back then, it has got a lot more complicated as well and has gained a lot more features. CloudFront have again, it's a limited runtime. It's not node. It is a node compatible runtime, but you don't have access to say every single node library or you don't have access maybe even to the entire node modules, or the entire NPM registry. Oh, is this, is this isolates where it's like pure it's a pure JavaScript runtime with, like, some exposed additional functionality? Because we use those inside of directors, inside of our automation builder. But I didn't know that a lot of the JavaScript creature comforts, things even like console log, things like set time out and things like this, they're not they're not part of Node JS. They're not part of, sorry, the core JavaScript spec. They're part of Node and the browser implementation of JavaScript. And you take for granted when these are the 2 really predominant runtimes that we use. So maybe it's something like that where they're using these isolate, environments with whatever additional functionality is exposed by the vendor. 
Yep. Yep. I I think isolates her was inspired by some of the some of the work that cloud flooded or or may even be a direct, a direct thing from from from Cloudflare. Yeah. Is it a direct descendant? But, but, yeah, it's it's exactly that of of we're no longer in the browser. We're very much on a highly efficient, sub resource, that exists far out on on the edges of of these compute regions. And if we if we were to bring up a map, and I'll share share some assets after as well of we we know the general, you know, EU west 2 being London and, US east 2 from AWS being, you know, in Ohio somewhere and your EU West 1 or your or Google's, you know, EU Central 1 in Norway. These are all locations that they have massive, massive, huge data centers with further sub availability zones being, you know, eu west 1ab, e u west 1 c, etcetera. So huge, huge, you know, warehouse sized data centers with tons and tons of servers that have tons and tons of services on them. When you additionally look at the map of of, what are the cloud front regions, which are regions where AWS or some other, you know Vendor. Vendor may have, caching locations for, which which are much the smallest subset of machines that they that they control that in partner data centers or in what are called crosslink or interlinked locations where, you know, many vendors, have their servers and, you know, pass cables all over to each other. That's sort of how how the Internet works in that. There's a whole bunch of data centers where eventually cables interlink with each other's data centers. But vendors will have also service in these locations, and these locations are, you know, a lot pricier, a lot closer to the user, and also run much less much much more controlled environments. And these are these are these are what we actually refer to. This is what we really, really refer to when we talk about edge locations. We can be close to the user in terms of real locations also in terms of okay. 
Practically, you sort of want say, if you have a server in you e in US in the West Coast of the US in California, you would also really like a u a a server sort of in Europe ish to serve most of Europe. And then the next optimization is to put them super, super, super close to exactly where, you know, their ISP will interlink and hop over into AWS's domain. And that that right there is where we where we actually talk about edge compute. Interesting. So we're putting them beyond server farms going all the way down to kind of the interchanges. And that's all, yeah, we kind of we we put up with lesser resource at our disposal, because also we're sharing with a lot more users or, like, the servers have to serve, you know, many other users, things like that. And, also, because we're so close to the edge and we're doing many other things like fetching and, you know, sending things over the wire as we're streaming. We're we're we're constrained to much more, much faster response times. So Cloudflow, for example, I think you you have to do whatever you whatever compute you do on that edge has to be within, like, half half a second or something or or a second, and you can't do anything more than that, and you get instantly cut off. That's fascinating. So this is really interesting because I thought edge computing was a concept that anyone could apply. It's just the idea that, hey. You know what? I'm gonna spin up resources on these different, you know, servers and do some complex, you know, routing of those. No. It really is provided by vendors who are kind of more at that backbone layer of infrastructure. And it's uniquely enabled by being really close to what you're calling these interchanges. That's really interesting and not something I quite understood because it's never a marketing material because it's it's really getting in the weeds. Yep. 
Edge computing, unfortunately, is a very monopolistic type, very, type of operation because it's not anything that we can get involved with. I I cannot I cannot go and run around and just give people, just put some pies around in server files. Just that. Also, nice sticker. That is a retro sticker. Of mine. Yeah. Because there's there's even more stickers in Slack. I can't afford to go and put these servers around, really, really close to users because I do not own that infrastructure, and I would never have access to that infrastructure because you need a lot a lot of money to be put into these type of locations. Like, you need to be running a network. But what you were describing there for just a moment as well of, yeah, just being able to put services kind of vaguely close to users, that is distributed computing, which is different from edge computing. It's both that's an versions of distributed computing are also very important to our goals of of, serving content to users really, really quickly. And this should be a computing and edge computing kind of go a little bit hand in hand as well. Because in in this day and age, at least today, we do have, and you raised at at the head of the show as well, your talk about databases or or data as well. This day and age, we sort of have, an inkling of kind of like the early CDN days. We have an inkling of being able to put databases on the edge. But, again, because there's such limited sub resourced run times, it's very, very difficult. So the current generation of databases still lie on the distributed computing layer, which means we our our content can be really, really close to the user. And, hopefully, the content that that little edge computing box is requesting is is kind of like our users are over here, and then the the users of our database being the edge computing nodes, we wanna also be close to those users. 
So, we need to we need to both balance, where we put the user's content and also where you put the edge computing nodes content. Yeah. Yeah. And more often than that, you know, at the end of the day, a website is kind of putting together a whole bunch of database queries. Right? Most websites. So if if we can also put the data that is needed to construct that web page close to where it is being constructed, then we have further performance gain. If we can get it if we can get them collocated right next to each other, that's fantastic. And one of the arguments against edge computing always like and you've also mentioned it as well. Why go through all the hassle to need to have this, now we have less environment. Now we have we can't use all my nice libraries, and I have to I have to struggle with, you know, doing things in under 500 milliseconds and whatever. Why do I have to go through all these struggles when it's good enough if they can just access my already distributed you know, I've already put a node in Europe, and I've already put a node in the US. That's good enough. And it comes down to what is the quality that what is the quality experience that we are aiming for? Are we aiming for near native, or, is, you know, is good enough good enough? I am a firm believer in it's the world wide web, and it's not the US web. And there was a time, Guillermo Rauch, kind of pioneers a little bit now with with Vercel, and their efforts in in edge compute or or delivering good UX as well to developers for for engineering on these new new run times. I've read the tweet of him once of in the early days when he was setting up Vercel or, whatever it was called back in the day. Zite. Zite. Zite.zite.co. Whenever he's setting up zite.co and he first went to the s went first went to San Francisco, he's like, oh my god. The Internet's so fast over here because that's just where that's just where your your data center starts is in the US. 
And then good luck when you finally if you decide to put a server somewhere in Europe and we get access to some slightly faster websites. And And at the end of the day, we are talking slightly faster here and there in a lot of cases because our undersea cables between, you know, Europe and the US are pretty fast. If I was to put a data center in Oregon, and we can do we can do this this is one thing we can do. We could do a little, ping test as these sites that can ping across all the data centers. If you if if you do a ping test into Ohio, it's about 250 to 300 milliseconds of round trip time. Vaguely okay for most for most use cases. It depend it depends how resource intensive your application is, I suppose. If you're starting to build something that's really, really bandwidth intensive, multimedia site, actually, it starts to matter a lot more. Yep. Multimedia if just even just a normal website. That's that's that's kind of fine for Europe. Right? But, again, we're not the worldwide web of US and UK. We're the world wide web. And imagine having being in Australia or being in South Africa or being in India or being in China, and having 800 milliseconds to 1.2 seconds of round trip time. And that is round trip time meaning on literally anything that you do. So first, there's and in modern web applications, that is alright. First load HTML. 1.2 seconds just to get the first request going with streaming HTML, then it hits some JavaScript that he has to go fetch. And that's another 1.2 seconds of doing that. And that's that's just the round trip time, not only you then have to be downloading through that entire time. So it's it really compounds the more that's going on. And when we're aiming for native like snappiness, the goal the goal is native like snappiness with edge. And if the the the point is we need to get things really, really close to the user for that initial load. Humans, there's a psychological, element to slowness or to to delivering fast experiences. 
And if we can show things quickly, it doesn't matter as much how long it actually takes to then deliver things as long as something's happening. There's actually an interesting UX that exists today in AI because, there's this new UX pattern now of streaming text Oh, yeah. Which we didn't have a year ago. The the data physically doesn't exist when that when I start getting a response. It's yeah. And you you you imagine if if if we didn't have that u that UX only serves the purpose of AI being too slow today. Imagine us having to wait a full 10 seconds for us to get the full kind of page back of the AI response. That it that that would be an extremely frustrating human UX experience. But just by the fact that it's actually coming in character by character, you think, oh, shit. That was really fast. Because you you actually you're reading you and you're consuming. And that is the UX that edge computing is trying to tackle of. And having having a risk response back instantly and having your content just there straight away. Could I ask a question? When you were talking about the, the concern I had around the effort that needs to go in, the effort you described was all about writing performant code in a more, in a more limited environment. But I ask, what is the actual work involved in setting up edge applications beyond just writing code that will run? Do I have to manage where stuff is, or do I basically just, like, fling it up to a vendor and they take care of the of the distribution of that? Like, is there is there work required to physically make it edge code that will run on the edge beyond the environment and beyond the, the performance requirements? There is a gradient. There there there is there is a gradient of of effort that can be spent here. And every sing every single day, there are new there is more and more tooling that makes it easier. As a baseline, anything you do in AWS will be hard because they are a platform. Right? 
They're they're just they they are they are infrastructure of a service. They're not the platform, and they're not the tooling. They are they have other services on top, like, you know, your, whatever they call their their stuff. Amp Amplify. Amplify. AWS Amplify are sort of some of those toolings that do distribute compute. Google Cloud, also, it's gonna be hard, but they have other services built on top. They have things like Cloud Spanner is their distributed database. Really, really fun if you read their white paper on on Cloud Spanner. It's it's a very googly white paper, and it's very very overcomplicated thing. And they talk about syncing satellites and time clocks in servers across the planet and how they use this network of satellites to have, like, microsecond time precision is very interesting read. Highly recommend. There's things like that. And there's Firebase on top, which also does distributed computing. And so there's, so there's frameworks built upon these things that that that make it easier. And ultimately, it comes down to it to frameworks or other platforms that make it easier as we go up the stack. Vercel also are a platform, but they make underneath you know, they they use AWS Lambdas. They use Cloudflare edge workers. Mhmm. And that's sort of where if if you've ever experienced working with Nest, which is, again, kind of another superset on top of Vercel, because it just kinda uses all of Vercel features. When you are working in a an edge nest route, you have a limited amount you have a subset of things that you are able to sort of do from from the from the platform Sure. Because it's running it's no longer running on a Lambda, which is a 4 Node JS instance. It's actually running on a Cloudflare Worker, which is that isolate that you just described. So they are tooling. There is tooling that makes it easier. And then in other realms, in databases or in CDNs, there's more tooling there as well. 
There's there there's such tools like, PlanetScale is becoming very popular these days with its easy distributed computing of or easy distributed computing of databases. And there are other things that are trying to actually put databases on the edge, and the current kind of front runner for that are, SQLite, which actually Cloudflare again, another pioneer in this area. KV, key value store, is a product that is built on SQLite. Dino, have a KV, a key value store. Again, kind of built not on SQLite directly, but a fork of it that does this distributed computing. A whole bunch of startups doing SQLite type distributed databases. Edge actually, edge database. Not only distributed, but actual edge On those edge nodes. The how does syncing oh, sorry. What I'm basically hearing is vendors more or less will take care of the of the distributing of your assets, whatever those assets are across nodes that they make available. That's not something I necessarily need to do. If I don't care about edge, if I just care about distributed, it's something I could do with great pain. Yep. I or I imagine there's tooling still. But a lot of these vendors will just take care of that. That's part of the offering. What about syncing stuff? Surely, there's some trade off around, you know, even if it's the data store is what I'll call it over a database. The data store, the cloud, you know, the cloud functions that run the edge functions, and all the assets and stuff like that. How do they all stay in sync? Obviously, when I push an explicit update, I imagine there is an explicit, you know, propagation of that of the updates. But just day to day, I Mhmm. I'm a user. I add an item to a data store that happens near me. How does it get to everything else? And is there a risk involved that things will start to fall out of sync? Yep. Certainly. Now the for the KV stores, for the for the SQLite stores, I've really not read up enough about what's going on there. 
It's like mind blowing stuff that goes beyond my comprehension right now. So I have no idea how they sync because, also, SQLite is a binary data format. So it's it's mostly if anything, I believe it's kind of for, like, edge caching or some sort of be able to write small things. I don't I don't know how it gets back to your main server. Crazy crazy black magic over there. Sorry. Can I pause you? Your main server. So, in all of this, there's still a main server running that You could. You could. Or at least, you you you definitely could write you kind of have to flip how you how you develop. If you wanna be entirely edge, you certainly can. And a lot of use cases don't really fit into just being entirely edge. Mhmm. Even with, say, Vercel, for all their efforts in doing, distributed computing, or edge computing, when you actually log into your Vercel instance, there is a drop down that you could actually pick your main location that it that it puts. Interesting. It's, it still needs to run that Lambda somewhere in some AWS data center. So there's still a a a main place that you're actually doing real compute. You can definitely, if put with a lot of effort, build entirely edge, services, but there will be points at which you need to escape from that to build, to have the full, you know, a company of of real runtime. Sure. And any other any other service that you might need to inter interconnect with, like any, like, queuing system or whatever or and any any real app that is doing something more complicated than just serving a website is gonna need other resource that cannot be run on the edge. And, honestly, it doesn't necessarily even need to run on the edge because we can get you know, there there is, like, the liveness of the website and your interactivity. 
And then there may be other things that you need to do in terms of, you know, workers or sending batch emails or, you know, do doing other compute or, like, you know, on YouTube scale, like, you know, upload a video and you have to transcode it and all that. So there there there's there's always other things that you may need to do in a in a business to serve whatever use case that your business is serving, but the the, serving of a website can kinda be isolated down to, you know, this edge thing. That's but that's fascinating. I never thought about that. Edge is like a is like one of the tools in a wider application. It's not I it's not application that's very traditional or perhaps distributed or whatever, but it runs properly. And then elements of this application are run on the edge that benefit from the, from the characteristics of running on the edge. That is something that has never quite I never knew that. That's really interesting. But is it fair to say that that core application has to coordinate the edge nodes? Is that what you're what you're thinking? Because you said because you were talking about stuff going back to your main application. Tangent. You you you you could. And I and I think it's it's really clicking with you when, earlier when I was describing, you know, just delivering full page versus those small elements of it, and it's like and it's kind of exactly that. But in terms of coordination, you know, this this then gets into, you know, microservice distributed computing type deals of, like, okay. They they can be isolated in their own way and, more was more referring to when sending things back is kind of the data that the user may may have input. So user input or, you know, if they're chatting away, if they're interacting with if they purchased a ticket and writing that back to the source of truth that ultimately, there must be a source of truth somewhere. 
Ultimately, some database needs to know that, look, the tickets were a 100, and now they're 99 available. Someone needs to know and be written back to, and that's what I what I'm referring to when I say, okay. Eventually, get back to Got it. That that source. Got it. Got it. And there's many strategies that we can employ on that source of truth because we can we can have a distributed source of truth. Kinda this Cloud Spanner database from Google, which kinda came out, like, 8 years ago, and that that white paper was making its rounds in distributed computing land, was really, really exciting because it sold it was trying to solve the problem of multi node writers. It's a challenging databases today to have multiple, multiple writer nodes. So current current database technology relies on having a single writer with multiple read rep. So you can you can you can replicate readers all day long because it's easy to, and sort of the the whole theme of this conversation has been it's really easy to replicate, caching content, so replicate reading content, but it's very, very hard to replicate writing or updating content or Absolutely. Invalidating. So it's always been very easy to do low latency writes with low latency read replicas where, you know, you might have, you know, those, you know, basic basically, the the ping between 2 servers be your latency from from read write replicas. So you may say and it and depending on your use cases, it may be acceptable that, okay, Europe is, say, 500 milliseconds behind US. So if you were to go to a website, technically, you would be seeing 500 millisecond, stale content, and that may be acceptable. But it may in terms of fairness of the Internet or the World Wide Web and not the UK, US Web, It's unfair to set to always put an advantage on, your your geological, position. 
So, technically, or, and I think that there there was an article once upon a time around when the Cloud Spanner or one of the Cloud Spanner customers originally was Ticketmaster. And their particular use case was this sort of fairness topic of well, okay. Australia will always be 1.2 seconds behind everybody. So if they go to book a ticket and someone literally just, you know, 1.2 seconds closer to the server clicks it, they will always win. So it's unfair to put people at this kind of dispute disadvantage. So we're trying to build technologies that distribute these writes more evenly, and you can get kind of more fair distribution of of updates. Yes. Ticketmaster, the bastion of fairness. Sorry. Please continue. Well, in in in reality, what they actually want is, you know, more they can cache less and not have this this, queuing system, which which leads to more clicks, more purchases.
And what I mean by rendering websites is we are rendering more interactive content next to the user. So no longer are we constructing the whole webs web page, you know, 800 milliseconds away from the user and then delivering that over the wire. We are constructing it closer, meaning all the other subsequent calls that we might have to make to other services, are hopefully also calling services also closer to that edge region. So it's it's more and more, companies are putting more and more of their own services in more locations. Sure. So say, let's take an example of I wanna construct a web pay a Shopify web page. Shopify have a fantastic global API network. I argue that actually Shopify have the most distributed database on the market because actually, you know, when you update something in Shopify, it's already distributed to their network of of of re replicas around the world. If I was to construct the data there, and then, okay, I also call out to another service that I control, or I also call out to, like, another partner API. If we if we request that, make a request there, and it goes 800 milliseconds to wherever it was being constructed, and then that that does its course and it comes 800 milliseconds back, that's, you know, 600 a 1.6 second round trip or whatever. If we take that request and say put it right next to the user and go, okay. As soon as it comes in, I'm instantly streaming back. So it's close to the user, say, 20 milliseconds away. Generally, edge location is a sub 100 millisecond close to the user. Good to know. So they they hit it. Instantly, they get back, you know, they white page with loading things, and it goes, okay. Slows this. Okay. Now I need to grab some stuff from Shopify. I've done a request to Shopify back and forth. Hopefully, they're also at edge location, 20 milliseconds back and forth on my edge. I've got my Shopify content. Oh, I'm doing an AB test. 
I've hit my AB test service.com, and I've got back the result of what needs to happen. And, okay, I've loaded that content. And it's this it's this dynamicness that we wanna put closer and closer to the user because if we were to And it and it compounds. Yep. All of these 800 millisecond round trips are grueling, but a bunch of 20. I mean, of course, you still wanna be mindful of how many you're doing, but that's the same in any web application. But we're talking orders of magnitude quicker realistically when you're loading any modern web application. Exactly. Yep. So it's it's it's more it's more of this dynamicness because we could totally just off cache that entire page and delivered it to all the users equally. But what if one one user is logged in, one user has a basket where they, that that that we wanna also display and and render on the server? We could totally render it on the client, and this is where also this new modern trend of, you know, it was very it was perfectly fine to deliver entire huge bundle to the user and it be interactive and do API calls on the client side because the client side is also an edge location. Sure. But we pay for the upfront cost of them receiving that bundle. So if we can That's interesting. Yep. So it's that trade off still of speed. We need to deliver a a reasonable experience as quickly as possible. Then, ideally, we want all subsequent requests to be as speedy as possible. But if, you know, if this is where we're starting to pay a little bit, that can be more acceptable dependent on user and kind of, developer experience or, like, you know, vendor experience requirements. Exactly. That's really interesting. So it's just this trade off of of requirements. I feel like this, you know, edge computing starting to gain some traction is bringing a whole new realm of, I don't know what to call that requirement gathering, like, trade off decisions for developers who didn't need to care as much about infrastructure before. 
And now suddenly, the same way front end developers now have to do a bunch of back end work with these kind of mixed hybrid frameworks that run everywhere. I feel like in the in the same way, anyone who does back end work is now having to care about infrastructure in a way where perhaps the the expectations in the past were less than. Yep. And then it's the job of these vendors who who run edge nodes to make that experience as contain as little pain as possible. I think I think you've hit the nail right right on the head there is, we we've we've this this trend is the flip side of is is the next evolution of SPAs, of single page web apps. So that was our edge in the past, and that was our limps limited subset runtime. We could only run JavaScript, then we could only run web JavaScript in that runtime. And, what we're having now is that it's a limited run time, yes, but there's we have more languages, more things that we can run closer to the user, and it's we're delivering them. We're not using the user's resource. Right? So a a lot of SPAs or a lot of, mid 2000 in the Internet relied on, you know, fast devices or, you know, doing a lot of read writing of, you know, the DOM tree and all this, and then lower, lower quality devices with less CPU, like dumb phones or or or or basic Android devices had, you know, a worse experience on the web. And what we're trying to deliver is by doing doing the complexities of the HTML rendering and whatnot close to the user and delivering them, you know, the content that just needs to be rendered and device can become dumber again, and need to do less, you know, JavaScript execution on the client. Because, really, all we all we actually want at the end of the day is to deliver that HTML to the user. And that liveness, that interactivity used to come from or used to only be able to come from SPAs, client side JavaScript. 
And what we're seeing today is that we're moving more and more of that client JavaScript onto the server, but also quite close to the user to be kind of that native like experience. And that and they're the parts they're the parts. It is we want to make the users do as little computation as possible while not, suffering based on their location. The fact that a load of that computation is going to happen elsewhere. And it's that specific pairing of of requirements, that makes edge com that makes distributed computing more powerful and even more so edge computing because they run right at those interchanges. That's that's great. Now, for some of the other episodes of, learning things I love to hate, we went into a demo, but I feel like we've covered a lot here. I don't necessarily think we need to we need to do it so much. Plus, it's a really abstract concept where, you know, I'm sure there are ways, but it isn't gonna be now it's running on the edge, and we can really show that to its fullest. But I think I understand a little more why why edge computing is interesting now. Some more or less basic concepts about how it works when it also might not always be right. And this new idea that, oh, no. It's just parts of your application you can delegate to the edge. Again, not something that is spoken about by the hype machine very often. Yep. It's just edge edge edge edge all the time. Just parts. Just parts. Right? And we we we have the we have the same mistaken view of serverless computing, in the past where it was like, okay. Now everything must be serverless. Mhmm. No. It's a subset of use cases that are that that benefit from it, and that's the it's the same with the edge. It's not we don't wanna put we don't need to put the entire server close to the user. That's impractical. But elements of it definitely can. 
And, and, yeah, and I think the the the the the reason it's so abstract in this way is it's hard for us to experience what other people are experiencing when we're in our, you know, our our nice modern MacBook on gigabit Internet in countries with good Internet. So it's it's hard to put us put ourselves in those shoes. And, the way we used to do that with SPAs and stuff is literally have a crappy old Android phone for dev that you would, you know, every so often look at and use. And we can replicate that if try try the web on a, on a VPN every so often. Or if you're in a foreign country, try going to some of your favorite websites and just seeing how much slower they might feel. Because, yeah, it's it's conjured up a memory of a it's slightly off topic, but you've just conjured up a memory of in London, Mozilla used to have an open community space, and in that, they had what they called the Mozilla Device Lab. I imagine there were a few of them, and it was just a bunch of different devices. Or, like, they run they run browsers which are, you know, really, really, really limited with this exact idea that you could be testing on all these different devices. But, yeah, now we need to consider and I suppose that's the other part here. What edge computing enables is less computation happening on a user device, so it's lowering the needs on the user device, and it is making that computation closer. So we're kinda leveling the playing field. It's this fairness, as you mentioned earlier, of you don't need the best hardware, and it doesn't necessarily matter where in the world you are. Both of those factors are handled by this new emergent technology. Exactly. Exactly. And it's it's it's all it's all right as rain for us to talk about fairness, but at the end of the day is we want to have users around the world because we're hitting we're hitting more ability for us to sell to more users. 
And at the end of the day, that's really what we're trying to solve with with edge computing because get more and more users with more and more happy users because we can have users that are unhappy, but, really, we wanna have more and more happy users. Yeah. Awesome. Thank you so much for joining me. This has been really, really interesting. I'm loving filming this series. I'm learning tons. And hopefully people who have joined us for the ride also, are learning loads. Just before we go, is there anything else you wanna share, point people to where to find you? Sure. I mean, if you want if you wanna see more of, like, crazy drawings on my Twitter, I'm I'm at Pandeliszed, p a n d e l I s, zed, on Twitter. It's it's funny when I was drawing that in the office because we we kinda sit in a bit of a semicircle, and my colleagues turned around and were like, what? How are you doing? I'm like, oh, just drawing. Just drawing some stick figures. I was like, okay. Fine. But, yep, that's that's that's where my normal antics are or on Twitter. And, and yeah. No. I'm so so happy, that you had me, Kevin, and it was, I I saw your brain clicking at the end there. So I'm so glad that I could I could share that with you, and, hopefully, it's it's no longer anything that you that you hate. It is not something that I hate. I have, a newfound like other episodes, a newfound appreciation and more importantly, an understanding of the qualities. Right? Which mean that it may be something I reach for when appropriate. Whereas before, I just lacked the knowledge. So I was never gonna reach for it. And, you know, in time, that could lead to me building things that aren't as good, as they could be. So, yeah, thank you ever so much. Thank you to everyone who has joined us, and, we will see you in another episode of learning things I love to hate. 
Bye.","published",[146,156],{"people_id":147},{"id":148,"first_name":149,"last_name":150,"avatar":151,"bio":152,"links":153},"82b3f7e5-637b-4890-93b2-378b497d5dc6","Kevin","Lewis","a662f91b-1ee9-4277-8c9d-3ac1878e44ad","Director of Developer Experience at Directus",[154],{"url":138,"service":155},"website",{"people_id":157},{"id":158,"first_name":159,"last_name":160,"avatar":161,"bio":162,"links":163},"e922cb25-10ca-40ff-a021-5fc9116c88ee","Pandelis","Zembashis","183e7b42-591a-4532-bf5e-d64490f0f666","Senior AI Engineer at Cosine",[164],{"url":135,"service":165},"twitter",[],{"id":168,"number":169,"year":170,"episodes":171,"show":175},"bf1ea922-80fc-420e-ac6a-fa17d6a2ab3e",1,"2023",[172,122,173,174],"2d848504-0907-4315-b73e-94745042b868","91cbc0a4-a6a6-462f-8f80-768ab96db9d1","4c5904eb-cc3b-4c54-b0a2-701be5ada53d",{"title":176,"tile":177},"Learning Things I Love To Hate","4a962a76-2351-477b-be15-c3f1fca6f82b",{"id":173,"slug":179,"season":168,"vimeo_id":180,"description":181,"tile":182,"length":183,"resources":8,"people":184,"episode_number":189,"published":190,"title":191,"video_transcript_html":192,"video_transcript_text":193,"content":8,"seo":8,"status":144,"episode_people":194,"recommendations":197},"ai-devs","893669889","Kevin is joined by Rizèl to learn how developers REALLY use AI in their day-to-day work, including ChatGPT and GitHub Copilot.","d7be166a-f383-4ab5-8399-fb2c2a3bf4ad",41,[185,188],{"name":186,"url":187},"Rizèl Scarlett","https://twitter.com/blackgirlbytes",{"name":137,"url":138},3,"2024-01-16","Generative AI For Devs with Rizèl ","\u003Cp>Speaker 0: Hello there. My name is Kevin, and welcome to learning things I love to hate. In this show, I stop avoiding technologies that I've been avoiding for one reason or another by inviting my friends and colleagues to teach me more about them. And today, my dear friend, Roselle, hello. Would you like to introduce yourself?\u003C/p>\u003Cp>Speaker 1: Hello. Yes. My name is Roselle Scarlett. 
I'm a staff developer advocate at a company called TBD, which stands for to be decentralized, not to be determined.
And, also, I believe it can do, like, straight up, like, we will try and auto complete the code you're writing, maybe. In any case, my experience and thoughts around AI are the following.\u003C/p>\u003Cp>1, I know I've seen some stats, like, hallucination rates or, like, where wrong rates are are dropping, and I truly believe at one point this concern will not be the case. But I simply don't trust it. Right? I don't trust it to give me codes that either is correct or is the right way of doing things. You know?\u003C/p>\u003Cp>So that's one concern. And the other is having used CodePilot at the very beginning, just the first version of CodePilot before it was a bunch of of separate services, which I don't really know that much about, I found it really annoying. Like, I actually found their interaction really frustrating and in the way to the point where I turned it off because it was in the way. So, that's the kind of basis to one other bit of context that might be useful is I work for a company called Directus. We have a Discord with, like, 11,000 people in it.\u003C/p>\u003Cp>People drop by when they have problems. And community, including our team, see if we can help people and an increasing amount. And we spoke about this briefly on Twitter. An increasing amount people think our stuff's broken, and the code snippets they share are hallucinations. They never existed.\u003C/p>\u003Cp>They never even nearly existed. And there's a little bit of a maybe I'm slightly salty because, you know, people think your thing's broken, but, actually, it's not even it's not even a code snippet we wrote ever in any point in time. It's just wrong. And so there's something there around how people perceive AI and what it can do for them at their current stage in their career, their project, whatever. 
So that's my little brain dump to maybe start.\u003C/p>\u003Cp>Could you maybe give us a bit of a landscape, anything you know about AI for developers, whether that's GitHub specific, other other tools, whatever?\u003C/p>\u003Cp>Speaker 1: Yeah. Right now, what I use for coding with AI is usually GitHub Copilot or ChatTPT. But I do wanna, like, even back up and explain why I like it because you might just be like, oh, just because you worked at GitHub. But I remember and and that's fair. Right?\u003C/p>\u003Cp>But I remember when I joined GitHub, I did see all of, like, those, like, negative or mixed reviews. There were some people on Twitter being, like, I love it. And then there were some people being, like, I think I joined when it just went out in public beta, in, like, September 2021. And that's when I was like, let me see what this thing is. I'm, like, half skeptical or half curious.\u003C/p>\u003Cp>And at first, I was, like, this does not work. I hate it. It's, like, kinda like how you were like, it's a Yeah. Way. But as I started to use it more and more, I kinda got used to it and figured out, like, how it works on my workflow.\u003C/p>\u003Cp>I'm like, okay. Maybe I don't always want it to at first, at one point, when I knew what I was doing, I would, like, turn off GitHub Copilot. And then if I, like, needed help or if there was, like, a pattern like, let's say, I already wrote an API call with a get for a get request, and then I was, like, I did a post request. Maybe now I need to do a patch and delete. I'll get turn GitHub Copilot back on because I'm, like, okay.\u003C/p>\u003Cp>It it knows kind of, like,\u003C/p>\u003Cp>Speaker 0: what boiler plating almost.\u003C/p>\u003Cp>Speaker 1: Yeah. Yeah. Exactly. But then once I started to like, after that, like, I just had it on all the time. 
And then one thing that makes me passionate about it is that, is the psychological safety aspect.\u003C/p>\u003Cp>I feel like when I started off as a software engineer back in 2018 or 2019, I don't remember, like, my team was really mean. I was like, they were so mean. I was, like, the only black girl on the team. And, like, it got to the point where I, like, had, like, real life anxiety from my job. Like, I was, like, on the bus on the way home, and I couldn't even stand up because my muscles That's why I think we were, like, kinda, like, discussing or debating or had different points of views because you're like, I don't think it's good for juniors.\u003C/p>\u003Cp>But I'm like, I do because I wish I had at least something that I can talk back and forth with or, like, share an idea with and, when I was, like, just starting out. But I agree with you. It does sometimes generate wrong code. Or not even sometimes, a lot of times. And I do think that I think there's a part of, like, needed education around that.\u003C/p>\u003Cp>And, also, another thing on on your topic or on your concern about, like, direct us. One thing that the new company I work at has done is I'm sharing my screen right now. Okay. They trained let me I don't wanna show all my questions, so let me hide that. But they created a plug in.\u003C/p>\u003Cp>So so for, like, chat gpt4, or, like, I think it's, like, a bit like, a more premium mode. You have plug ins. Right? And they create a plug And\u003C/p>\u003Cp>Speaker 0: you you, like, feed it. You feed it your corpus of data, your docs. Right? And then you give it you give it some some the custom instructions. I I use chat GPT also.\u003C/p>\u003Cp>I just don't use it for code. So I understand the concepts. Right? So you give it the custom instructions and tell it who the persona is who will use it, and it'll basically spit out a custom assistant given the context.\u003C/p>\u003Cp>Speaker 1: Yeah. 
And I think GitHub Copilot has a similar thing too called Copilot for docs. And this the fact that they trained, the fact that TBD trained their doc or chat gpt on these docs has been extremely helpful for me. Because I walk in into this job as, like, I'm a staff developer advocate, so I'm expected to know this stuff. But I never heard of Web5 before, like, I even interview for the company, really.\u003C/p>\u003Cp>I don't know, like, how to do this stuff. And the documentation is, how will I say, I think it's a work in progress. And also, I'm just not a person. I'm I'm\u003C/p>\u003Cp>Speaker 0: like I I I lead the team that writes our docs. So I think calling things a work in progress is completely fair.\u003C/p>\u003Cp>Speaker 1: Okay. Good.\u003C/p>\u003Cp>Speaker 0: No. No. That look that look is like, yeah. I know what you mean.\u003C/p>\u003Cp>Speaker 1: Because I'm like, I read it, and I'm like, what? Like, there's missing stuff with it's it's still forming. The the product itself is still forming. And I'm also not really a person that reads documentation end to end. Like, that's just not me.\u003C/p>\u003Cp>Like, I do your\u003C/p>\u003Cp>Speaker 0: learning style.\u003C/p>\u003Cp>Speaker 1: Yeah. That's not my learning style.\u003C/p>\u003Cp>Speaker 0: Good shit. That's fine.\u003C/p>\u003Cp>Speaker 1: Exactly. So, like, for example, I for me to build an application at this company, I, like, I wanted to jump in. I wanted to, like, be able to be, like, alright. I'm ready to go and, like, you guys don't have to hold my hand or you all don't have to hold my hand. I just been talking to chat gpt and being like, hey.\u003C/p>\u003Cp>Why is it saying that the rec record dot send is not a function? Here's my code. And, like, it says, okay. It does exist. I'm like, alright.\u003C/p>\u003Cp>I know that. Like, you're being a little annoying. But then I I just from reading what they said, I was like, okay. I figured out that it needs to be destructured. 
And I'm like, okay.\u003C/p>\u003Cp>Sometimes when I refresh my page, this list disappears, and it goes on and tells me, oh, it's probably because of the asynchronous state setting. When you're calling this function, this, like, state variable is not persisting. I'm like, okay. Cool. So, like, I'm able to, like, go back and forth with it and be like, alright.\u003C/p>\u003Cp>Maybe I didn't think of this perception, or perspective. And I think it's, it's kinda like rubber ducking, I think, because sometimes I'm not I\u003C/p>\u003Cp>Speaker 0: think I think that's fair. See, I have a few thoughts about what you've just shown me. So the first is Yes. You 1, you made a comment that let's just say the docs are incomplete. Let's just summarize it there or they're not in their state.\u003C/p>\u003Cp>They're not accurate. Whatever it is, they are not they do not contain the complete information. So, 1, this piece of software is ultimately guessing at the blanks, and I'm not sure I'm comfortable with that. The second is you demonstrated that you have an ability to go, it was being slightly annoying. I I know it was not giving me the right answer.\u003C/p>\u003Cp>And that is not necessarily a skill everyone has with the immediate counter in my mind of, like, okay. But you and I are experienced developers and, like, I I'm not gatekeeping anyway for myself, but I do have some skill that allows me to assess the correctness that not everyone has. And I wonder if that changes the experience or the perceived experience of a user. And then, and then beyond that, I I get it. It allows you to rubber duck to have that thought process back and forth.\u003C/p>\u003Cp>But I have those 2 concerns. Garbage in, garbage out. And I'm not saying the docs are garbage, but the docs miss things. So how do you how does it reconcile that? Or, like, have you ever found that it does just make things up?\u003C/p>\u003Cp>Speaker 1: Yeah. 
And\u003C/p>\u003Cp>Speaker 0: the and the other is yeah, how what do you have any thoughts around people who lack that ability to look at something and go, this isn't right or isn't what I want or it's contextually incorrect or whatever it may be? Yeah. I mean,\u003C/p>\u003Cp>Speaker 1: exactly remember my skill set as a junior. But I do think it's kind of interesting because okay. When I was a junior, sometimes I would copy and paste stuff in Stack Overflow, and it wouldn't work. And then I would be, like, alright. Well, maybe I need to keep trying and find something else.\u003C/p>\u003Cp>So I always I I think it's an interesting case that, like, they're copying stuff from AI. They see that it doesn't work, and then they're just, like, well, it doesn't work. I'm, like, usually my even even when I was just learning, I feel like my initial response is, wait. Hold on. Let me try something else.\u003C/p>\u003Cp>Let me figure out. Because I will go back and tell ChachiPT or GitHub Copilot. I'll be like, this doesn't work. Like, what are you talking about? Yeah.\u003C/p>\u003Cp>You have this part wrong. Like, I'll usually feed it the information of, like, here's why you're wrong. And then it'll be, like, oh, sorry. And I don't I don't know. I think I've always told people when I taught them GitHub Copilot things, like, when I do talks, I'm like, treat it like you're it's a coworker.\u003C/p>\u003Cp>Like, you don't trust your coworker's code all the way.\u003C/p>\u003Cp>Speaker 0: Well, that's the thing. I I think you've made a really interesting point around trust. 1 Yeah. Yeah. But how else were you learning before?\u003C/p>\u003Cp>Probably through trial and error with code that didn't work. This is just surfacing it in a different way. It's just a different, yeah, way of generating or surfacing or searching that code on the web. And then the other side of it is, sure, what's the alternative? 
You know, my feelings about docs and AI, people who have hung around in our Discord server will know this is today.\u003C/p>\u003Cp>And we're recording this at the end of 2023. I don't necessarily think it will come out, you know, very, very swiftly. So just so people know because breakthroughs are happening all the time. I will not present codes to people as an educational resource that is AI generated because the moment you present it to people, you are endorsing it. And it doesn't matter how many caveats you put in.\u003C/p>\u003Cp>The people's behavior with AI seems to not fully get that it hallucinates. Yeah. And so today, it's just not there. It's just it's just not there. And don't get me wrong.\u003C/p>\u003Cp>I'm still exploring AI. I'm still thinking about it. I'm still thinking, well, how do we lay the foundation. So when that concern falls away, because it will, we can we can deliver something great in terms of the world of education. I never touch AI generated docs, AI generated answers.\u003C/p>\u003Cp>I never use chatbot interfaces. I do not trust them.\u003C/p>\u003Cp>Speaker 1: Interesting. Even if you try them out after, like, that's the part. Because it's not like they're just\u003C/p>\u003Cp>Speaker 0: But then I but then I am someone who doesn't mind reading documentation. Interesting thing about this is it isn't yeah. It doesn't just have to consume, consume docs. It can consume other, other sources of information around the web, and maybe that's that's kinda interesting. The other the other concern I had, was so, you know, a lot of these trust arguments, I see the counter arguments.\u003C/p>\u003Cp>It's not the first time I've heard many of them, but it's good to See. Excuse me. It's good to to think about them a little more. The other part of it was the annoyance factor. 
And I know that even in the world of GitHub, they have released CodePilot is not, just the thing that autofills your code based on comments.\u003C/p>\u003Cp>It's like an ecosystem of services. And I'm kinda curious, bearing in mind that it doesn't just have to be that interface. It doesn't just have to be chat in a browser. Mhmm. What how else do you use AI in your in your practice as a developer or a developer advocate, outside of what original Copilot looks like?\u003C/p>\u003Cp>Speaker 1: I use original Copilot. I use Copilot chat, which is, like, chat gpt. But, I'm\u003C/p>\u003Cp>Speaker 0: contextually aware of your codes and the environment at which it runs.\u003C/p>\u003Cp>Speaker 1: Yeah. I'm trying interesting. The I thought I had a Visual Studio example code example. Hold on. There we go.\u003C/p>\u003Cp>And then, oh, another one I use is within my terminal. So there's, like, Copilot CLI that I use. So sometimes I'll be like, oh, man. I forgot how to, like, do this bash for me. And I don't have the example right now.\u003C/p>\u003Cp>My bad. But I'll be like, I forgot how to, like, do this or I forgot how to write this script. And then I'll write in natural language what I wanna do, and then GitHub Copilot will give me a suggestion of what I wanna do of what it thinks I wanna do, and it'll give an explanation of what that command is. And then I can say, revise the query. That's not what I want.\u003C/p>\u003Cp>Or I can be like, yes. That's what I want. Go ahead and run the code. So if I'd be like, actually, I didn't want it to just, like, grab grab all of these JavaScript files or maybe I wanted to remove node modules or something like that, it'll, like, revise the query for me. But I mostly use the comment fee like, writing the comments and then the chats on the side.\u003C/p>\u003Cp>Speaker 0: Interesting. That's fascinating. That's a really interesting interface. And, of course, I I mean, logically, I know this interface exists. 
This will this will be one of many tools like this that is contextually aware of your code base.\u003C/p>\u003Cp>That's another issue with, like, chat g p t, for example, is it's not contextually aware of your code base. It just gives you a\u003C/p>\u003Cp>Speaker 1: isolated pace. Exactly. It's so annoying. Whereas this is like, oh, use one reference in the index dotjs, from line 22 to 2279 to, write out all of this. Oh, another thing I want to bring up is I mostly use AI to understand code, to be honest, more than, like, literally write out my code.\u003C/p>\u003Cp>Speaker 0: Yeah. Could I ask you could I ask you a favor? Could you bump your font size up Oh. 2 or 3 times?\u003C/p>\u003Cp>Speaker 1: I can't. Let me,\u003C/p>\u003Cp>Speaker 0: All good. That's good.\u003C/p>\u003Cp>Speaker 1: Doing right here. Cool. So one thing I use it for is, like, understanding that code. So, let's say, This is I don't know. I I use a lot of AI code even though you don't.\u003C/p>\u003Cp>Alright. So, oh, wait. Let's say, right, I I want a better understanding of this Web 5 package, which I've lit legitimately done in the past. Right? So I've opened up, like, these like, the the type source or the source definition for the code that they're creating.\u003C/p>\u003Cp>And I'll, like, highlight all of it and be, like, hey, Copilot chat. Can you explain what's going on in this? How do I sometimes I'll ask you questions, like, how do I, like, create a record or whatever. Let me see if I can just give an example here. If I just say explain.\u003C/p>\u003Cp>Speaker 0: Like a slash command. Interesting.\u003C/p>\u003Cp>Speaker 1: Yeah. They have slash commands. And okay. It's using all of these references. That's cool.\u003C/p>\u003Cp>They're working on it even more. But I I mostly use it for explanations because sometimes I'll look at this and I'm like, my brain is hurting. Like, what is this giving me? So once it breaks down all that stuff for me, it gives me more insight. 
Like, that's that's my main way\u003C/p>\u003Cp>Speaker 0: of thinking. Say I will say in the skeptics world here, a few things. 1 Yeah. Do do you trust what it says? That's a I mean,\u003C/p>\u003Cp>Speaker 1: I don't know. Go in half, like, half trusting. And I think that's okay.\u003C/p>\u003Cp>Speaker 0: My real thought though is this file you've opened up on the right looks very lovingly commented, which is fantastic.\u003C/p>\u003Cp>Speaker 1: Yeah. No. This is a bad example. But yeah.\u003C/p>\u003Cp>Speaker 0: In terms of power, because all it's doing really is presenting the information that has already been handwritten in a nice way. And so that's that's there's a bit less inference there. It's just a bit more summarizing what's already been\u003C/p>\u003Cp>Speaker 1: written. Yeah. But for me, I like seeing stuff in this format. I don't know why rather than, like, looking at, like, random I get it.\u003C/p>\u003Cp>Speaker 0: I get it. I don't know. I don't like reading code comments. Yeah. Yeah.\u003C/p>\u003Cp>To to understand. Right? It's not my style either. Yeah. So, yeah, that's really interesting.\u003C/p>\u003Cp>I suppose you can ask follow-up questions. It'll try its best. Interesting. So what have we discovered so far? So we're talking about chat GPT as I as an as an isolated example that we spoke about 2 forms of code pilot.\u003C/p>\u003Cp>We've spoken about this kind of if you write a comment, it will suggest things to you, which I'm really not a huge fan of the interaction. Though this idea that you can turn it on and off when you just want a bit of input, You know, when there's something meaningful coming up or when you need oh, I need to I don't know. Replace every I need to make this URL safe. Okay. Give me the regex for that.\u003C/p>\u003Cp>Yeah. Reg reg x. Give me the regex for that. Yeah. And it's something you would just Google.\u003C/p>\u003Cp>You'd find you'd maybe go to a little generator online. I I do get that. 
It's interesting turning it on, turning it off. That's fascinating. I may have a think about that.\u003C/p>\u003Cp>And the second is this chat interface, which is contextually aware of your code base.\u003C/p>\u003Cp>Speaker 1: Mhmm.\u003C/p>\u003Cp>Speaker 0: It also allows you to highlight portions and be very specific about the area in which you're asking, but it still understands the context.\u003C/p>\u003Cp>Speaker 1: Yeah. So these\u003C/p>\u003Cp>Speaker 0: are 3 tools used in different ways. Yeah. How else, if at all, does AI kinda come into your your day to day work life, as a developer?\u003C/p>\u003Cp>Speaker 1: Yeah. Was that a was\u003C/p>\u003Cp>Speaker 0: that a little kitty cat?\u003C/p>\u003Cp>Speaker 1: Yeah. Sorry about that. That's fine. I don't know why such, like, as if he would understand. I guess another thing and this is something I do a little bit less because I think this this is still a work in progress.\u003C/p>\u003Cp>But sometimes, like, let's say I'm making a demo app. I wanna, like, just put it out really quickly, but I'm like, maybe I should add some, like, comments or notes to it. I'm, like, I already wrote this all up, and, I'm I'm lazy. So another thing I'll do is, like\u003C/p>\u003Cp>Speaker 0: lazy. Think we're all lazy developers. There's no shame in that.\u003C/p>\u003Cp>Speaker 1: Yeah. So another thing I'll do is, like, I'll highlight it, and maybe I'll, do this where it's, like, generate docs. But what that's really doing is not generating real documentation. It's just generating, like, JS docs. But I found that to be, like, perfect because it doesn't have to think too hard about this.\u003C/p>\u003Cp>And also think about\u003C/p>\u003Cp>Speaker 0: the amount of time it would have taken you to write that versus what it just did. And then you just go in and fix it, adjust it, and so on. It's quite small amounts of code, though. You know, it's pretty pretty clean-cut, but it still takes time to do. 
And there are other generated ways to do it, but this really allows you to do a quick valid Yeah.\u003C/p>\u003Cp>Manual validation. I think that it that's the thing I keep coming back to though, which is the success of using AI as developers. I believe at this point, I believed at the beginning of our chat and I still believe relies on your ability to validate either through a gut feeling or through testing testing that the output is correct. And not everyone has that skill, but I worry that the way these tools are spoken about by companies that say, hey. We're providing this tool to you.\u003C/p>\u003Cp>I think there's a bit of a mismatch between between the marketing and reality. And I think that gap is being closed. But I think I've worked for companies in the past that have this tendency where they they don't say, hey. Our goal is to allow x and come with us on the right as we build that. It's we do x, and then they fall short.\u003C/p>\u003Cp>And I think that's a little bit naughty, and I think that's where a lot of my, hesitation comes from. Because that manual validation, something that today, can you automate? May we'll talk about that in a moment. But, you know, is is often not spoken about. On that note, I have heard about this thing.\u003C/p>\u003Cp>I forgot what it's I forgot the wording for it. But where you feed output of AI into another AI to try and validate its correctness. And if you do that enough times with enough systems, you can grow your confidence of an answer being correct while being a fully automated setup,\u003C/p>\u003Cp>Speaker 1: which is kind of I've never heard that. That's cool. But I will say what\u003C/p>\u003Cp>Speaker 0: it's called.\u003C/p>\u003Cp>Speaker 1: I agree with you on that part, and I think that's how we both even started the conversation because both of us said companies are, like, amping up AI too much. And then, the, Diane is like, y'all are saying the same thing. So, yeah, I agree with you. 
And I think I think that's that is a problem I had where it's like we're, like, copilot and all these AI things are gonna solve all your like, double check the work. Because, like, I've used it, like, double check the work because, like, I've used it.\u003C/p>\u003Cp>I used it the other day. My my company encouraged me to use it to generate test cause we needed to test code snippets in our in our, documentation. But, like, I had to I kept testing the test to make sure they were, like, actually right, not just, like, fake, like, passing.\u003C/p>\u003Cp>Speaker 0: Yeah. And, you know, it's also interesting. So how do I use AI? I use AI every day, and I use it for writing, and I use it for image generation for concepts. I'm a believer that, like, all AI output is based on stolen work, and you've never convinced me otherwise.\u003C/p>\u003Cp>But, but to take the ideas in my head and illustrate what I had in mind, AI is actually fantastic, and then I generally take it to an expert to do properly. You know? It's just a way of of expressing what I mean. Then I use it in terms of writing in 2 ways. 1, to overcome a blank page problem.\u003C/p>\u003Cp>Like, help me think about a structure of a a a blog post that's trying to have these key goals. You know? It's not that you're gonna generate it for me, but you're gonna help me overcome the, I don't know how to start. Or I've written a thing. I need it to feel or be more like x in terms of tone or length or, well, tone or length.\u003C/p>\u003Cp>And you can go away and try and do more with that. So I do it for a lot of rewriting. So the I use it in these ways every day and literally every single day. I also use it for, like, kind of what I would call composite queries where I'm like, here's a picture of a back of a complex cooking instruction or like or like, here's a recipe in German. I live in Berlin.\u003C/p>\u003Cp>My German is not that good still. Here's a recipe in Berlin. Here's a recipe in Berlin. 
Here's a recipe in German. I've just done x.\u003C/p>\u003Cp>What do I do next?\u003C/p>\u003Cp>Speaker 1: Yeah. Interesting. Because I I like I like that idea of the the back of the box. I don't I don't like it for writing. I hate it for writing.\u003C/p>\u003Cp>Is it not interesting? Sometimes So it says I'll use it for writing, but when I try to tell it to do it in a specific style, I feel like I've seen chat g p t be like, hey. What's up? I'm like, oh. Sorry.\u003C/p>\u003Cp>Sorry.\u003C/p>\u003Cp>Speaker 0: But I had a second point there, which which the answer to that, which is that I almost never will take the output for beating. In the image generation world, it still goes to someone who is able to turn that into something actually usable. Yeah. And in the writing world, it almost never gets it right. Even if I just give it a little not take, can you make sure to emphasize x?\u003C/p>\u003Cp>It rewrites other bits, and I'm like, no. That bit was good before. So so I end up again doing the manual validation slash fixing slash, you know, that that last mile piece of work Yeah. Manually. And I have no hesitations about that at all, but it has still saved me 80% of my time.\u003C/p>\u003Cp>And I feel like, you know, I so I said things like, okay. So I use chat gpt for, like, generative creative work, but I won't use it for code. Code is just it is creative.\u003C/p>\u003Cp>Speaker 1: It is. Yeah. So it's not interesting. Same It's the same thing. The same process because Exactly.\u003C/p>\u003Cp>Let me see if I have an example. Here, this is one of my first days at, at the company. I'm like, how do I use this? And it showed me it in JavaScript. I don't even write it in JavaScript, but I'm like, okay.\u003C/p>\u003Cp>Cool. Can you write out the whole code? And then it did. I'm like, that's interesting. What will it look like if it's in Next.\u003C/p>\u003Cp>Js? And it told me. And I'm just and I'm like, so what's this part? 
Like, what is this thing or app protocol? How do I create that?\u003C/p>\u003Cp>Do I need my own URL? And then it tells me. So, like, I'm not I know it's gonna be sometimes wrong, like, with writing. And I know it's I'm not gonna copy and paste this exactly, but I'm like, it's a starting off point because I had no clue what DW or communicating across DWNs is. Like, I just keep hearing my coworkers say that.\u003C/p>\u003Cp>I'm like, what is it? How do I do it? Or like I was\u003C/p>\u003Cp>Speaker 0: acronyms. I use it for acronyms all the time. Like, what does what does this mean? Like, someone's just used an acronym. Good for docs as well.\u003C/p>\u003Cp>Like, how could I make this more approachable? Or where where do you think I might have made assumptions based in my writing? And then they'll give me the answers. It doesn't mean I need to fix them. It means I'm aware of them.\u003C/p>\u003Cp>And some of those assumptions are completely fine. Because, you know, in the world of directors, for example, you know, some, developer relations educator teams I've been on in the past have been, like, how do we enable people from the first line of code they write to be successful with us? And because of the size team we are, I'm not incentivized by that directors today. I'm incentivized by saying, this is the knowledge you must have. And if you have this knowledge, we will take you from there.\u003C/p>\u003Cp>Hence, some assumptions are fine. But I can interpret that and make decisions based on that. That is fact I also like I use it for writing in terms of changing form. So every minor release of directors actually recorded one just today, which will now age this video if I've not done another one since. We we, yeah, do this release notes video that kinda describes what's new, what's important, any breaking changes, community contributions, and so.\u003C/p>\u003Cp>And I will write it as a set of bullet points, and that's my script. I don't script word for word. 
I script bullet points, and I can talk around them. But then I'll take those bullet points. I'll be like, hey.\u003C/p>\u003Cp>Could you just turn this into a announcement blog post? 80% of the way that I'm never gonna publish it as it comes out. As an AI sent, as you said. Like, I can tell. But, again, it will help me find the words.\u003C/p>\u003Cp>Sometimes a bit of nudging and correcting. And then I'll say, great. Summarize this as a discord post. Like, I want it in a tweet link. I want it in a discord post.\u003C/p>\u003Cp>Great. So I've written some material that was handwritten. Yeah. Bullet points. I can write I can use that, but you could generate a script.\u003C/p>\u003Cp>But I I just use that as my script. I get all these other modalities from one source material. So I yeah. So that's another way in which I use it for non code content, which I find very effective.\u003C/p>\u003Cp>Speaker 1: Yeah. I guess sometimes I do use AI. In other words, there's, like, a product called video tap.\u003C/p>\u003Cp>Speaker 0: Yeah. It will\u003C/p>\u003Cp>Speaker 1: be by Chris. Yeah. Yeah. Sometimes I'm gonna use that. Sometimes I'll be like, yeah.\u003C/p>\u003Cp>Turn this into little clips for me. Or So for those watching, just\u003C/p>\u003Cp>Speaker 0: so you know, the way VideoTap works is is you feed it a long form video, it'll spit out a transcript, summaries, some, like, social posts, social, like, little sting videos, some little teaser videos, and so on. That's a big reason that our team is really interested in long form video, like you're watching right now on this platform on right now, is because it once transcribed, which you can do now, even locally for free, You know? You can do it using other external services. But, you know, however you do that, you can then derive lots of other content out of it. 
I don't wanna turn this into\u003C/p>\u003Cp>Speaker 1: a factory of impersonal content where we make video\u003C/p>\u003Cp>Speaker 0: and everything else is just AI generated. Factory of impersonal content where we make video and everything else is just AI generated dribble. But, again, it allows us to quickly move on the other content. Structure wise, often, write written content and video content can follow a similar or same structure. The general points I'm making are the same.\u003C/p>\u003Cp>It might just be a little less conversational. Is a tool that, we're exploring as part of that, actually.\u003C/p>\u003Cp>Speaker 1: I love that. I want my team to get into it, but I I haven't been able to convince them yet. Another well, another thing while you're while you were talking, I was thinking of was, like, sometimes converting things to, like, different well, this is my last point because I know we're getting down to the wire. Like, different, like, languages, I guess. Oh, wait.\u003C/p>\u003Cp>For example, like, my team would and and in in terms of time too because I'm like, now I'm not spending, like, hours trying to figure out how this works. My team was saying, like, hey. Some things are failing because we don't have, like, webpack configured or whatever. And I'm like, okay. So I've seen how they configured it for a particular I think this was for DocuSaurus.\u003C/p>\u003Cp>But I'm like, how could I do something like that for my Next. Js app? Because I'm seeing this error in my Next. Js app. And then I just gave it what it gave me, what my team did for DocuSaurus, and it printed it out for for, Next.\u003C/p>\u003Cp>Js. I tried it out. And on top of that, like, what I really like is the explanation. I feel like back in the day when I was copying stuff from Stack Overflow, it may or may not have the explanation with it, and I'm like, I don't\u003C/p>\u003Cp>Speaker 0: all one that you can understand. Right? 
Yeah.\u003C/p>\u003Cp>Speaker 1: They're all like, you know, it's good.\u003C/p>\u003Cp>Speaker 0: You can ask follow ups, say that bit doesn't make sense, elaborate, and so on. Yeah. So I will say this is where I start to become skeptical again. I will give you just a quick story. You may know Joe Nash.\u003C/p>\u003Cp>Yes. So a friend of mine, and he he quite rightly, like, roasted me in, like, a friendly, funny way back in January. Back in January, I ran a conference which you spoke at.\u003C/p>\u003Cp>Speaker 1: Yeah. You got this.\u003C/p>\u003Cp>Speaker 0: Yeah. Over 3 time zones that we published the time zones for. And I wrote it all in one time zone. I went, hey. Could you please give me, you know, based on this time zone, I want Eastern, I want Indian Standard Time, and I want one other car, remember, whatever whatever one it was.\u003C/p>\u003Cp>And it got it right enough of the way that I manually validate it. Yeah. Yeah. Yeah. And then it started to go off the rails.\u003C/p>\u003Cp>I ended up inviting some speakers, like, an hour early for their talk, And my hosts were not there. There wouldn't have been there at the right time if I didn't realize. So, basically, I couldn't have trusted the output. I did not enough manual validation. Well, I should have just done it in a spreadsheet.\u003C/p>\u003Cp>I I've learned my lesson. But, you know, that's made me skeptical for things like, you know, convert a to b. Even languages, I'm a little bit skeptical, you know, on its translation ability. Yeah.\u003C/p>\u003Cp>Speaker 1: That's interesting. I think you're right. Like, I can't even argue with you that it does it does need manual validation. And maybe I'm spending the same I don't think I'm spending the same amount. Maybe I am taking a lot of time to do the manual validation, but I woulda never wrote this on my own.\u003C/p>\u003Cp>It woulda take well, I I could've, but it woulda take me to the This is\u003C/p>\u003Cp>Speaker 0: different. 
This is also comprehension. So it's teaching you a thing and then writing the code. In in my world, I'm perfectly able to convert time zones in a spreadsheet. I just got lazy, and I trusted it all and then didn't validate it.\u003C/p>\u003Cp>So, you know, the context is a little different. It was a it was a data manipulation slash entry task, and it it got it wrong. And I'm to blame for for trusting its output. But, yeah, that's a funny story that, in fact, even when we are now, this will really date it because I think this will change. We are in the few days where chat g p t is being accused of being lazy because it won't complete the whole task.\u003C/p>\u003Cp>That is the time frame in which we are recording.\u003C/p>\u003Cp>Speaker 1: When it does that.\u003C/p>\u003Cp>Speaker 0: Sure. So this is new. This has just happened, and I imagine this will have stopped. But regardless, it's just started happening in the last few days. So that's that's the time frame in which we are recording this video.\u003C/p>\u003Cp>And I did make a little joke. I was like, you know, how else am I gonna create an international, you know, conference schedule? Anyway, thank you for joining me. Actually, genuinely, now not every episode of these I've been ending the same way. Some have gone in and went, yeah.\u003C/p>\u003Cp>I still will never gonna use it, but now I understand it. Now do you wanna know what? I can see myself genuinely using GitHub auto, copilot, auto completion comment things opt in, like, I turn it on. Not that it's always there and gets under my feet. For things like he says, hey.\u003C/p>\u003Cp>I've done a I've done a get I've done a get in post request. Give me a patch. You know, patch, put, delete, or here's a schema. Can you just generate some endpoints? Yeah.\u003C/p>\u003Cp>Whatever. I also see the chat on the side working quite nicely, actually, because of its contextual awareness of your code. So you know what? Consider me to a degree one over. 
I'm still a little concerned about the correctness.\u003C/p>\u003Cp>But if I start to think about it as it doesn't need to be correct, it needs to show the approach or the thoughts because I do I'm lucky enough to have the skills to validate very quickly just by eye that feels right, that feels incorrect, and then respond accordingly. I'm still am skeptical around companies claiming it can do more than it can today. And as a maintainer of a large open source project, I definitely feel the the sharp end of that too.\u003C/p>\u003Cp>Speaker 1: Yeah.\u003C/p>\u003Cp>Speaker 0: And, you know, people end up feeling like they have a bad experience with our product. Sometimes I will say that it's absolutely us. Sometimes it's bugs. Sometimes it is incorrect or incomplete dots. Absolutely.\u003C/p>\u003Cp>We're not perfect. But sometimes it is hallucinated code, which can only be spat out by one of these tools. And that makes them think, oh, things bust, and it just isn't. And so, that's interesting too. So I remain skeptical on that end.\u003C/p>\u003Cp>And as a result, today, I would still never implement it in docs for that reason. Yeah.\u003C/p>\u003Cp>Speaker 1: That's fair.\u003C/p>\u003Cp>Speaker 0: But as a personal user who can validate an output, yeah, do you know what I see it? And that's what I was really hoping to get. I was hoping to see someone who has touched these tools more.\u003C/p>\u003Cp>Speaker 1: Mhmm. And\u003C/p>\u003Cp>Speaker 0: you very much have, especially in your previous role. Yeah. I wanted to see how do you use it? How does this form part of your practice? And what parts of this might I be able to adopt?\u003C/p>\u003Cp>So, yeah, success. Thank you.\u003C/p>\u003Cp>Speaker 1: Yay. I'm glad. And I I think, it was good to have the to learn that perspective of other people just taking the code and just assuming it'll work because of how\u003C/p>\u003Cp>Speaker 0: An increasing amount marketed it. 
And increasing\u003C/p>\u003Cp>Speaker 1: I never seen that side.\u003C/p>\u003Cp>Speaker 0: Yeah. An increasing amount of, Yeah, which which is challenging, which is challenging. And yeah, I don't know how I don't think that is overcome by education. I think users believe the output is correct. I think that is solved, unfortunately, in time as they just get better at their job.\u003C/p>\u003Cp>Speaker 1: Yeah.\u003C/p>\u003Cp>Speaker 0: But that is that doesn't help me in the meantime, but that's okay. I can suck that. But but selfishly and personally, I've got some stuff. After you wrap up, is there anything else you wanna share? Where can people find you, your company, your projects, your favorite TV shows?\u003C/p>\u003Cp>Speaker 1: Cool. Yeah. You could find me on Twitter. I'm not gonna call it x at blackrobytes. Actually, any social media, you can find me at blackrobytes.\u003C/p>\u003Cp>My blog, blackrobytes.dev. What was the other questions you had?\u003C/p>\u003Cp>Speaker 0: I said, any anything work wise you wanna promote? And then Oh. The TV shows. Come on. Spit spit them out.\u003C/p>\u003Cp>What you're watching?\u003C/p>\u003Cp>Speaker 1: Speed TV shows. Okay. I got you. Okay. Work wise, go to my just go to blackrobites dot dev, and you will find something.\u003C/p>\u003Cp>Speaker 0: B y t e s. Bites. Like coding bites.\u003C/p>\u003Cp>Speaker 1: Yeah. B y t e s. It's like computer bites, not I'm biting you.\u003C/p>\u003Cp>Speaker 0: Or eating food or whatever. Yeah. Okay. Go on.\u003C/p>\u003Cp>Speaker 1: That's better than that.\u003C/p>\u003Cp>Speaker 0: Uh-huh.\u003C/p>\u003Cp>Speaker 1: TV show right now. Oh, wait. I'm a big reality TV show fan. So I've been watching Love is Blind. It's a crazy show.\u003C/p>\u003Cp>I love also invincible, gen v. They're a little gory. But\u003C/p>\u003Cp>Speaker 0: Yeah. So so invincible, everyone, is an 18 rated show, I must add. Because my friend Nathaniel made me watch an episode of it. 
And to be like, watch one episode you're either in or you're not in. And I was so in, we just haven't got around to watching more than the first episode.\u003C/p>\u003Cp>So Oh, what? You know it. Yeah. I I know. And I know it's a season 2.\u003C/p>\u003Cp>Really good. But my kids were there, and they they were, like, they were, like, 1 and, like, 3 and it's like and and it really gets gory towards the end of the first episode so you know it's a little bit of like an adult animation, but they don't bloody understand what's going on. It's fine. And at the end, we were like, really,\u003C/p>\u003Cp>Speaker 1: really? Did you just do that?\u003C/p>\u003Cp>Speaker 0: What the hell? So yes, it is a bit gory. And that's my story of learning it.\u003C/p>\u003Cp>Speaker 1: It's very gory. Keep it awake.\u003C/p>\u003Cp>Speaker 0: It was good. It was but it was on Amazon Prime Video. That's an Amazon Prime Video original, I think. Yeah. Excellent.\u003C/p>\u003Cp>Lovely speaking. Thank you so much. I'm going to can I use these in Codespaces? Because that's what I use.\u003C/p>\u003Cp>Speaker 1: Copilot? Yes. Yeah. Yeah. I think that I'm I I used to never use Visual Studio Code.\u003C/p>\u003Cp>I used to only use Codespaces.\u003C/p>\u003Cp>Speaker 0: I still don't. I just use Codespaces. Alright. Sick. Have a wonderful rest of your day.\u003C/p>\u003Cp>Thank you very much, and goodbye.\u003C/p>\u003Cp>Speaker 1: Bye\u003C/p>","Hello there. My name is Kevin, and welcome to learning things I love to hate. In this show, I stop avoiding technologies that I've been avoiding for one reason or another by inviting my friends and colleagues to teach me more about them. And today, my dear friend, Roselle, hello. Would you like to introduce yourself? Hello. Yes. My name is Roselle Scarlett. I'm a staff developer advocate at a company called TBD, which stands for to be decentralized, not to be determined. And That's the first time I knew that. 
I always was like, when's the company name coming? No. I was I was the same. Even, like, I was at the company, and I'm like, why why is it called TBD? I didn't wanna say anything until I heard someone say what it what it stands for. So you went for to be decentralized? Yes. To be decentralized. What else about me? Before that, I worked at GitHub as a developer advocate, focused on, like, GitHub Copilot things. And, besides that, I just I like to use social media a lot and watch TV. Good. And today's topic is alluding to your previous role. You kind of, have provided the reason why I've asked you on, is about using AI as developers. So the way I start these episodes is I kinda tell you what I know or what I think I know now, what my experiences or my knowledge is. You firstly are welcome to tell me you got it wrong, but we can use that as a basis. And then my skepticisms and hesitancies, we knew that as a basis to just have a chat until we run out of time. At any point, if you wanna share your screen, feel absolutely free to be equally feel no pressure to. So using AI as developers. AI mostly popularized by OpenAI using the chat GPT interface, but since has permeated seemingly every piece of software that exists. My, GitHub came out with GitHub code pilot some time ago. You will fill in that blank for me. And I used it at the very beginning, like, right at the very, very beginning. And code pilot is a tool which basically allows you to to describe 1, describe what you're trying to achieve with your code, and it will give you a suggestion, by writing it in your code editor. And, also, I believe it can do, like, straight up, like, we will try and auto complete the code you're writing, maybe. In any case, my experience and thoughts around AI are the following. 1, I know I've seen some stats, like, hallucination rates or, like, where wrong rates are are dropping, and I truly believe at one point this concern will not be the case. But I simply don't trust it. Right? 
I don't trust it to give me codes that either is correct or is the right way of doing things. You know? So that's one concern. And the other is having used CodePilot at the very beginning, just the first version of CodePilot before it was a bunch of of separate services, which I don't really know that much about, I found it really annoying. Like, I actually found their interaction really frustrating and in the way to the point where I turned it off because it was in the way. So, that's the kind of basis to one other bit of context that might be useful is I work for a company called Directus. We have a Discord with, like, 11,000 people in it. People drop by when they have problems. And community, including our team, see if we can help people and an increasing amount. And we spoke about this briefly on Twitter. An increasing amount people think our stuff's broken, and the code snippets they share are hallucinations. They never existed. They never even nearly existed. And there's a little bit of a maybe I'm slightly salty because, you know, people think your thing's broken, but, actually, it's not even it's not even a code snippet we wrote ever in any point in time. It's just wrong. And so there's something there around how people perceive AI and what it can do for them at their current stage in their career, their project, whatever. So that's my little brain dump to maybe start. Could you maybe give us a bit of a landscape, anything you know about AI for developers, whether that's GitHub specific, other other tools, whatever? Yeah. Right now, what I use for coding with AI is usually GitHub Copilot or ChatGPT. But I do wanna, like, even back up and explain why I like it because you might just be like, oh, just because you worked at GitHub. But I remember and and that's fair. Right? But I remember when I joined GitHub, I did see all of, like, those, like, negative or mixed reviews. There were some people on Twitter being, like, I love it. 
And then there were some people being, like, I think I joined when it just went out in public beta, in, like, September 2021. And that's when I was like, let me see what this thing is. I'm, like, half skeptical or half curious. And at first, I was, like, this does not work. I hate it. It's, like, kinda like how you were like, it's a Yeah. Way. But as I started to use it more and more, I kinda got used to it and figured out, like, how it works on my workflow. I'm like, okay. Maybe I don't always want it to at first, at one point, when I knew what I was doing, I would, like, turn off GitHub Copilot. And then if I, like, needed help or if there was, like, a pattern like, let's say, I already wrote an API call with a get for a get request, and then I was, like, I did a post request. Maybe now I need to do a patch and delete. I'll get turn GitHub Copilot back on because I'm, like, okay. It it knows kind of, like, what boiler plating almost. Yeah. Yeah. Exactly. But then once I started to like, after that, like, I just had it on all the time. And then one thing that makes me passionate about it is that, is the psychological safety aspect. I feel like when I started off as a software engineer back in 2018 or 2019, I don't remember, like, my team was really mean. I was like, they were so mean. I was, like, the only black girl on the team. And, like, it got to the point where I, like, had, like, real life anxiety from my job. Like, I was, like, on the bus on the way home, and I couldn't even stand up because my muscles That's why I think we were, like, kinda, like, discussing or debating or had different points of views because you're like, I don't think it's good for juniors. But I'm like, I do because I wish I had at least something that I can talk back and forth with or, like, share an idea with and, when I was, like, just starting out. But I agree with you. It does sometimes generate wrong code. Or not even sometimes, a lot of times. 
And I do think that I think there's a part of, like, needed education around that. And, also, another thing on on your topic or on your concern about, like, Directus. One thing that the new company I work at has done is I'm sharing my screen right now. Okay. They trained let me I don't wanna show all my questions, so let me hide that. But they created a plug in. So so for, like, chat gpt4, or, like, I think it's, like, a bit like, a more premium mode. You have plug ins. Right? And they create a plug in. And you you, like, feed it. You feed it your corpus of data, your docs. Right? And then you give it you give it some some the custom instructions. I I use chat GPT also. I just don't use it for code. So I understand the concepts. Right? So you give it the custom instructions and tell it who the persona is who will use it, and it'll basically spit out a custom assistant given the context. Yeah. And I think GitHub Copilot has a similar thing too called Copilot for docs. And this the fact that they trained, the fact that TBD trained their doc or chat gpt on these docs has been extremely helpful for me. Because I walk in into this job as, like, I'm a staff developer advocate, so I'm expected to know this stuff. But I never heard of Web5 before, like, I even interview for the company, really. I don't know, like, how to do this stuff. And the documentation is, how will I say, I think it's a work in progress. And also, I'm just not a person. I'm I'm like I I I lead the team that writes our docs. So I think calling things a work in progress is completely fair. Okay. Good. No. No. That look that look is like, yeah. I know what you mean. Because I'm like, I read it, and I'm like, what? Like, there's missing stuff with it's it's still forming. The the product itself is still forming. And I'm also not really a person that reads documentation end to end. Like, that's just not me. Like, I do your learning style. Yeah. That's not my learning style. Good shit. That's fine. Exactly. 
So, like, for example, I for me to build an application at this company, I, like, I wanted to jump in. I wanted to, like, be able to be, like, alright. I'm ready to go and, like, you guys don't have to hold my hand or you all don't have to hold my hand. I just been talking to chat gpt and being like, hey. Why is it saying that the rec record dot send is not a function? Here's my code. And, like, it says, okay. It does exist. I'm like, alright. I know that. Like, you're being a little annoying. But then I I just from reading what they said, I was like, okay. I figured out that it needs to be destructured. And I'm like, okay. Sometimes when I refresh my page, this list disappears, and it goes on and tells me, oh, it's probably because of the asynchronous state setting. When you're calling this function, this, like, state variable is not persisting. I'm like, okay. Cool. So, like, I'm able to, like, go back and forth with it and be like, alright. Maybe I didn't think of this perception, or perspective. And I think it's, it's kinda like rubber ducking, I think, because sometimes I'm not I think I think that's fair. See, I have a few thoughts about what you've just shown me. So the first is Yes. You 1, you made a comment that let's just say the docs are incomplete. Let's just summarize it there or they're not in their state. They're not accurate. Whatever it is, they are not they do not contain the complete information. So, 1, this piece of software is ultimately guessing at the blanks, and I'm not sure I'm comfortable with that. The second is you demonstrated that you have an ability to go, it was being slightly annoying. I I know it was not giving me the right answer. And that is not necessarily a skill everyone has with the immediate counter in my mind of, like, okay. But you and I are experienced developers and, like, I I'm not gatekeeping anyway for myself, but I do have some skill that allows me to assess the correctness that not everyone has. 
And I wonder if that changes the experience or the perceived experience of a user. And then, and then beyond that, I I get it. It allows you to rubber duck to have that thought process back and forth. But I have those 2 concerns. Garbage in, garbage out. And I'm not saying the docs are garbage, but the docs miss things. So how do you how does it reconcile that? Or, like, have you ever found that it does just make things up? Yeah. And the and the other is yeah, how what do you have any thoughts around people who lack that ability to look at something and go, this isn't right or isn't what I want or it's contextually incorrect or whatever it may be? Yeah. I mean, exactly remember my skill set as a junior. But I do think it's kind of interesting because okay. When I was a junior, sometimes I would copy and paste stuff in Stack Overflow, and it wouldn't work. And then I would be, like, alright. Well, maybe I need to keep trying and find something else. So I always I I think it's an interesting case that, like, they're copying stuff from AI. They see that it doesn't work, and then they're just, like, well, it doesn't work. I'm, like, usually my even even when I was just learning, I feel like my initial response is, wait. Hold on. Let me try something else. Let me figure out. Because I will go back and tell ChatGPT or GitHub Copilot. I'll be like, this doesn't work. Like, what are you talking about? Yeah. You have this part wrong. Like, I'll usually feed it the information of, like, here's why you're wrong. And then it'll be, like, oh, sorry. And I don't I don't know. I think I've always told people when I taught them GitHub Copilot things, like, when I do talks, I'm like, treat it like you're it's a coworker. Like, you don't trust your coworker's code all the way. Well, that's the thing. I I think you've made a really interesting point around trust. 1 Yeah. Yeah. But how else were you learning before? Probably through trial and error with code that didn't work. 
This is just surfacing it in a different way. It's just a different, yeah, way of generating or surfacing or searching that code on the web. And then the other side of it is, sure, what's the alternative? You know, my feelings about docs and AI, people who have hung around in our Discord server will know this is today. And we're recording this at the end of 2023. I don't necessarily think it will come out, you know, very, very swiftly. So just so people know because breakthroughs are happening all the time. I will not present codes to people as an educational resource that is AI generated because the moment you present it to people, you are endorsing it. And it doesn't matter how many caveats you put in. The people's behavior with AI seems to not fully get that it hallucinates. Yeah. And so today, it's just not there. It's just it's just not there. And don't get me wrong. I'm still exploring AI. I'm still thinking about it. I'm still thinking, well, how do we lay the foundation. So when that concern falls away, because it will, we can we can deliver something great in terms of the world of education. I never touch AI generated docs, AI generated answers. I never use chatbot interfaces. I do not trust them. Interesting. Even if you try them out after, like, that's the part. Because it's not like they're just But then I but then I am someone who doesn't mind reading documentation. Interesting thing about this is it isn't yeah. It doesn't just have to consume, consume docs. It can consume other, other sources of information around the web, and maybe that's that's kinda interesting. The other the other concern I had, was so, you know, a lot of these trust arguments, I see the counter arguments. It's not the first time I've heard many of them, but it's good to See. Excuse me. It's good to to think about them a little more. The other part of it was the annoyance factor. 
And I know that even in the world of GitHub, they have released CodePilot is not, just the thing that autofills your code based on comments. It's like an ecosystem of services. And I'm kinda curious, bearing in mind that it doesn't just have to be that interface. It doesn't just have to be chat in a browser. Mhmm. What how else do you use AI in your in your practice as a developer or a developer advocate, outside of what original Copilot looks like? I use original Copilot. I use Copilot chat, which is, like, chat gpt. But, I'm contextually aware of your codes and the environment at which it runs. Yeah. I'm trying interesting. The I thought I had a Visual Studio example code example. Hold on. There we go. And then, oh, another one I use is within my terminal. So there's, like, Copilot CLI that I use. So sometimes I'll be like, oh, man. I forgot how to, like, do this bash for me. And I don't have the example right now. My bad. But I'll be like, I forgot how to, like, do this or I forgot how to write this script. And then I'll write in natural language what I wanna do, and then GitHub Copilot will give me a suggestion of what I wanna do of what it thinks I wanna do, and it'll give an explanation of what that command is. And then I can say, revise the query. That's not what I want. Or I can be like, yes. That's what I want. Go ahead and run the code. So if I'd be like, actually, I didn't want it to just, like, grab grab all of these JavaScript files or maybe I wanted to remove node modules or something like that, it'll, like, revise the query for me. But I mostly use the comment fee like, writing the comments and then the chats on the side. Interesting. That's fascinating. That's a really interesting interface. And, of course, I I mean, logically, I know this interface exists. This will this will be one of many tools like this that is contextually aware of your code base. 
That's another issue with, like, chat g p t, for example, is it's not contextually aware of your code base. It just gives you a isolated pace. Exactly. It's so annoying. Whereas this is like, oh, use one reference in the index dotjs, from line 22 to 2279 to, write out all of this. Oh, another thing I want to bring up is I mostly use AI to understand code, to be honest, more than, like, literally write out my code. Yeah. Could I ask you could I ask you a favor? Could you bump your font size up Oh. 2 or 3 times? I can't. Let me, All good. That's good. Doing right here. Cool. So one thing I use it for is, like, understanding that code. So, let's say, This is I don't know. I I use a lot of AI code even though you don't. Alright. So, oh, wait. Let's say, right, I I want a better understanding of this Web 5 package, which I've lit legitimately done in the past. Right? So I've opened up, like, these like, the the type source or the source definition for the code that they're creating. And I'll, like, highlight all of it and be, like, hey, Copilot chat. Can you explain what's going on in this? How do I sometimes I'll ask you questions, like, how do I, like, create a record or whatever. Let me see if I can just give an example here. If I just say explain. Like a slash command. Interesting. Yeah. They have slash commands. And okay. It's using all of these references. That's cool. They're working on it even more. But I I mostly use it for explanations because sometimes I'll look at this and I'm like, my brain is hurting. Like, what is this giving me? So once it breaks down all that stuff for me, it gives me more insight. Like, that's that's my main way of thinking. Say I will say in the skeptics world here, a few things. 1 Yeah. Do do you trust what it says? That's a I mean, I don't know. Go in half, like, half trusting. And I think that's okay. My real thought though is this file you've opened up on the right looks very lovingly commented, which is fantastic. Yeah. No. 
This is a bad example. But yeah. In terms of power, because all it's doing really is presenting the information that has already been handwritten in a nice way. And so that's that's there's a bit less inference there. It's just a bit more summarizing what's already been written. Yeah. But for me, I like seeing stuff in this format. I don't know why rather than, like, looking at, like, random I get it. I get it. I don't know. I don't like reading code comments. Yeah. Yeah. To to understand. Right? It's not my style either. Yeah. So, yeah, that's really interesting. I suppose you can ask follow-up questions. It'll try its best. Interesting. So what have we discovered so far? So we're talking about chat GPT as I as an as an isolated example that we spoke about 2 forms of code pilot. We've spoken about this kind of if you write a comment, it will suggest things to you, which I'm really not a huge fan of the interaction. Though this idea that you can turn it on and off when you just want a bit of input, You know, when there's something meaningful coming up or when you need oh, I need to I don't know. Replace every I need to make this URL safe. Okay. Give me the regex for that. Yeah. Reg reg x. Give me the regex for that. Yeah. And it's something you would just Google. You'd find you'd maybe go to a little generator online. I I do get that. It's interesting turning it on, turning it off. That's fascinating. I may have a think about that. And the second is this chat interface, which is contextually aware of your code base. Mhmm. It also allows you to highlight portions and be very specific about the area in which you're asking, but it still understands the context. Yeah. So these are 3 tools used in different ways. Yeah. How else, if at all, does AI kinda come into your your day to day work life, as a developer? Yeah. Was that a was that a little kitty cat? Yeah. Sorry about that. That's fine. I don't know why such, like, as if he would understand. 
I guess another thing and this is something I do a little bit less because I think this this is still a work in progress. But sometimes, like, let's say I'm making a demo app. I wanna, like, just put it out really quickly, but I'm like, maybe I should add some, like, comments or notes to it. I'm, like, I already wrote this all up, and, I'm I'm lazy. So another thing I'll do is, like lazy. Think we're all lazy developers. There's no shame in that. Yeah. So another thing I'll do is, like, I'll highlight it, and maybe I'll, do this where it's, like, generate docs. But what that's really doing is not generating real documentation. It's just generating, like, JS docs. But I found that to be, like, perfect because it doesn't have to think too hard about this. And also think about the amount of time it would have taken you to write that versus what it just did. And then you just go in and fix it, adjust it, and so on. It's quite small amounts of code, though. You know, it's pretty pretty clean-cut, but it still takes time to do. And there are other generated ways to do it, but this really allows you to do a quick valid Yeah. Manual validation. I think that it that's the thing I keep coming back to though, which is the success of using AI as developers. I believe at this point, I believed at the beginning of our chat and I still believe relies on your ability to validate either through a gut feeling or through testing testing that the output is correct. And not everyone has that skill, but I worry that the way these tools are spoken about by companies that say, hey. We're providing this tool to you. I think there's a bit of a mismatch between between the marketing and reality. And I think that gap is being closed. But I think I've worked for companies in the past that have this tendency where they they don't say, hey. Our goal is to allow x and come with us on the right as we build that. It's we do x, and then they fall short. 
And I think that's a little bit naughty, and I think that's where a lot of my, hesitation comes from. Because that manual validation, something that today, can you automate? May we'll talk about that in a moment. But, you know, is is often not spoken about. On that note, I have heard about this thing. I forgot what it's I forgot the wording for it. But where you feed output of AI into another AI to try and validate its correctness. And if you do that enough times with enough systems, you can grow your confidence of an answer being correct while being a fully automated setup, which is kind of I've never heard that. That's cool. But I will say what it's called. I agree with you on that part, and I think that's how we both even started the conversation because both of us said companies are, like, amping up AI too much. And then, the, Diane is like, y'all are saying the same thing. So, yeah, I agree with you. And I think I think that's that is a problem I had where it's like we're, like, copilot and all these AI things are gonna solve all your like, double check the work. Because, like, I've used it, like, double check the work because, like, I've used it. I used it the other day. My my company encouraged me to use it to generate test cause we needed to test code snippets in our in our, documentation. But, like, I had to I kept testing the test to make sure they were, like, actually right, not just, like, fake, like, passing. Yeah. And, you know, it's also interesting. So how do I use AI? I use AI every day, and I use it for writing, and I use it for image generation for concepts. I'm a believer that, like, all AI output is based on stolen work, and you've never convinced me otherwise. But, but to take the ideas in my head and illustrate what I had in mind, AI is actually fantastic, and then I generally take it to an expert to do properly. You know? It's just a way of of expressing what I mean. Then I use it in terms of writing in 2 ways. 
1, to overcome a blank page problem. Like, help me think about a structure of a a a blog post that's trying to have these key goals. You know? It's not that you're gonna generate it for me, but you're gonna help me overcome the, I don't know how to start. Or I've written a thing. I need it to feel or be more like x in terms of tone or length or, well, tone or length. And you can go away and try and do more with that. So I do it for a lot of rewriting. So the I use it in these ways every day and literally every single day. I also use it for, like, kind of what I would call composite queries where I'm like, here's a picture of a back of a complex cooking instruction or like or like, here's a recipe in German. I live in Berlin. My German is not that good still. Here's a recipe in Berlin. Here's a recipe in Berlin. Here's a recipe in German. I've just done x. What do I do next? Yeah. Interesting. Because I I like I like that idea of the the back of the box. I don't I don't like it for writing. I hate it for writing. Is it not interesting? Sometimes So it says I'll use it for writing, but when I try to tell it to do it in a specific style, I feel like I've seen chat g p t be like, hey. What's up? I'm like, oh. Sorry. Sorry. But I had a second point there, which which the answer to that, which is that I almost never will take the output for beating. In the image generation world, it still goes to someone who is able to turn that into something actually usable. Yeah. And in the writing world, it almost never gets it right. Even if I just give it a little not take, can you make sure to emphasize x? It rewrites other bits, and I'm like, no. That bit was good before. So so I end up again doing the manual validation slash fixing slash, you know, that that last mile piece of work Yeah. Manually. And I have no hesitations about that at all, but it has still saved me 80% of my time. And I feel like, you know, I so I said things like, okay. 
So I use chat gpt for, like, generative creative work, but I won't use it for code. Code is just it is creative. It is. Yeah. So it's not interesting. Same It's the same thing. The same process because Exactly. Let me see if I have an example. Here, this is one of my first days at, at the company. I'm like, how do I use this? And it showed me it in JavaScript. I don't even write it in JavaScript, but I'm like, okay. Cool. Can you write out the whole code? And then it did. I'm like, that's interesting. What will it look like if it's in Next. Js? And it told me. And I'm just and I'm like, so what's this part? Like, what is this thing or app protocol? How do I create that? Do I need my own URL? And then it tells me. So, like, I'm not I know it's gonna be sometimes wrong, like, with writing. And I know it's I'm not gonna copy and paste this exactly, but I'm like, it's a starting off point because I had no clue what DW or communicating across DWNs is. Like, I just keep hearing my coworkers say that. I'm like, what is it? How do I do it? Or like I was acronyms. I use it for acronyms all the time. Like, what does what does this mean? Like, someone's just used an acronym. Good for docs as well. Like, how could I make this more approachable? Or where where do you think I might have made assumptions based in my writing? And then they'll give me the answers. It doesn't mean I need to fix them. It means I'm aware of them. And some of those assumptions are completely fine. Because, you know, in the world of Directus, for example, you know, some, developer relations educator teams I've been on in the past have been, like, how do we enable people from the first line of code they write to be successful with us? And because of the size team we are, I'm not incentivized by that at Directus today. I'm incentivized by saying, this is the knowledge you must have. And if you have this knowledge, we will take you from there. Hence, some assumptions are fine. 
But I can interpret that and make decisions based on that. That is fact I also like I use it for writing in terms of changing form. So every minor release of Directus actually recorded one just today, which will now age this video if I've not done another one since. We we, yeah, do this release notes video that kinda describes what's new, what's important, any breaking changes, community contributions, and so. And I will write it as a set of bullet points, and that's my script. I don't script word for word. I script bullet points, and I can talk around them. But then I'll take those bullet points. I'll be like, hey. Could you just turn this into a announcement blog post? 80% of the way that I'm never gonna publish it as it comes out. As an AI sent, as you said. Like, I can tell. But, again, it will help me find the words. Sometimes a bit of nudging and correcting. And then I'll say, great. Summarize this as a discord post. Like, I want it in a tweet link. I want it in a discord post. Great. So I've written some material that was handwritten. Yeah. Bullet points. I can write I can use that, but you could generate a script. But I I just use that as my script. I get all these other modalities from one source material. So I yeah. So that's another way in which I use it for non code content, which I find very effective. Yeah. I guess sometimes I do use AI. In other words, there's, like, a product called video tap. Yeah. It will be by Chris. Yeah. Yeah. Sometimes I'm gonna use that. Sometimes I'll be like, yeah. Turn this into little clips for me. Or So for those watching, just so you know, the way VideoTap works is is you feed it a long form video, it'll spit out a transcript, summaries, some, like, social posts, social, like, little sting videos, some little teaser videos, and so on. 
That's a big reason that our team is really interested in long form video, like you're watching right now on this platform on right now, is because it once transcribed, which you can do now, even locally for free, You know? You can do it using other external services. But, you know, however you do that, you can then derive lots of other content out of it. I don't wanna turn this into a factory of impersonal content where we make video and everything else is just AI generated. Factory of impersonal content where we make video and everything else is just AI generated dribble. But, again, it allows us to quickly move on the other content. Structure wise, often, write written content and video content can follow a similar or same structure. The general points I'm making are the same. It might just be a little less conversational. Is a tool that, we're exploring as part of that, actually. I love that. I want my team to get into it, but I I haven't been able to convince them yet. Another well, another thing while you're while you were talking, I was thinking of was, like, sometimes converting things to, like, different well, this is my last point because I know we're getting down to the wire. Like, different, like, languages, I guess. Oh, wait. For example, like, my team would and and in in terms of time too because I'm like, now I'm not spending, like, hours trying to figure out how this works. My team was saying, like, hey. Some things are failing because we don't have, like, webpack configured or whatever. And I'm like, okay. So I've seen how they configured it for a particular I think this was for DocuSaurus. But I'm like, how could I do something like that for my Next. Js app? Because I'm seeing this error in my Next. Js app. And then I just gave it what it gave me, what my team did for DocuSaurus, and it printed it out for for, Next. Js. I tried it out. And on top of that, like, what I really like is the explanation. 
I feel like back in the day when I was copying stuff from Stack Overflow, it may or may not have the explanation with it, and I'm like, I don't all one that you can understand. Right? Yeah. They're all like, you know, it's good. You can ask follow ups, say that bit doesn't make sense, elaborate, and so on. Yeah. So I will say this is where I start to become skeptical again. I will give you just a quick story. You may know Joe Nash. Yes. So a friend of mine, and he he quite rightly, like, roasted me in, like, a friendly, funny way back in January. Back in January, I ran a conference which you spoke at. Yeah. You got this. Yeah. Over 3 time zones that we published the time zones for. And I wrote it all in one time zone. I went, hey. Could you please give me, you know, based on this time zone, I want Eastern, I want Indian Standard Time, and I want one other car, remember, whatever whatever one it was. And it got it right enough of the way that I manually validate it. Yeah. Yeah. Yeah. And then it started to go off the rails. I ended up inviting some speakers, like, an hour early for their talk, And my hosts were not there. There wouldn't have been there at the right time if I didn't realize. So, basically, I couldn't have trusted the output. I did not enough manual validation. Well, I should have just done it in a spreadsheet. I I've learned my lesson. But, you know, that's made me skeptical for things like, you know, convert a to b. Even languages, I'm a little bit skeptical, you know, on its translation ability. Yeah. That's interesting. I think you're right. Like, I can't even argue with you that it does it does need manual validation. And maybe I'm spending the same I don't think I'm spending the same amount. Maybe I am taking a lot of time to do the manual validation, but I woulda never wrote this on my own. It woulda take well, I I could've, but it woulda take me to the This is different. This is also comprehension. 
So it's teaching you a thing and then writing the code. In in my world, I'm perfectly able to convert time zones in a spreadsheet. I just got lazy, and I trusted it all and then didn't validate it. So, you know, the context is a little different. It was a it was a data manipulation slash entry task, and it it got it wrong. And I'm to blame for for trusting its output. But, yeah, that's a funny story that, in fact, even when we are now, this will really date it because I think this will change. We are in the few days where chat g p t is being accused of being lazy because it won't complete the whole task. That is the time frame in which we are recording. When it does that. Sure. So this is new. This has just happened, and I imagine this will have stopped. But regardless, it's just started happening in the last few days. So that's that's the time frame in which we are recording this video. And I did make a little joke. I was like, you know, how else am I gonna create an international, you know, conference schedule? Anyway, thank you for joining me. Actually, genuinely, now not every episode of these I've been ending the same way. Some have gone in and went, yeah. I still will never gonna use it, but now I understand it. Now do you wanna know what? I can see myself genuinely using GitHub auto, copilot, auto completion comment things opt in, like, I turn it on. Not that it's always there and gets under my feet. For things like he says, hey. I've done a I've done a get I've done a get in post request. Give me a patch. You know, patch, put, delete, or here's a schema. Can you just generate some endpoints? Yeah. Whatever. I also see the chat on the side working quite nicely, actually, because of its contextual awareness of your code. So you know what? Consider me to a degree one over. I'm still a little concerned about the correctness. 
But if I start to think about it as it doesn't need to be correct, it needs to show the approach or the thoughts because I do I'm lucky enough to have the skills to validate very quickly just by eye that feels right, that feels incorrect, and then respond accordingly. I'm still am skeptical around companies claiming it can do more than it can today. And as a maintainer of a large open source project, I definitely feel the the sharp end of that too. Yeah. And, you know, people end up feeling like they have a bad experience with our product. Sometimes I will say that it's absolutely us. Sometimes it's bugs. Sometimes it is incorrect or incomplete dots. Absolutely. We're not perfect. But sometimes it is hallucinated code, which can only be spat out by one of these tools. And that makes them think, oh, things bust, and it just isn't. And so, that's interesting too. So I remain skeptical on that end. And as a result, today, I would still never implement it in docs for that reason. Yeah. That's fair. But as a personal user who can validate an output, yeah, do you know what I see it? And that's what I was really hoping to get. I was hoping to see someone who has touched these tools more. Mhmm. And you very much have, especially in your previous role. Yeah. I wanted to see how do you use it? How does this form part of your practice? And what parts of this might I be able to adopt? So, yeah, success. Thank you. Yay. I'm glad. And I I think, it was good to have the to learn that perspective of other people just taking the code and just assuming it'll work because of how An increasing amount marketed it. And increasing I never seen that side. Yeah. An increasing amount of, Yeah, which which is challenging, which is challenging. And yeah, I don't know how I don't think that is overcome by education. I think users believe the output is correct. I think that is solved, unfortunately, in time as they just get better at their job. Yeah. 
But that is that doesn't help me in the meantime, but that's okay. I can suck that. But but selfishly and personally, I've got some stuff. After you wrap up, is there anything else you wanna share? Where can people find you, your company, your projects, your favorite TV shows? Cool. Yeah. You could find me on Twitter. I'm not gonna call it x at blackrobytes. Actually, any social media, you can find me at blackrobytes. My blog, blackrobytes.dev. What was the other questions you had? I said, any anything work wise you wanna promote? And then Oh. The TV shows. Come on. Spit spit them out. What you're watching? Speed TV shows. Okay. I got you. Okay. Work wise, go to my just go to blackrobites dot dev, and you will find something. B y t e s. Bites. Like coding bites. Yeah. B y t e s. It's like computer bites, not I'm biting you. Or eating food or whatever. Yeah. Okay. Go on. That's better than that. Uh-huh. TV show right now. Oh, wait. I'm a big reality TV show fan. So I've been watching Love is Blind. It's a crazy show. I love also invincible, gen v. They're a little gory. But Yeah. So so invincible, everyone, is an 18 rated show, I must add. Because my friend Nathaniel made me watch an episode of it. And to be like, watch one episode you're either in or you're not in. And I was so in, we just haven't got around to watching more than the first episode. So Oh, what? You know it. Yeah. I I know. And I know it's a season 2. Really good. But my kids were there, and they they were, like, they were, like, 1 and, like, 3 and it's like and and it really gets gory towards the end of the first episode so you know it's a little bit of like an adult animation, but they don't bloody understand what's going on. It's fine. And at the end, we were like, really, really? Did you just do that? What the hell? So yes, it is a bit gory. And that's my story of learning it. It's very gory. Keep it awake. It was good. It was but it was on Amazon Prime Video. 
That's an Amazon Prime Video original, I think. Yeah. Excellent. Lovely speaking. Thank you so much. I'm going to can I use these in Codespaces? Because that's what I use. Copilot? Yes. Yeah. Yeah. I think that I'm I I used to never use Visual Studio Code. I used to only use Codespaces. I still don't. I just use Codespaces. Alright. Sick. Have a wonderful rest of your day. Thank you very much, and goodbye. Bye",[195,196],"cbb6cf76-75f6-455a-a94b-dccd7551f105","0a6c499d-d9f7-46b8-b4b7-b6a5da546339",[],{"reps":199},[200,256],{"name":201,"sdr":8,"link":202,"countries":203,"states":205},"John Daniels","https://meet.directus.io/meetings/john2144/john-contact-form-meeting",[204],"United States",[206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255],"Michigan","Indiana","Ohio","West Virginia","Kentucky","Virginia","Tennessee","North Carolina","South Carolina","Georgia","Florida","Alabama","Mississippi","New York","MI","IN","OH","WV","KY","VA","TN","NC","SC","GA","FL","AL","MS","NY","Connecticut","CT","Delaware","DE","Maine","ME","Maryland","MD","Massachusetts","MA","New Hampshire","NH","New Jersey","NJ","Pennsylvania","PA","Rhode Island","RI","Vermont","VT","Washington DC","DC",{"name":257,"link":258,"countries":259},"Michelle 
Riber","https://meetings.hubspot.com/mriber",[260,261,262,263,264,265,266,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,289,290,291,292,293,294,295,296,297,298,299,300,301,302,303,304,305,306,307,308,309,310,311,312,313,314,315,316,317,318,319,320,321,322,323,324,325,326,327,328,329,330,331,332,333,334,335,336,337,338,339,340,341,342,343,344,345,346,347,348,349,350,351,352,353,354,355,356,357,358,359,360,361,362,363,364,365,366,367,368,369,370,371,372,373,374,375,376,377,378,379,380,381,382,383,384,385,386,387,388,389,390,391,392,393,394,395,396,397,398,399,400,401,402,403,404,405,406,407,408,409,410,411,412,413,414,415,416,417,418,419,420,421,422,423,424,425,426,427,428,429,430,431,432,433,434,435,436,437,438,439,440,441,442,443,444,445,446,447,237,448,449],"Albania","ALB","Algeria","DZA","Andorra","AND","Angola","AGO","Austria","AUT","Belgium","BEL","Benin","BEN","Bosnia and Herzegovina","BIH","Botswana","BWA","Bulgaria","BGR","Burkina Faso","BFA","Burundi","BDI","Cameroon","CMR","Cape Verde","CPV","Central African Republic","CAF","Chad","TCD","Comoros","COM","Côte d'Ivoire","CIV","Croatia","HRV","Czech Republic","CZE","Democratic Republic of Congo","COD","Denmark","DNK","Djibouti","DJI","Egypt","EGY","Equatorial Guinea","GNQ","Eritrea","ERI","Estonia","EST","Eswatini","SWZ","Ethiopia","ETH","Finland","FIN","France","FRA","Gabon","GAB","Gambia","GMB","Ghana","GHA","Greece","GRC","Guinea","GIN","Guinea-Bissau","GNB","Hungary","HUN","Iceland","ISL","Ireland","IRL","Italy","ITA","Kenya","KEN","Latvia","LVA","Lesotho","LSO","Liberia","LBR","Libya","LBY","Liechtenstein","LIE","Lithuania","LTU","Luxembourg","LUX","Madagascar","MDG","Malawi","MWI","Mali","MLI","Malta","MLT","Mauritania","MRT","Mauritius","MUS","Moldova","MDA","Monaco","MCO","Montenegro","MNE","Morocco","MAR","Mozambique","MOZ","Namibia","NAM","Niger","NER","Nigeria","NGA","North Macedonia","MKD","Norway","NOR","Poland","POL","Portugal","PRT","Republic of 
Congo","COG","Romania","ROU","Rwanda","RWA","San Marino","SMR","São Tomé and Príncipe","STP","Senegal","SEN","Serbia","SRB","Seychelles","SYC","Sierra Leone","SLE","Slovakia","SVK","Slovenia","SVN","Somalia","SOM","South Africa","ZAF","South Sudan","SSD","Spain","ESP","Sudan","SDN","Sweden","SWE","Tanzania","TZA","Togo","TGO","Tunisia","TUN","Uganda","UGA","United Kingdom","GBR","Vatican City","VAT","Zambia","ZMB","Zimbabwe","ZWE","UK","Germany","Netherlands","Switzerland","CH","NL",1773850428342]