-- SurrealDB SQL Export
-- Generated on 2025-01-11 21:26:50

-- Drop existing tables (if any)

-- Create tables
DEFINE TABLE IF NOT EXISTS page SCHEMALESS; 

DEFINE TABLE IF NOT EXISTS section SCHEMALESS;

DEFINE TABLE IF NOT EXISTS taxonomy_term SCHEMALESS;

DEFINE TABLE IF NOT EXISTS page_taxonomy_term SCHEMALESS;
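
-- Not part of the generated export: a sketch of the full-text search setup this site
-- describes, assuming SurrealDB's SEARCH index syntax and that the posts created below
-- land in a `post` table with a `content` field. Adjust table/field names to your build.
DEFINE ANALYZER IF NOT EXISTS simple TOKENIZERS blank FILTERS lowercase;
DEFINE INDEX IF NOT EXISTS post_content_ft ON TABLE post FIELDS content SEARCH ANALYZER simple BM25 HIGHLIGHTS;
-- After import, a query such as `SELECT * FROM post WHERE content @@ 'surrealdb'`
-- would then return full-text matches.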


DEFINE EVENT OVERWRITE create_post_taxonomy_relationships ON TABLE post WHEN $event = "CREATE" THEN {
  LET $cat = $value.categories;
  FOR $c IN $cat {
    -- Reuse the existing category record if there is one, otherwise create it,
    -- then relate the post to it in either case
    LET $id = (SELECT VALUE id FROM category WHERE name = $c)[0] ?? (CREATE category CONTENT { name: $c })[0].id;
    RELATE $value.id->has_taxonomy->$id CONTENT { type: 'category' };
  };
  FOR $c IN $value.tags {
    LET $id = (SELECT VALUE id FROM category WHERE name = $c)[0] ?? (CREATE category CONTENT { name: $c })[0].id;
    RELATE $value.id->has_taxonomy->$id CONTENT { type: 'tag' };
  };
  FOR $c IN $value.series {
    LET $id = (SELECT VALUE id FROM category WHERE name = $c)[0] ?? (CREATE category CONTENT { name: $c })[0].id;
    RELATE $value.id->has_taxonomy->$id CONTENT { type: 'series' };
  };
  FOR $c IN $value.projects {
    LET $id = (SELECT VALUE id FROM category WHERE name = $c)[0] ?? (CREATE category CONTENT { name: $c })[0].id;
    RELATE $value.id->has_taxonomy->$id CONTENT { type: 'project' };
  };
  CREATE log CONTENT { timestamp: time::now(), categories: $cat };
};
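
-- Illustrative only (commented out so the import is unaffected): once the posts below
-- have been created and the event above has fired, the taxonomy graph could be
-- inspected with queries along these lines:
--
--   SELECT title, ->has_taxonomy->category.name AS terms FROM post;
--   SELECT title FROM post WHERE ->has_taxonomy->category.name CONTAINS 'software';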


-- Insert pages

  CREATE post CONTENT {
      title: "Zola Surreal DB site cache",
      slug: "zola-surreal-db-site-cache",
      path: "https://parkerjones.dev/posts/zola-surreal-db-site-cache/",
      content: "<ul>\n<li>Publishes all of your site's contents and relevant metadata into a SurrealDB SQL file.</li>\n<li>Serves this file from your site.</li>\n<li>Loads an in-memory SurrealDB database on the page.</li>\n<li>Enables full-text search over all pages.</li>\n<li>Supports the taxonomy structure and metadata defined in the front matter.</li>\n</ul>\n<hr />\n<h2 id=\"overview\"><strong>Overview</strong></h2>\n<ol>\n<li><strong>Understanding SurrealDB and Browser Compatibility</strong></li>\n<li><strong>Generating a SurrealDB SQL File During Site Build</strong></li>\n<li><strong>Serving the SQL File from Your Site</strong></li>\n<li><strong>Loading SurrealDB in the Browser</strong></li>\n<li><strong>Implementing Full-Text Search and Taxonomy Support</strong></li>\n<li><strong>Putting It All Together</strong></li>\n</ol>\n<hr />\n<h2 id=\"1-understanding-surrealdb-and-browser-compatibility\"><strong>1. Understanding SurrealDB and Browser Compatibility</strong></h2>\n<h3 id=\"what-is-surrealdb\"><strong>What is SurrealDB?</strong></h3>\n<p><a href=\"https://surrealdb.com/\">SurrealDB</a> is a scalable, distributed database designed to be flexible and efficient. It's capable of handling traditional relational data, document data, and graph data, and it provides an SQL-like query language.</p>\n<h3 id=\"browser-compatibility-as-of-january-2025\"><strong>Browser Compatibility as of January 2025</strong></h3>\n<p>As of <strong>January 2025</strong>, SurrealDB has made significant strides in terms of client-side capabilities. With the advancements in WebAssembly (WASM) and web technologies, it's now possible to run SurrealDB in-memory in the browser using WASM.</p>\n<p><strong>Key Points:</strong></p>\n<ul>\n<li><strong>WebAssembly Support:</strong> SurrealDB provides a WASM build that can be executed in modern browsers, allowing you to run an in-memory instance of the database client-side.</li>\n<li><strong>Client-Side Operations:</strong> This enables complex data querying and manipulation directly within the user's browser without the need for server-side operations.</li>\n</ul>\n<hr />\n<h2 id=\"2-generating-a-surrealdb-sql-file-during-site-build\"><strong>2. Generating a SurrealDB SQL File During Site Build</strong></h2>\n<p>To populate the in-memory database in the browser, we'll generate a <strong>SurrealDB SQL file</strong> containing all the site content and metadata.</p>\n<h3 id=\"2-1-extracting-site-content-and-metadata\"><strong>2.1. Extracting Site Content and Metadata</strong></h3>\n<p>We'll use Zola's <a href=\"https://www.getzola.org/documentation/templates/data-files/\">data directory</a> and <a href=\"https://www.getzola.org/documentation/templates/global-variables/\">global variables</a> to access all pages, sections, taxonomies, and their metadata.</p>\n<h3 id=\"2-2-creating-a-template-to-generate-the-sql-file\"><strong>2.2. 
Creating a Template to Generate the SQL File</strong></h3>\n<p>We'll create a template that iterates over all the pages and constructs SQL <code>CREATE</code> and <code>INSERT</code> statements.</p>\n<h4 id=\"create-a-template-file\"><strong>Create a Template File</strong></h4>\n<ul>\n<li><strong>Path:</strong> <code>templates/surrealdb_export.sql</code></li>\n<li><strong>Content:</strong></li>\n</ul>\n<pre data-lang=\"sql\" class=\"language-sql \"><code class=\"language-sql\" data-lang=\"sql\">{% raw %}\n-- SurrealDB SQL Export\n-- Generated on {{ now() | date(format=&quot;%Y-%m-%d %H:%M:%S&quot;) }}\n\n-- Drop existing tables (if any)\nREMOVE TABLE page;\nREMOVE TABLE section;\nREMOVE TABLE taxonomy_term;\nREMOVE TABLE taxonomy_item;\n\n-- Create tables\nDEFINE TABLE page SCHEMAFULL\n    PERMISSIONS FOR ALL NONE;\n\nDEFINE TABLE section SCHEMAFULL\n    PERMISSIONS FOR ALL NONE;\n\nDEFINE TABLE taxonomy_term SCHEMAFULL\n    PERMISSIONS FOR ALL NONE;\n\nDEFINE TABLE taxonomy_item SCHEMAFULL\n    PERMISSIONS FOR ALL NONE;\n\n-- Insert pages\n{% for page in pages %}\nCREATE page:{{ page.slug }} CONTENT {\n    title: {{ page.title | json_encode() }},\n    slug: {{ page.slug | json_encode() }},\n    path: {{ page.permalink | json_encode() }},\n    content: {{ page.content | json_encode() }},\n    summary: {{ page.summary | json_encode() }},\n    date: {{ page.date | date(format=&quot;%Y-%m-%dT%H:%M:%SZ&quot;) | json_encode() }},\n    taxonomies: [\n        {% for key, terms in page.taxonomies %}\n            { name: {{ key | json_encode() }}, terms: [ {{ terms | map(attribute=&quot;name&quot;) | join(&quot;, &quot;) | json_encode() }} ] },\n        {% endfor %}\n    ],\n    metadata: {{ page.extra | json_encode() }}\n};\n\n{% endfor %}\n\n-- Insert sections\n{% for section in sections %}\nCREATE section:{{ section.slug }} CONTENT {\n    title: {{ section.title | json_encode() }},\n    slug: {{ section.slug | json_encode() }},\n    path: {{ section.permalink | json_encode() }},\n    content: {{ section.content | json_encode() }},\n    summary: {{ section.summary | json_encode() }},\n    date: {{ section.date | date(format=&quot;%Y-%m-%dT%H:%M:%SZ&quot;) | json_encode() }},\n    metadata: {{ section.extra | json_encode() }}\n};\n\n{% endfor %}\n\n-- Insert taxonomy terms\n{% for kind, terms in taxonomies %}\n    {% for term in terms %}\nCREATE taxonomy_term:{{ term.name | slugify }} CONTENT {\n    kind: {{ kind | json_encode() }},\n    name: {{ term.name | json_encode() }},\n    slug: {{ term.slug | json_encode() }},\n    path: {{ term.permalink | json_encode() }},\n    pages: [ {% for p in term.pages %} page:{{ p.slug }}, {% endfor %} ]\n};\n    {% endfor %}\n{% endfor %}\n\n-- Relationships between pages and taxonomies can be defined here if needed\n\n{% endraw %}\n</code></pre>\n<p><strong>Explanation:</strong></p>\n<ul>\n<li>We're using Tera templating within a <code>.sql</code> file to generate the SQL statements.</li>\n<li>We loop over all pages, sections, and taxonomies to create SQL <code>CREATE</code> statements.</li>\n<li>JSON data is properly encoded using <code>json_encode()</code> to ensure compatibility.</li>\n</ul>\n<h3 id=\"2-3-configuring-the-template-for-output\"><strong>2.3. 
Configuring the Template for Output</strong></h3>\n<p>We need to ensure Zola processes our template and outputs the generated SQL file.</p>\n<h4 id=\"adjust-config-toml\"><strong>Adjust <code>config.toml</code></strong></h4>\n<p>Add an entry to <code>extra</code> to indicate where to output the SQL file.</p>\n<pre data-lang=\"toml\" class=\"language-toml \"><code class=\"language-toml\" data-lang=\"toml\">[extra]\nsurrealdb_sql_output = &quot;static&#x2F;surrealdb_export.sql&quot;\n</code></pre>\n<p>Alternatively, you can hardcode the output path in your build process.</p>\n<h3 id=\"2-4-create-a-build-script-to-generate-the-sql-file\"><strong>2.4. Create a Build Script to Generate the SQL File</strong></h3>\n<p>We'll use a build script to render the template.</p>\n<h4 id=\"create-generate-surrealdb-sql-sh\"><strong>Create <code>generate_surrealdb_sql.sh</code></strong></h4>\n<pre data-lang=\"bash\" class=\"language-bash \"><code class=\"language-bash\" data-lang=\"bash\">#!&#x2F;bin&#x2F;bash\n\n# Ensure the script exits if any command fails\nset -e\n\n# Render the SQL template\nzola build -o &#x2F;tmp&#x2F;zola_build_sql --template-only\n# Copy the rendered file to the desired location\ncp &#x2F;tmp&#x2F;zola_build_sql&#x2F;surrealdb_export.sql static&#x2F;\n# Clean up temporary build directory\nrm -rf &#x2F;tmp&#x2F;zola_build_sql\n\necho &quot;SurrealDB SQL export generated successfully.&quot;\n</code></pre>\n<p><strong>Explanation:</strong></p>\n<ul>\n<li>We use <code>zola build</code> with <code>--template-only</code> to render only the templates without generating the full site.</li>\n<li>The output is temporarily stored in <code>/tmp/zola_build_sql</code>.</li>\n<li>We copy the rendered <code>surrealdb_export.sql</code> to the <code>static/</code> directory so it gets served by the site.</li>\n<li>We clean up the temporary build directory afterward.</li>\n</ul>\n<h4 id=\"make-the-script-executable\"><strong>Make the Script Executable</strong></h4>\n<pre data-lang=\"bash\" class=\"language-bash \"><code class=\"language-bash\" data-lang=\"bash\">chmod +x generate_surrealdb_sql.sh\n</code></pre>\n<h3 id=\"2-5-update-your-build-process\"><strong>2.5. Update Your Build Process</strong></h3>\n<p>Ensure that the <code>generate_surrealdb_sql.sh</code> script runs before the site is built.</p>\n<h4 id=\"locally\"><strong>Locally</strong></h4>\n<pre data-lang=\"bash\" class=\"language-bash \"><code class=\"language-bash\" data-lang=\"bash\">.&#x2F;generate_surrealdb_sql.sh &amp;&amp; zola build\n</code></pre>\n<h4 id=\"on-netlify\"><strong>On Netlify</strong></h4>\n<p>Update your <code>netlify.toml</code> or build command to include the script.</p>\n<pre data-lang=\"toml\" class=\"language-toml \"><code class=\"language-toml\" data-lang=\"toml\">[build]\n  publish = &quot;public&quot;\n  command = &quot;.&#x2F;generate_surrealdb_sql.sh &amp;&amp; zola build&quot;\n</code></pre>\n<hr />\n<h2 id=\"3-serving-the-sql-file-from-your-site\"><strong>3. Serving the SQL File from Your Site</strong></h2>\n<p>By placing <code>surrealdb_export.sql</code> in the <code>static/</code> directory, Zola will serve it at the root of your site.</p>\n<ul>\n<li><strong>URL:</strong> <code>https://yourdomain.com/surrealdb_export.sql</code></li>\n</ul>\n<p>Ensure that it's accessible and not blocked by any server rules.</p>\n<hr />\n<h2 id=\"4-loading-surrealdb-in-the-browser\"><strong>4. Loading SurrealDB in the Browser</strong></h2>\n<h3 id=\"4-1-using-the-surrealdb-webassembly-build\"><strong>4.1. 
Using the SurrealDB WebAssembly Build</strong></h3>\n<p>As of January 2025, SurrealDB provides a WebAssembly (WASM) build that can be run in the browser.</p>\n<h4 id=\"including-surrealdb-wasm-in-your-site\"><strong>Including SurrealDB WASM in Your Site</strong></h4>\n<ul>\n<li>\n<p><strong>Download the SurrealDB WASM and JavaScript bindings:</strong></p>\n<ul>\n<li><code>surrealdb.wasm</code></li>\n<li><code>surrealdb.js</code></li>\n</ul>\n</li>\n<li>\n<p><strong>Place them in your <code>static/js/</code> directory.</strong></p>\n</li>\n</ul>\n<h3 id=\"4-2-initializing-surrealdb-in-the-browser\"><strong>4.2. Initializing SurrealDB in the Browser</strong></h3>\n<pre data-lang=\"html\" class=\"language-html \"><code class=\"language-html\" data-lang=\"html\">&lt;script&gt;\n  &#x2F;&#x2F; Load the SurrealDB WASM module\n  Surreal.initWasm({\n    path: &#x27;&#x2F;js&#x2F;surrealdb.wasm&#x27;\n  }).then(async () =&gt; {\n    &#x2F;&#x2F; Create a new in-memory SurrealDB instance\n    const db = new Surreal();\n\n    &#x2F;&#x2F; Sign in as a root user (authentication can be bypassed in-memory)\n    await db.signin({ user: &#x27;root&#x27;, pass: &#x27;root&#x27; });\n\n    &#x2F;&#x2F; Select a namespace and database\n    await db.use({ ns: &#x27;myapp&#x27;, db: &#x27;mydb&#x27; });\n\n    &#x2F;&#x2F; Fetch the SQL file\n    const response = await fetch(&#x27;&#x2F;surrealdb_export.sql&#x27;);\n    const sql = await response.text();\n\n    &#x2F;&#x2F; Execute the SQL statements to populate the database\n    await db.query(sql);\n\n    &#x2F;&#x2F; Now the database is ready for use\n    &#x2F;&#x2F; You can perform queries, full-text search, etc.\n\n    &#x2F;&#x2F; Example query\n    const pages = await db.select(&#x27;page&#x27;);\n\n    console.log(&#x27;Pages:&#x27;, pages);\n\n    &#x2F;&#x2F; Implement search functionality and other features as needed\n\n  }).catch((e) =&gt; {\n    console.error(&#x27;Error initializing SurrealDB:&#x27;, e);\n  });\n&lt;&#x2F;script&gt;\n</code></pre>\n<p><strong>Explanation:</strong></p>\n<ul>\n<li><strong>Surreal.initWasm:</strong> Initializes the WASM module.</li>\n<li><strong>db.signin:</strong> Signs into the database (for in-memory, the credentials can be default).</li>\n<li><strong>db.use:</strong> Selects the namespace and database.</li>\n<li><strong>Fetching and Executing SQL:</strong>\n<ul>\n<li>We fetch the SQL file generated during the build.</li>\n<li>Execute it using <code>db.query(sql)</code>, which runs all the <code>CREATE</code> and <code>INSERT</code> statements to populate the database.</li>\n</ul>\n</li>\n</ul>\n<hr />\n<h2 id=\"5-implementing-full-text-search-and-taxonomy-support\"><strong>5. Implementing Full-Text Search and Taxonomy Support</strong></h2>\n<p>With the database populated, you can perform complex queries, including full-text search and navigating taxonomies.</p>\n<h3 id=\"5-1-full-text-search\"><strong>5.1. 
Full-Text Search</strong></h3>\n<p>SurrealDB supports full-text search indexes.</p>\n<h4 id=\"define-full-text-search-indexes-in-the-sql\"><strong>Define Full-Text Search Indexes in the SQL</strong></h4>\n<p>Modify your SQL template to include index definitions.</p>\n<pre data-lang=\"sql\" class=\"language-sql \"><code class=\"language-sql\" data-lang=\"sql\">-- Define full-text search index on page content\nDEFINE INDEX page_content_ft ON TABLE page FIELDS content SEARCH ANALYZER bm25 TOKENIZERS blank;\n\n-- Similarly, you can define indexes on other fields as needed\n</code></pre>\n<p><strong>Note:</strong> Ensure that your version of SurrealDB supports the specific index definitions. Syntax may vary; refer to the latest documentation.</p>\n<h4 id=\"performing-a-full-text-search-in-javascript\"><strong>Performing a Full-Text Search in JavaScript</strong></h4>\n<pre data-lang=\"javascript\" class=\"language-javascript \"><code class=\"language-javascript\" data-lang=\"javascript\">&#x2F;&#x2F; Function to perform a full-text search\nasync function searchPages(query) {\n  &#x2F;&#x2F; Use the &#x27;search&#x27; function in your SurrealQL query\n  const results = await db.query(`\n    SELECT * FROM page WHERE search(content, $terms)\n  `, { terms: query });\n\n  return results[0].result;\n}\n\n&#x2F;&#x2F; Example usage\nconst searchResults = await searchPages(&#x27;your search terms&#x27;);\nconsole.log(&#x27;Search Results:&#x27;, searchResults);\n</code></pre>\n<h3 id=\"5-2-taxonomy-structure-and-metadata\"><strong>5.2. Taxonomy Structure and Metadata</strong></h3>\n<p>You can query pages by taxonomy terms.</p>\n<h4 id=\"querying-pages-by-taxonomy-term\"><strong>Querying Pages by Taxonomy Term</strong></h4>\n<pre data-lang=\"javascript\" class=\"language-javascript \"><code class=\"language-javascript\" data-lang=\"javascript\">async function getPagesByTaxonomy(termName) {\n  const results = await db.query(`\n    SELECT * FROM page WHERE $term INSIDE taxonomies[*].terms\n  `, { term: termName });\n\n  return results[0].result;\n}\n\n&#x2F;&#x2F; Example usage\nconst pages = await getPagesByTaxonomy(&#x27;tech&#x27;);\nconsole.log(&#x27;Pages in Tech taxonomy:&#x27;, pages);\n</code></pre>\n<p><strong>Explanation:</strong></p>\n<ul>\n<li><strong>taxonomies[*].terms:</strong> Accesses all terms in the taxonomies array within each page.</li>\n<li><strong>$term INSIDE terms:</strong> Checks if the term is present.</li>\n</ul>\n<h3 id=\"5-3-accessing-metadata-defined-in-front-matter\"><strong>5.3. Accessing Metadata Defined in Front Matter</strong></h3>\n<p>The <code>metadata</code> field in each page or section contains the <code>extra</code> data from the front matter.</p>\n<pre data-lang=\"javascript\" class=\"language-javascript \"><code class=\"language-javascript\" data-lang=\"javascript\">async function getPageMetadata(slug) {\n  const page = await db.select(`page:${slug}`);\n  return page.metadata;\n}\n\n&#x2F;&#x2F; Example usage\nconst metadata = await getPageMetadata(&#x27;my-page-slug&#x27;);\nconsole.log(&#x27;Page Metadata:&#x27;, metadata);\n</code></pre>\n<hr />\n<h2 id=\"6-putting-it-all-together\"><strong>6. Putting It All Together</strong></h2>\n<h3 id=\"6-1-search-interface\"><strong>6.1. 
Search Interface</strong></h3>\n<p>Create a search input field in your HTML.</p>\n<pre data-lang=\"html\" class=\"language-html \"><code class=\"language-html\" data-lang=\"html\">&lt;input type=&quot;text&quot; id=&quot;search-input&quot; placeholder=&quot;Search...&quot;&gt;\n&lt;div id=&quot;search-results&quot;&gt;&lt;&#x2F;div&gt;\n</code></pre>\n<h3 id=\"6-2-javascript-to-handle-search\"><strong>6.2. JavaScript to Handle Search</strong></h3>\n<pre data-lang=\"javascript\" class=\"language-javascript \"><code class=\"language-javascript\" data-lang=\"javascript\">document.getElementById(&#x27;search-input&#x27;).addEventListener(&#x27;input&#x27;, async (event) =&gt; {\n  const query = event.target.value;\n  \n  if (query.length &gt; 2) { &#x2F;&#x2F; Start searching after 3 characters\n    const results = await searchPages(query);\n    displaySearchResults(results);\n  } else {\n    clearSearchResults();\n  }\n});\n\nfunction displaySearchResults(results) {\n  const resultsDiv = document.getElementById(&#x27;search-results&#x27;);\n  resultsDiv.innerHTML = &#x27;&#x27;;\n\n  results.forEach((page) =&gt; {\n    const pageLink = document.createElement(&#x27;a&#x27;);\n    pageLink.href = page.path;\n    pageLink.textContent = page.title;\n    resultsDiv.appendChild(pageLink);\n  });\n}\n\nfunction clearSearchResults() {\n  const resultsDiv = document.getElementById(&#x27;search-results&#x27;);\n  resultsDiv.innerHTML = &#x27;&#x27;;\n}\n</code></pre>\n<h3 id=\"6-3-handling-taxonomy-navigation\"><strong>6.3. Handling Taxonomy Navigation</strong></h3>\n<pre data-lang=\"javascript\" class=\"language-javascript \"><code class=\"language-javascript\" data-lang=\"javascript\">&#x2F;&#x2F; Example function to list all taxonomy terms\nasync function listTaxonomyTerms(kind) {\n  const results = await db.query(`\n    SELECT * FROM taxonomy_term WHERE kind = $kind\n  `, { kind });\n\n  return results[0].result;\n}\n\n&#x2F;&#x2F; Display taxonomy terms\nconst terms = await listTaxonomyTerms(&#x27;tags&#x27;);\nterms.forEach((term) =&gt; {\n  console.log(&#x27;Term:&#x27;, term.name);\n});\n</code></pre>\n<hr />\n<h2 id=\"considerations-and-best-practices\"><strong>Considerations and Best Practices</strong></h2>\n<h3 id=\"data-size-and-performance\"><strong>Data Size and Performance</strong></h3>\n<ul>\n<li><strong>Data Size:</strong> Loading all site content into the browser can be heavy, especially for large sites.</li>\n<li><strong>Optimization:</strong> Consider limiting the amount of data, or implement lazy loading strategies.</li>\n</ul>\n<h3 id=\"security\"><strong>Security</strong></h3>\n<ul>\n<li><strong>Client-Side Data Exposure:</strong> All data loaded into the client is accessible to users. 
Avoid including sensitive information.</li>\n<li><strong>Data Sanitization:</strong> Ensure all data is properly sanitized to prevent XSS attacks.</li>\n</ul>\n<h3 id=\"browser-compatibility\"><strong>Browser Compatibility</strong></h3>\n<ul>\n<li><strong>WASM Support:</strong> Ensure your target audience uses browsers that support WebAssembly.</li>\n<li><strong>Fallbacks:</strong> Provide fallback mechanisms or messages for unsupported browsers.</li>\n</ul>\n<h3 id=\"licensing-and-permissions\"><strong>Licensing and Permissions</strong></h3>\n<ul>\n<li><strong>SurrealDB License:</strong> Ensure compliance with SurrealDB's licensing terms when using it client-side.</li>\n<li><strong>Attribution:</strong> Provide necessary attributions if required.</li>\n</ul>\n<hr />\n<h2 id=\"alternative-solutions\"><strong>Alternative Solutions</strong></h2>\n<p>If using SurrealDB client-side becomes challenging due to data size, performance, or compatibility, consider these alternatives:</p>\n<h3 id=\"lunr-js\"><strong>Lunr.js</strong></h3>\n<ul>\n<li>A lightweight full-text search library for the browser.</li>\n<li>Indexes can be generated during the build process.</li>\n<li>Suitable for static sites.</li>\n</ul>\n<h3 id=\"elasticlunr-js\"><strong>Elasticlunr.js</strong></h3>\n<ul>\n<li>Similar to Lunr.js but with additional features.</li>\n</ul>\n<h3 id=\"algolia\"><strong>Algolia</strong></h3>\n<ul>\n<li>Provides hosted search solutions.</li>\n<li>Can be integrated with static sites via APIs.</li>\n</ul>\n<h3 id=\"stork-search\"><strong>Stork Search</strong></h3>\n<ul>\n<li>A WASM-based full-text search engine designed for static sites.</li>\n<li>Can generate search indexes at build time.</li>\n</ul>\n<hr />\n<h2 id=\"conclusion\"><strong>Conclusion</strong></h2>\n<p>By generating a SurrealDB SQL file during your site's build process and leveraging the in-browser capabilities of SurrealDB with WebAssembly, you can create a powerful and dynamic search experience on your static site.</p>\n<ul>\n<li><strong>Benefits:</strong>\n<ul>\n<li>Leverages the power of SurrealDB's querying and indexing.</li>\n<li>Provides a rich, interactive experience without server-side dependencies.</li>\n</ul>\n</li>\n</ul>\n<hr />\n<h2 id=\"next-steps\"><strong>Next Steps</strong></h2>\n<ul>\n<li><strong>Implement the steps above in your project.</strong></li>\n<li><strong>Test thoroughly across different browsers and devices.</strong></li>\n<li><strong>Monitor performance and optimize as needed.</strong></li>\n</ul>\n<hr />\n<p><strong>If you need further assistance with any of these steps or have additional questions, feel free to ask! I'm here to help you create the best possible experience for your site.</strong></p>\n",
      summary: null,
      date: "2025-01-10T00:00:00Z",
      metadata: {},
      tags: [],
      categories: [],
      series: [],
      projects: []
  };

  CREATE post CONTENT {
      title: "featured post",
      slug: "featured-post",
      path: "https://parkerjones.dev/posts/featured-post/",
      content: "<p>what in tarnation</p>\n<span id=\"continue-reading\"></span>",
      summary: "<p>what in tarnation</p>\n",
      date: "2025-01-08T00:00:00Z",
      metadata: {},
      tags: ["deleteme","todo","featured"],
      categories: [],
      series: [],
      projects: []
  };

  CREATE post CONTENT {
      title: "create a new post script",
      slug: "create-a-new-post-script",
      path: "https://parkerjones.dev/posts/create-a-new-post-script/",
      content: "<p>ok so, #zola, #ai</p>\n<pre data-lang=\"bash\" class=\"language-bash \"><code class=\"language-bash\" data-lang=\"bash\">#!&#x2F;bin&#x2F;bash\n\n# Determine the directory of the script\nSCRIPT_DIR=&quot;$(cd &quot;$(dirname &quot;${BASH_SOURCE[0]}&quot;)&quot; &amp;&amp; pwd)&quot;\n\n# Determine the project root directory (assuming scripts folder is in the project root)\nPROJECT_ROOT=&quot;$(dirname &quot;$SCRIPT_DIR&quot;)&quot;\n\n# Set the content directory path\ncontent_dir=&quot;$PROJECT_ROOT&#x2F;content&quot;\n\n# Ensure the content directory exists\nif [ ! -d &quot;$content_dir&quot; ]; then\n  echo &quot;Content directory not found at $content_dir. Please ensure you&#x27;re running the script within your Zola project structure.&quot;\n  exit 1\nfi\n\n# Get the title from arguments or set a default\nif [ &quot;$#&quot; -gt 0 ]; then\n  title=&quot;$*&quot;\nelse\n  datetime=$(date +&quot;%Y-%m-%d %H:%M:%S&quot;)\n  title=&quot;new draft $datetime&quot;\nfi\n\n# Generate the date for the frontmatter\ndate_today=$(date +&quot;%Y-%m-%d&quot;)\n\n# Clean the title to create a filename-friendly string\ncleaned_title=$(echo &quot;$title&quot; | tr &#x27;[:upper:]&#x27; &#x27;[:lower:]&#x27; | tr &#x27; &#x27; &#x27;-&#x27; | tr -cd &#x27;[:alnum:]-&#x27;)\n\n# Set the filename and path\nfilename=&quot;${cleaned_title}.md&quot;\nfilepath=&quot;${content_dir}&#x2F;${filename}&quot;\n\n# Create the frontmatter in the markdown file\ncat &lt;&lt;EOF &gt; &quot;$filepath&quot;\n+++\ntitle = &quot;$title&quot;\ndate = $date_today\n\n[taxonomies]\ncategories = []\ntags = []\n+++\nEOF\n\necho &quot;New markdown file created at $filepath&quot;\n\n# Open the new markdown file in Neovim\nnvim &quot;$filepath&quot;\n</code></pre>\n<p>#ai</p>\n<hr />\n<p><strong>Explanation of the Updates:</strong></p>\n<ol>\n<li>\n<p><strong>Added Neovim Command:</strong></p>\n<pre data-lang=\"bash\" class=\"language-bash \"><code class=\"language-bash\" data-lang=\"bash\"># Open the new markdown file in Neovim\nnvim &quot;$filepath&quot;\n</code></pre>\n<ul>\n<li>After creating the new markdown file and confirming its creation, this line opens the file in #neovim.</li>\n<li>It uses the <code>nvim</code> command followed by the path to the new file.</li>\n</ul>\n</li>\n<li>\n<p><strong>Ensuring Neovim is Installed:</strong></p>\n<ul>\n<li>The script assumes that Neovim (<code>nvim</code>) is installed and available in your system's PATH.</li>\n<li>If Neovim isn't installed, you'll need to install it, or you can modify the script to use a different editor (e.g., <code>vim</code>, <code>nano</code>, <code>code</code>, etc.).</li>\n</ul>\n</li>\n</ol>\n<hr />\n<p><strong>Usage Instructions:</strong></p>\n<ol>\n<li>\n<p><strong>Place the Script:</strong></p>\n<ul>\n<li>\n<p>Save the script as <code>new_post.sh</code> inside the <code>scripts</code> folder of your project:</p>\n<pre><code>your-project&#x2F;\n├── content&#x2F;\n├── scripts&#x2F;\n│   └── new_post.sh\n└── ...\n</code></pre>\n</li>\n</ul>\n</li>\n<li>\n<p><strong>Make the Script Executable:</strong></p>\n<pre data-lang=\"bash\" class=\"language-bash \"><code class=\"language-bash\" data-lang=\"bash\">chmod +x scripts&#x2F;new_post.sh\n</code></pre>\n</li>\n<li>\n<p><strong>Run the Script:</strong></p>\n<ul>\n<li>\n<p><strong>From the Project Root:</strong></p>\n<pre data-lang=\"bash\" class=\"language-bash \"><code class=\"language-bash\" data-lang=\"bash\">.&#x2F;scripts&#x2F;new_post.sh &quot;My Custom Blog Post 
Title&quot;\n</code></pre>\n</li>\n<li>\n<p><strong>From the Scripts Directory:</strong></p>\n<pre data-lang=\"bash\" class=\"language-bash \"><code class=\"language-bash\" data-lang=\"bash\">cd scripts\n.&#x2F;new_post.sh &quot;My Custom Blog Post Title&quot;\n</code></pre>\n</li>\n<li>\n<p><strong>Without a Title (uses default):</strong></p>\n<pre data-lang=\"bash\" class=\"language-bash \"><code class=\"language-bash\" data-lang=\"bash\">.&#x2F;scripts&#x2F;new_post.sh\n</code></pre>\n</li>\n</ul>\n</li>\n<li>\n<p><strong>Editing the New Post:</strong></p>\n<ul>\n<li>\n<p>After running the script, it will:</p>\n<ul>\n<li>Create a new markdown file in your <code>content</code> directory.</li>\n<li>Output a confirmation message showing the path to the new file.</li>\n<li>Immediately open the new file in Neovim for editing.</li>\n</ul>\n</li>\n<li>\n<p>For example:</p>\n<pre><code>New markdown file created at &#x2F;path&#x2F;to&#x2F;your&#x2F;project&#x2F;content&#x2F;my-custom-blog-post-title.md\n</code></pre>\n<p>Neovim will open with the new markdown file loaded, ready for you to start writing your blog post.</p>\n</li>\n</ul>\n</li>\n</ol>\n<hr />\n<p><strong>Additional Notes:</strong></p>\n<ul>\n<li>\n<p><strong>Customizing the Editor:</strong></p>\n<ul>\n<li>\n<p>If you prefer to use a different editor, replace <code>nvim</code> in the script with your editor's command.</p>\n<ul>\n<li>\n<p><strong>For Vim:</strong></p>\n<pre data-lang=\"bash\" class=\"language-bash \"><code class=\"language-bash\" data-lang=\"bash\">vim &quot;$filepath&quot;\n</code></pre>\n</li>\n<li>\n<p><strong>For Nano:</strong></p>\n<pre data-lang=\"bash\" class=\"language-bash \"><code class=\"language-bash\" data-lang=\"bash\">nano &quot;$filepath&quot;\n</code></pre>\n</li>\n<li>\n<p><strong>For Visual Studio Code:</strong></p>\n<pre data-lang=\"bash\" class=\"language-bash \"><code class=\"language-bash\" data-lang=\"bash\">code &quot;$filepath&quot;\n</code></pre>\n<ul>\n<li>\n<p>You might need to add the <code>--wait</code> flag if you want the script to pause until the editor is closed:</p>\n<pre data-lang=\"bash\" class=\"language-bash \"><code class=\"language-bash\" data-lang=\"bash\">code --wait &quot;$filepath&quot;\n</code></pre>\n</li>\n</ul>\n</li>\n</ul>\n</li>\n</ul>\n</li>\n<li>\n<p><strong>Handling Editor Availability:</strong></p>\n<ul>\n<li>Ensure that the editor you specify is installed and accessible from the command line.</li>\n<li>If the editor is not found, the script will output an error like <code>command not found: nvim</code>.</li>\n</ul>\n</li>\n<li>\n<p><strong>Running the Script from Anywhere:</strong></p>\n<ul>\n<li>\n<p>Because the script calculates paths relative to its own location, you can run it from any directory, and it will still correctly locate your project's <code>content</code> directory. 
#web</p>\n<pre data-lang=\"bash\" class=\"language-bash \"><code class=\"language-bash\" data-lang=\"bash\">&#x2F;full&#x2F;path&#x2F;to&#x2F;your&#x2F;project&#x2F;scripts&#x2F;new_post.sh &quot;Another Blog Post&quot;\n</code></pre>\n</li>\n</ul>\n</li>\n<li>\n<p><strong>Error Handling:</strong></p>\n<ul>\n<li>The script includes checks to ensure the <code>content</code> directory exists.</li>\n<li>If the <code>content</code> directory is missing or mislocated, you'll receive an error message.</li>\n</ul>\n</li>\n<li>\n<p><strong>Shebang Line:</strong></p>\n<ul>\n<li>The shebang line (<code>#!/bin/bash</code>) assumes that <code>bash</code> is located at <code>/bin/bash</code>.</li>\n<li>If your system uses a different path for <code>bash</code>, adjust the shebang line accordingly (e.g., <code>#!/usr/bin/env bash</code>).</li>\n</ul>\n</li>\n<li>\n<p><strong>Permissions:</strong></p>\n<ul>\n<li>Make sure you have the necessary permissions to create files in the <code>content</code> directory and to execute the script.</li>\n</ul>\n</li>\n</ul>\n<hr />\n<p><strong>Summary:</strong></p>\n<p>With this updated script, you can streamline your blogging workflow:</p>\n<ul>\n<li><strong>Create:</strong> Run the script to generate a new markdown file with the appropriate frontmatter.</li>\n<li><strong>Edit:</strong> The script automatically opens the new file in Neovim, allowing you to start writing immediately.</li>\n</ul>\n<p>Feel free to customize the script to fit your workflow or editor preferences. If you have any questions or need further assistance, don't hesitate to ask!</p>\n<p>#bfs</p>\n",
      summary: null,
      date: "2025-01-06T00:00:00Z",
      metadata: {},
      tags: ["zola","bash","ai","o1-preview-2024-09-12"],
      categories: ["software"],
      series: ["zola"],
      projects: ["scripts","zola"]
  };

  CREATE post CONTENT {
      title: "Databases in the Browser",
      slug: "databses-in-the-browser",
      path: "https://parkerjones.dev/posts/databses-in-the-browser/",
      content: "",
      summary: null,
      date: "2025-01-06T00:00:00Z",
      metadata: {},
      tags: ["surrealdb","sqllite"],
      categories: ["software"],
      series: ["databases"],
      projects: []
  };

  CREATE post CONTENT {
      title: "wtf is columnar data",
      slug: "wtf-is-columnar-data",
      path: "https://parkerjones.dev/posts/wtf-is-columnar-data/",
      content: "<h3 id=\"what-is-columnar-storage\">What is Columnar Storage?</h3>\n<p>Columnar storage is a way of organizing data in a database where values of a single column are stored together, rather than storing all the data for a single row together. This storage method is optimized for <strong>read-heavy, analytical workloads</strong> where queries often operate on a subset of columns over many rows.</p>\n<h4 id=\"how-it-works\">How It Works:</h4>\n<p>In <strong>row-oriented databases</strong>, like MySQL or PostgreSQL, data is stored row by row:</p>\n<pre><code>Row 1: [ID1, Name1, Age1, Salary1]\nRow 2: [ID2, Name2, Age2, Salary2]\n</code></pre>\n<p>In <strong>column-oriented databases</strong>, data is stored column by column:</p>\n<pre><code>ID:   [ID1, ID2, ID3, ...]\nName: [Name1, Name2, Name3, ...]\nAge:  [Age1, Age2, Age3, ...]\nSalary: [Salary1, Salary2, Salary3, ...]\n</code></pre>\n<hr />\n<h3 id=\"key-differences-between-columnar-and-row-oriented-storage\">Key Differences Between Columnar and Row-Oriented Storage</h3>\n<table><thead><tr><th><strong>Aspect</strong></th><th><strong>Row-Oriented Storage</strong></th><th><strong>Columnar Storage</strong></th></tr></thead><tbody>\n<tr><td><strong>Data Layout</strong></td><td>Stores data row by row.</td><td>Stores data column by column.</td></tr>\n<tr><td><strong>Best Use Case</strong></td><td>Transactional workloads (OLTP) like updating user accounts, processing orders, etc.</td><td>Analytical workloads (OLAP) like aggregations, filtering, and reporting.</td></tr>\n<tr><td><strong>Query Performance</strong></td><td>Good for queries involving entire rows.</td><td>Fast for queries involving a few columns over many rows.</td></tr>\n<tr><td><strong>Data Compression</strong></td><td>Less effective; different data types in rows make compression harder.</td><td>Highly compressible; similar data types in columns lead to better compression.</td></tr>\n<tr><td><strong>Write Performance</strong></td><td>Faster for frequent row-level updates.</td><td>Slower for updates, as entire columns may need rewriting.</td></tr>\n<tr><td><strong>Examples</strong></td><td>MySQL, PostgreSQL, SQLite.</td><td>DuckDB, Apache Parquet, ClickHouse.</td></tr>\n</tbody></table>\n<hr />\n<h3 id=\"why-columnar-storage-matters\"><strong>Why Columnar Storage Matters</strong></h3>\n<h4 id=\"1-efficient-analytical-queries\">1. <strong>Efficient Analytical Queries</strong></h4>\n<p>Columnar storage is optimized for queries that:</p>\n<ul>\n<li>Retrieve specific columns (e.g., <code>SELECT Age FROM table</code>).</li>\n<li>Perform aggregates over large datasets (e.g., <code>SUM(Salary)</code> or <code>AVG(Age)</code>).</li>\n</ul>\n<p>By only reading the columns needed, columnar storage avoids wasting time scanning irrelevant data.</p>\n<h4 id=\"2-better-compression\">2. <strong>Better Compression</strong></h4>\n<p>Because all the data in a column is of the same type (e.g., integers, floats, strings), columnar storage compresses data better. For example:</p>\n<ul>\n<li>The \"Age\" column (integers) can be compressed using run-length encoding.</li>\n<li>The \"Name\" column (strings) can be compressed using dictionary encoding.</li>\n</ul>\n<p>This reduces the size of data on disk and speeds up queries.</p>\n<h4 id=\"3-vectorized-processing\">3. <strong>Vectorized Processing</strong></h4>\n<p>Columnar databases often use vectorized execution, which processes chunks of columns in batches. 
This is faster than row-by-row processing.</p>\n<hr />\n<h3 id=\"when-to-use-columnar-vs-row-oriented-databases\"><strong>When to Use Columnar vs. Row-Oriented Databases</strong></h3>\n<h4 id=\"use-columnar-storage-for\">Use Columnar Storage for:</h4>\n<ul>\n<li><strong>Analytics</strong>: Dashboards, reporting systems, or BI tools.</li>\n<li><strong>Big Data</strong>: Handling datasets with billions of rows where you often run aggregations.</li>\n<li><strong>Data Warehousing</strong>: Storing historical data for periodic analysis.</li>\n</ul>\n<h4 id=\"use-row-oriented-storage-for\">Use Row-Oriented Storage for:</h4>\n<ul>\n<li><strong>Transaction Processing</strong>: Web apps, e-commerce systems, or CRM tools where data is frequently updated.</li>\n<li><strong>APIs</strong>: Use cases that involve frequent read/write operations with complete records.</li>\n</ul>\n<hr />\n<h3 id=\"example-comparison\"><strong>Example Comparison</strong></h3>\n<h4 id=\"scenario-salary-data-for-employees\">Scenario: Salary Data for Employees</h4>\n<p>If you want to find the <strong>average salary</strong> of employees:</p>\n<ul>\n<li><strong>Row-Oriented</strong>:\n<ul>\n<li>Reads all rows (including <code>ID</code>, <code>Name</code>, and <code>Age</code>) even though only the <code>Salary</code> column is needed.</li>\n</ul>\n</li>\n<li><strong>Columnar</strong>:\n<ul>\n<li>Reads just the <code>Salary</code> column, skipping <code>ID</code>, <code>Name</code>, and <code>Age</code>.</li>\n</ul>\n</li>\n</ul>\n<p>This makes columnar storage faster and more resource-efficient for this type of query.</p>\n<hr />\n<h3 id=\"columnar-storage-in-action\">Columnar Storage in Action</h3>\n<p>Modern tools and formats like <strong>DuckDB</strong>, <strong>Apache Parquet</strong>, and <strong>Amazon Redshift</strong> leverage columnar storage to handle analytics efficiently.</p>\n<p>Would you like help setting up a columnar database or converting your data into columnar formats like Parquet?</p>\n",
      summary: null,
      date: "2025-01-06T00:00:00Z",
      metadata: {},
      tags: ["engineering","databses","mysql","postgres","duckdb","apache","aws","redshift"],
      categories: ["software"],
      series: ["databases"],
      projects: []
  };

  CREATE post CONTENT {
      title: "wtf is duckDB",
      slug: "wtf-is-duckdb",
      path: "https://parkerjones.dev/posts/wtf-is-duckdb/",
      content: "<h3 id=\"what-duckdb-is\">What DuckDB is:</h3>\n<p>Think of DuckDB as a tool that helps you <strong>analyze data quickly</strong> without setting up a big database server. It's great when you have a pile of data in files (like CSVs or Parquet) and you want to ask questions about it using SQL, but you don't want to deal with a complicated database system.</p>\n<h3 id=\"why-you-might-use-it\">Why you might use it:</h3>\n<p>As a web developer or programmer, you might encounter these situations where DuckDB shines:</p>\n<ol>\n<li>\n<p><strong>Analyzing Data Locally</strong>:</p>\n<ul>\n<li>You have a large log file, some user data, or analytics data stored in CSVs or JSON, and you want to summarize, filter, or manipulate it quickly using SQL.</li>\n<li>Example: \"How many unique users logged in each day last month?\"</li>\n</ul>\n</li>\n<li>\n<p><strong>Building Tools or Features with Data Analysis</strong>:</p>\n<ul>\n<li>You’re building a dashboard, a reporting system, or a feature that needs fast data summarization (e.g., showing trends, counts, averages).</li>\n<li>Example: A quick data report generator for a web app.</li>\n</ul>\n</li>\n<li>\n<p><strong>Lightweight Analytics</strong>:</p>\n<ul>\n<li>You need to do some heavy analysis on a dataset but don't want to set up a big database server like Postgres or MySQL.</li>\n<li>Example: Quickly querying through 1GB of CSV data for patterns without loading it into a traditional database.</li>\n</ul>\n</li>\n<li>\n<p><strong>Simplifying Data Workflows</strong>:</p>\n<ul>\n<li>You use Python, JavaScript, or another language and don’t want to leave your environment. DuckDB runs inside your app, just like a library.</li>\n<li>Example: You’re working with Python, have a pandas DataFrame, and want to use SQL to manipulate it.</li>\n</ul>\n</li>\n</ol>\n<h3 id=\"what-you-don-t-need-duckdb-for\">What you don’t need DuckDB for:</h3>\n<ul>\n<li>Day-to-day CRUD operations in your app (you have databases like Postgres or MySQL for that).</li>\n<li>Serving data to users in real-time through APIs.</li>\n</ul>\n<p>DuckDB is <strong>like SQLite for analyzing data</strong>: lightweight, easy to use, and good for one-off or embedded analytics tasks. Would you like to see a real-world example with data you’re familiar with?</p>\n<p>Here’s a real-world example:</p>\n<h3 id=\"scenario-analyzing-web-server-logs\">Scenario: Analyzing Web Server Logs</h3>\n<p>Imagine you’ve got a <strong>web server log</strong> file in CSV format, like this:</p>\n<pre data-lang=\"csv\" class=\"language-csv \"><code class=\"language-csv\" data-lang=\"csv\">timestamp, user_id, endpoint, response_time\n2024-12-01T10:15:30Z, 12345, &#x2F;home, 200\n2024-12-01T10:16:00Z, 12346, &#x2F;about, 350\n2024-12-01T10:17:15Z, 12345, &#x2F;contact, 180\n2024-12-01T10:18:20Z, 12347, &#x2F;home, 220\n2024-12-01T10:19:00Z, 12345, &#x2F;about, 300\n</code></pre>\n<p>You want to answer questions like:</p>\n<ul>\n<li>Which endpoint is the slowest on average?</li>\n<li>How many users accessed each endpoint?</li>\n</ul>\n<h3 id=\"using-duckdb-to-answer-these-questions\">Using DuckDB to Answer These Questions</h3>\n<h4 id=\"step-1-install-duckdb\">Step 1: Install DuckDB</h4>\n<p>You can install it in your environment. 
For Python:</p>\n<pre data-lang=\"bash\" class=\"language-bash \"><code class=\"language-bash\" data-lang=\"bash\">pip install duckdb\n</code></pre>\n<h4 id=\"step-2-write-a-quick-script\">Step 2: Write a Quick Script</h4>\n<p>Here’s how you could load the CSV and analyze it:</p>\n<pre data-lang=\"python\" class=\"language-python \"><code class=\"language-python\" data-lang=\"python\">import duckdb\n\n# Load the log file into DuckDB\ncon = duckdb.connect()  # Start DuckDB\ncon.execute(&quot;CREATE TABLE logs AS SELECT * FROM read_csv_auto(&#x27;server_logs.csv&#x27;)&quot;)\n\n# Query 1: Find the slowest endpoint on average\nslowest_endpoint = con.execute(&quot;&quot;&quot;\n    SELECT endpoint, AVG(response_time) AS avg_response_time\n    FROM logs\n    GROUP BY endpoint\n    ORDER BY avg_response_time DESC\n    LIMIT 1\n&quot;&quot;&quot;).fetchall()\nprint(&quot;Slowest Endpoint:&quot;, slowest_endpoint)\n\n# Query 2: Count accesses per endpoint\nendpoint_counts = con.execute(&quot;&quot;&quot;\n    SELECT endpoint, COUNT(*) AS num_accesses\n    FROM logs\n    GROUP BY endpoint\n    ORDER BY num_accesses DESC\n&quot;&quot;&quot;).fetchall()\nprint(&quot;Endpoint Access Counts:&quot;)\nfor row in endpoint_counts:\n    print(row)\n\n# Clean up\ncon.close()\n</code></pre>\n<h4 id=\"step-3-example-output\">Step 3: Example Output</h4>\n<p>Let’s say the script gives you:</p>\n<pre><code>Slowest Endpoint: [(&#x27;&#x2F;about&#x27;, 325.0)]\nEndpoint Access Counts:\n(&#x27;&#x2F;home&#x27;, 2)\n(&#x27;&#x2F;about&#x27;, 2)\n(&#x27;&#x2F;contact&#x27;, 1)\n</code></pre>\n<h3 id=\"what-happened\">What Happened?</h3>\n<ol>\n<li><strong>Load the Data</strong>: DuckDB read the CSV and treated it like a mini-database table.</li>\n<li><strong>Query with SQL</strong>: You wrote SQL queries to find averages, counts, and ordered results.</li>\n<li><strong>Got Answers Fast</strong>: Even with a large dataset, DuckDB would handle this in seconds.</li>\n</ol>\n<h3 id=\"why-duckdb-over-alternatives\">Why DuckDB Over Alternatives?</h3>\n<ul>\n<li>No need to load the data into a heavy database like Postgres or MySQL.</li>\n<li>Works directly on CSVs and other file formats.</li>\n<li>Simple setup and blazing-fast performance for analytical tasks.</li>\n</ul>\n<p>Would you like help setting this up with sample data, or does this example align with your needs?</p>\n<h3 id=\"why-duckdb-works-well-in-lambda\">Why DuckDB Works Well in Lambda</h3>\n<ol>\n<li>\n<p><strong>In-Process Database</strong>:</p>\n<ul>\n<li>DuckDB runs entirely in memory (unless you use its persistent storage features).</li>\n<li>No external database connection or server is required, which aligns well with Lambda’s stateless nature.</li>\n</ul>\n</li>\n<li>\n<p><strong>Lightweight</strong>:</p>\n<ul>\n<li>DuckDB is compact and can be included in your Lambda deployment package without much overhead.</li>\n</ul>\n</li>\n<li>\n<p><strong>Temporary Data Processing</strong>:</p>\n<ul>\n<li>Perfect for processing on-the-fly data, like analyzing files (e.g., CSVs or Parquet) stored in <strong>S3</strong>.</li>\n</ul>\n</li>\n</ol>\n<hr />\n<h3 id=\"example-analyzing-a-csv-in-s3-with-lambda-and-duckdb\">Example: Analyzing a CSV in S3 with Lambda and DuckDB</h3>\n<h4 id=\"problem\">Problem:</h4>\n<p>You have a CSV file in an S3 bucket containing website analytics data. 
You want a Lambda function to:</p>\n<ol>\n<li>Load the file.</li>\n<li>Count the number of visits per page.</li>\n</ol>\n<h4 id=\"step-1-lambda-setup\">Step 1: Lambda Setup</h4>\n<p>Install DuckDB and package it with your function code.</p>\n<ol>\n<li>Create a virtual environment:<pre data-lang=\"bash\" class=\"language-bash \"><code class=\"language-bash\" data-lang=\"bash\">python -m venv venv\nsource venv&#x2F;bin&#x2F;activate\npip install duckdb boto3\n</code></pre>\n</li>\n<li>Package your environment and code:<pre data-lang=\"bash\" class=\"language-bash \"><code class=\"language-bash\" data-lang=\"bash\">zip -r lambda_function.zip lambda_function.py venv&#x2F;lib&#x2F;python3.*&#x2F;site-packages&#x2F;\n</code></pre>\n</li>\n</ol>\n<h4 id=\"step-2-lambda-function-code\">Step 2: Lambda Function Code</h4>\n<p>Here’s the code for your Lambda:</p>\n<pre data-lang=\"python\" class=\"language-python \"><code class=\"language-python\" data-lang=\"python\">import duckdb\nimport boto3\nimport os\n\ns3 = boto3.client(&#x27;s3&#x27;)\n\ndef lambda_handler(event, context):\n    # Input from event: S3 bucket and key\n    bucket_name = event[&#x27;bucket&#x27;]\n    file_key = event[&#x27;key&#x27;]\n    \n    # Download the file from S3\n    local_file = &#x27;&#x2F;tmp&#x2F;data.csv&#x27;\n    s3.download_file(bucket_name, file_key, local_file)\n\n    # Initialize DuckDB and process the CSV\n    con = duckdb.connect()\n    con.execute(&quot;CREATE TABLE data AS SELECT * FROM read_csv_auto(?)&quot;, [local_file])\n\n    # Query: Count visits per page\n    results = con.execute(&quot;&quot;&quot;\n        SELECT page, COUNT(*) AS visits\n        FROM data\n        GROUP BY page\n        ORDER BY visits DESC\n    &quot;&quot;&quot;).fetchall()\n    \n    # Clean up\n    os.remove(local_file)\n    con.close()\n\n    return {&quot;page_visits&quot;: results}\n</code></pre>\n<h4 id=\"step-3-deploy-to-aws\">Step 3: Deploy to AWS</h4>\n<p>Upload the zipped function to Lambda and configure the required IAM role to allow <strong>S3 access</strong>.</p>\n<hr />\n<h3 id=\"example-event-trigger\">Example Event Trigger:</h3>\n<p>To invoke the Lambda function, you could use an event like this:</p>\n<pre data-lang=\"json\" class=\"language-json \"><code class=\"language-json\" data-lang=\"json\">{\n    &quot;bucket&quot;: &quot;your-s3-bucket-name&quot;,\n    &quot;key&quot;: &quot;path&#x2F;to&#x2F;your-data.csv&quot;\n}\n</code></pre>\n<h3 id=\"example-output\">Example Output:</h3>\n<p>If your CSV has a <code>page</code> column, the output might look like:</p>\n<pre data-lang=\"json\" class=\"language-json \"><code class=\"language-json\" data-lang=\"json\">{\n    &quot;page_visits&quot;: [\n        [&quot;&#x2F;home&quot;, 1234],\n        [&quot;&#x2F;about&quot;, 567],\n        [&quot;&#x2F;contact&quot;, 234]\n    ]\n}\n</code></pre>\n<hr />\n<h3 id=\"key-considerations\">Key Considerations:</h3>\n<ol>\n<li>\n<p><strong>Temporary Storage</strong>:</p>\n<ul>\n<li>Lambda has a 512MB <code>/tmp</code> directory where you can store temporary files like the CSV.</li>\n</ul>\n</li>\n<li>\n<p><strong>Memory and Timeout</strong>:</p>\n<ul>\n<li>Ensure your Lambda has enough memory and timeout to process large datasets.</li>\n<li>DuckDB is very efficient, but Lambda functions have a max timeout of 15 minutes.</li>\n</ul>\n</li>\n<li>\n<p><strong>Cold Start</strong>:</p>\n<ul>\n<li>Including DuckDB in your Lambda package increases its size slightly, which may affect cold start times. 
Use AWS Lambda Layers to manage dependencies separately if needed.</li>\n</ul>\n</li>\n<li>\n<p><strong>File Formats</strong>:</p>\n<ul>\n<li>DuckDB supports <strong>Parquet</strong>, which is better for large-scale data analysis than CSV. Consider converting your data to Parquet for better performance.</li>\n</ul>\n</li>\n</ol>\n<hr />\n<p>Here's a step-by-step guide to set up a workflow for <strong>using DuckDB in AWS Lambda</strong> to process data files from S3.</p>\n<hr />\n<h3 id=\"step-1-install-dependencies-locally\">Step 1: Install Dependencies Locally</h3>\n<p>First, install DuckDB and <code>boto3</code> (for AWS S3 interaction) in a Python virtual environment.</p>\n<ol>\n<li>\n<p>Create and activate a virtual environment:</p>\n<pre data-lang=\"bash\" class=\"language-bash \"><code class=\"language-bash\" data-lang=\"bash\">python3 -m venv venv\nsource venv&#x2F;bin&#x2F;activate\n</code></pre>\n</li>\n<li>\n<p>Install the required libraries:</p>\n<pre data-lang=\"bash\" class=\"language-bash \"><code class=\"language-bash\" data-lang=\"bash\">pip install duckdb boto3\n</code></pre>\n</li>\n<li>\n<p>Package the dependencies:</p>\n<pre data-lang=\"bash\" class=\"language-bash \"><code class=\"language-bash\" data-lang=\"bash\">mkdir python\npip install --target=python duckdb boto3\nzip -r lambda_dependencies.zip python\n</code></pre>\n</li>\n</ol>\n<p>This creates a <strong>Lambda Layer</strong> zip file containing the dependencies. A Lambda Layer is a way to include shared libraries without bundling them with each function.</p>\n<hr />\n<h3 id=\"step-2-write-the-lambda-function-code\">Step 2: Write the Lambda Function Code</h3>\n<p>Here’s the complete function code for analyzing a CSV stored in S3:</p>\n<pre data-lang=\"python\" class=\"language-python \"><code class=\"language-python\" data-lang=\"python\">import duckdb\nimport boto3\nimport os\n\ns3 = boto3.client(&#x27;s3&#x27;)\n\ndef lambda_handler(event, context):\n    # Input from the event: S3 bucket and key\n    bucket_name = event[&#x27;bucket&#x27;]\n    file_key = event[&#x27;key&#x27;]\n    \n    # Download the file to Lambda&#x27;s &#x2F;tmp directory\n    local_file = &#x27;&#x2F;tmp&#x2F;data.csv&#x27;\n    s3.download_file(bucket_name, file_key, local_file)\n\n    try:\n        # Initialize DuckDB and process the CSV\n        con = duckdb.connect()\n        con.execute(&quot;CREATE TABLE data AS SELECT * FROM read_csv_auto(?)&quot;, [local_file])\n\n        # Query: Count visits per page\n        results = con.execute(&quot;&quot;&quot;\n            SELECT page, COUNT(*) AS visits\n            FROM data\n            GROUP BY page\n            ORDER BY visits DESC\n        &quot;&quot;&quot;).fetchall()\n\n        # Prepare the results for return\n        output = [{&quot;page&quot;: row[0], &quot;visits&quot;: row[1]} for row in results]\n    finally:\n        # Clean up the temporary file\n        if os.path.exists(local_file):\n            os.remove(local_file)\n\n    return {&quot;page_visits&quot;: output}\n</code></pre>\n<p>Save this as <code>lambda_function.py</code>.</p>\n<hr />\n<h3 id=\"step-3-package-the-lambda-function\">Step 3: Package the Lambda Function</h3>\n<ol>\n<li>Zip your <code>lambda_function.py</code>:<pre data-lang=\"bash\" class=\"language-bash \"><code class=\"language-bash\" data-lang=\"bash\">zip lambda_function.zip lambda_function.py\n</code></pre>\n</li>\n</ol>\n<hr />\n<h3 id=\"step-4-deploy-to-aws\">Step 4: Deploy to AWS</h3>\n<h4 id=\"upload-the-layer\">Upload the 
Layer</h4>\n<ol>\n<li>Go to the <strong>AWS Lambda Console</strong> → Layers → Create Layer.</li>\n<li>Upload the <code>lambda_dependencies.zip</code> file created earlier.</li>\n<li>Choose a runtime compatible with your function (e.g., Python 3.9).</li>\n</ol>\n<h4 id=\"create-the-lambda-function\">Create the Lambda Function</h4>\n<ol>\n<li>Go to the <strong>Lambda Console</strong> and create a new function:\n<ul>\n<li><strong>Runtime</strong>: Python 3.x</li>\n</ul>\n</li>\n<li>Upload the <code>lambda_function.zip</code> file.</li>\n<li>Add the <strong>DuckDB Layer</strong> to the function:\n<ul>\n<li>Go to the Lambda function’s <strong>Layers</strong> section and add the previously created layer.</li>\n</ul>\n</li>\n</ol>\n<hr />\n<h3 id=\"step-5-configure-iam-permissions\">Step 5: Configure IAM Permissions</h3>\n<p>Ensure the Lambda function has the necessary permissions to access S3. Attach an <strong>IAM role</strong> with the following policy:</p>\n<pre data-lang=\"json\" class=\"language-json \"><code class=\"language-json\" data-lang=\"json\">{\n    &quot;Version&quot;: &quot;2012-10-17&quot;,\n    &quot;Statement&quot;: [\n        {\n            &quot;Effect&quot;: &quot;Allow&quot;,\n            &quot;Action&quot;: &quot;s3:GetObject&quot;,\n            &quot;Resource&quot;: &quot;arn:aws:s3:::your-bucket-name&#x2F;*&quot;\n        }\n    ]\n}\n</code></pre>\n<p>Replace <code>your-bucket-name</code> with your actual S3 bucket name.</p>\n<hr />\n<h3 id=\"step-6-test-the-function\">Step 6: Test the Function</h3>\n<p>Trigger the function with the following test event:</p>\n<pre data-lang=\"json\" class=\"language-json \"><code class=\"language-json\" data-lang=\"json\">{\n    &quot;bucket&quot;: &quot;your-s3-bucket-name&quot;,\n    &quot;key&quot;: &quot;path&#x2F;to&#x2F;your-data.csv&quot;\n}\n</code></pre>\n<p>Example output:</p>\n<pre data-lang=\"json\" class=\"language-json \"><code class=\"language-json\" data-lang=\"json\">{\n    &quot;page_visits&quot;: [\n        {&quot;page&quot;: &quot;&#x2F;home&quot;, &quot;visits&quot;: 1234},\n        {&quot;page&quot;: &quot;&#x2F;about&quot;, &quot;visits&quot;: 567},\n        {&quot;page&quot;: &quot;&#x2F;contact&quot;, &quot;visits&quot;: 234}\n    ]\n}\n</code></pre>\n<hr />\n<h3 id=\"step-7-optimize-for-performance\">Step 7: Optimize for Performance</h3>\n<ol>\n<li>\n<p><strong>File Formats</strong>: If the data grows large, use <strong>Parquet</strong> files instead of CSV. DuckDB handles Parquet files natively and much faster.</p>\n<pre data-lang=\"python\" class=\"language-python \"><code class=\"language-python\" data-lang=\"python\">con.execute(&quot;CREATE TABLE data AS SELECT * FROM read_parquet(?)&quot;, [local_file])\n</code></pre>\n</li>\n<li>\n<p><strong>Memory Allocation</strong>: Increase the Lambda function's memory (e.g., 512MB or higher) for larger datasets.</p>\n</li>\n<li>\n<p><strong>Timeouts</strong>: Ensure the function’s timeout is long enough to process larger files (e.g., 30 seconds).</p>\n</li>\n</ol>\n<p>Creating Parquet files is straightforward and can be done using several tools and programming libraries. Here's how you can create Parquet files depending on your preferred workflow:</p>\n<hr />\n<h3 id=\"1-using-python-pandas\"><strong>1. 
Using Python (Pandas)</strong></h3>\n<p>If you already have a CSV or DataFrame, you can convert it to Parquet with the <code>pandas</code> library.</p>\n<h4 id=\"install-required-libraries\">Install Required Libraries:</h4>\n<pre data-lang=\"bash\" class=\"language-bash \"><code class=\"language-bash\" data-lang=\"bash\">pip install pandas pyarrow\n</code></pre>\n<h4 id=\"example-code\">Example Code:</h4>\n<pre data-lang=\"python\" class=\"language-python \"><code class=\"language-python\" data-lang=\"python\">import pandas as pd\n\n# Create a sample DataFrame\ndata = {\n    &quot;timestamp&quot;: [&quot;2024-12-01T10:15:30Z&quot;, &quot;2024-12-01T10:16:00Z&quot;],\n    &quot;user_id&quot;: [12345, 12346],\n    &quot;endpoint&quot;: [&quot;&#x2F;home&quot;, &quot;&#x2F;about&quot;],\n    &quot;response_time&quot;: [200, 350],\n}\ndf = pd.DataFrame(data)\n\n# Save as Parquet file\ndf.to_parquet(&quot;data.parquet&quot;, engine=&quot;pyarrow&quot;, index=False)\n</code></pre>\n<p>This will create a <code>data.parquet</code> file.</p>\n<hr />\n<h3 id=\"2-using-python-duckdb\"><strong>2. Using Python (DuckDB)</strong></h3>\n<p>DuckDB itself can create Parquet files directly.</p>\n<h4 id=\"install-duckdb\">Install DuckDB:</h4>\n<pre data-lang=\"bash\" class=\"language-bash \"><code class=\"language-bash\" data-lang=\"bash\">pip install duckdb\n</code></pre>\n<h4 id=\"example-code-1\">Example Code:</h4>\n<pre data-lang=\"python\" class=\"language-python \"><code class=\"language-python\" data-lang=\"python\">import duckdb\n\n# Sample CSV file\ncsv_file = &quot;data.csv&quot;\n\n# Convert CSV to Parquet using DuckDB\ncon = duckdb.connect()\ncon.execute(&quot;COPY (SELECT * FROM read_csv_auto(?)) TO &#x27;data.parquet&#x27; (FORMAT PARQUET)&quot;, [csv_file])\ncon.close()\n</code></pre>\n<p>This reads the CSV file and saves it as a Parquet file.</p>\n<hr />\n<h3 id=\"3-using-apache-spark\"><strong>3. Using Apache Spark</strong></h3>\n<p>If you're working with large datasets, <strong>Apache Spark</strong> is a powerful tool for handling Parquet files.</p>\n<h4 id=\"example-in-pyspark\">Example in PySpark:</h4>\n<ol>\n<li>Install PySpark:<pre data-lang=\"bash\" class=\"language-bash \"><code class=\"language-bash\" data-lang=\"bash\">pip install pyspark\n</code></pre>\n</li>\n<li>Example Code:<pre data-lang=\"python\" class=\"language-python \"><code class=\"language-python\" data-lang=\"python\">from pyspark.sql import SparkSession\n\n# Initialize Spark\nspark = SparkSession.builder.appName(&quot;CSV to Parquet&quot;).getOrCreate()\n\n# Read CSV file\ndf = spark.read.csv(&quot;data.csv&quot;, header=True, inferSchema=True)\n\n# Write to Parquet\ndf.write.parquet(&quot;data.parquet&quot;)\n</code></pre>\n</li>\n</ol>\n<hr />\n<h3 id=\"4-using-command-line-tools\"><strong>4. Using Command-Line Tools</strong></h3>\n<p>If you don’t want to write code, you can use tools like <strong>Apache Arrow’s parquet-cli</strong> to create Parquet files from CSVs.</p>\n<h4 id=\"install-parquet-tools\">Install parquet-tools:</h4>\n<pre data-lang=\"bash\" class=\"language-bash \"><code class=\"language-bash\" data-lang=\"bash\">brew install parquet-tools  # Mac\n# or use another package manager\n</code></pre>\n<h4 id=\"convert-csv-to-parquet\">Convert CSV to Parquet:</h4>\n<pre data-lang=\"bash\" class=\"language-bash \"><code class=\"language-bash\" data-lang=\"bash\">csv2parquet data.csv data.parquet\n</code></pre>\n<hr />\n<h3 id=\"5-using-aws-services\"><strong>5. 
Using AWS Services</strong></h3>\n<p>If your data is in AWS, you can convert it using AWS Glue or Athena:</p>\n<ul>\n<li><strong>AWS Glue</strong>: Create an ETL job to transform CSVs to Parquet.</li>\n<li><strong>AWS Athena</strong>: Run a <code>CREATE TABLE AS SELECT</code> query on your S3-stored CSV to produce a Parquet file.</li>\n</ul>\n<hr />\n<h3 id=\"why-use-parquet\">Why Use Parquet?</h3>\n<ul>\n<li><strong>Compact</strong>: Parquet is a columnar storage format, reducing file size.</li>\n<li><strong>Efficient</strong>: Optimized for analytical queries, making it much faster than CSV for DuckDB or other analytical tools.</li>\n<li><strong>Schema Support</strong>: Preserves data types and structures.</li>\n</ul>\n",
      summary: null,
      date: "2025-01-06T00:00:00Z",
      metadata: {},
      tags: ["engineering","databses","mysql","postgres","duckdb","apache","aws","redshift"],
      categories: ["software"],
      series: ["databases"],
      projects: []
  };



  
  
  
  

  
    
  
  
    
  
  
  
    
  

  CREATE post CONTENT {
      title: "Lina's Firmware Updates",
      slug: "lina-3d-firmware-debugging",
      path: "https://parkerjones.dev/posts/lina-3d-firmware-debugging/",
      content: "<p>Hello! Let's learn how to update your 3D printer's firmware and how to ask for help if you need it.</p>\n<h1 id=\"error-analysis\">Error Analysis</h1>\n<p>The \"cold extrusion prevented\" error occurs when a 3D printer's firmware blocks extrusion because it detects that the nozzle temperature is below a predefined threshold. This safety feature is designed to prevent damage to the extruder and ensure proper filament flow. To troubleshoot this issue, consider the following steps:</p>\n<ol>\n<li>\n<p><strong>Verify Nozzle Temperature Settings:</strong></p>\n<ul>\n<li><strong>Check Printing Temperature:</strong> Ensure that your slicer settings specify an appropriate temperature for the filament you're using. For instance, PLA typically prints around 190-220°C, while ABS requires 220-250°C. If the set temperature is below the filament's recommended range, the firmware may block extrusion.</li>\n<li><strong>Monitor Temperature Stability:</strong> Observe the nozzle temperature during printing to confirm it reaches and maintains the target temperature. Fluctuations or failure to reach the desired temperature can trigger the error.</li>\n</ul>\n</li>\n<li>\n<p><strong>Check for Firmware Errors:</strong></p>\n<ul>\n<li><strong>Error Messages:</strong> Some users have reported receiving \"cold extrusion prevented\" errors due to firmware glitches or communication issues. If the printer's temperature readings are stable and correctly configured, yet the error persists, consider updating or reinstalling the printer's firmware.</li>\n</ul>\n</li>\n</ol>\n<p>By systematically checking these aspects, you can identify and resolve the cause of the \"cold extrusion prevented\" error, ensuring smoother and uninterrupted 3D printing operations.</p>\n<h2 id=\"what-is-firmware\">What is Firmware?</h2>\n<p>Firmware is like the brain of your 3D printer. It tells the printer how to move and create objects. Sometimes, updating the firmware can make your printer work better.</p>\n<h2 id=\"getting-help\">Getting Help</h2>\n<p>If you have questions or something isn't working, here's how to get help:</p>\n<ol>\n<li>\n<p><strong>Email Tronxy Support</strong>:</p>\n<ul>\n<li>Write an email to <a href=\"mailto:support@tronxy.com\">TronXY Support</a>.</li>\n<li>Include:\n<ul>\n<li>Your printer's model name.</li>\n<li>A description of the problem.</li>\n<li>Pictures or videos showing the problem, if possible.</li>\n</ul>\n</li>\n</ul>\n</li>\n<li>\n<p><strong>Visit the Support Center</strong>:</p>\n<ul>\n<li>Go to the <a href=\"https://www.tronxy3d.com/pages/support-center-1\">Tronxy Support Center</a> for guides and answers.</li>\n</ul>\n</li>\n</ol>\n<p>Remember, it's okay to ask for help. Updating firmware can be a great way to learn more about your 3D printer!</p>\n<h1 id=\"updating-the-firmware\">Updating the Firmware</h1>\n<p>Updating the firmware of your Tronxy Crux 1 3D printer can enhance its performance and resolve potential issues. Here's how to proceed:</p>\n<p><strong>1. Identify Your Printer's Mainboard Chip:</strong></p>\n<p>The Crux 1 may have either an STM32F446ZET6 or GD32F4xx mainboard chip. To determine yours:</p>\n<ul>\n<li>\n<p><strong>Via the Printer Menu:</strong> Navigate to <code>System</code> &gt; <code>Info</code> on your printer. If \"GD32\" appears, your printer uses the GD32F4xx chip. If not, it likely uses the STM32F446ZET6 chip.</p>\n</li>\n<li>\n<p><strong>Physical Inspection:</strong> Alternatively, open the printer's casing and inspect the mainboard directly. 
The chip will be labeled with its model number.</p>\n</li>\n</ul>\n<p><strong>2. Download the Appropriate Firmware:</strong></p>\n<p>Once you've identified your chip, download the corresponding firmware:</p>\n<ul>\n<li>\n<p><strong>Official Firmware:</strong> Tronxy provides firmware for both chip types on their website.</p>\n<ul>\n<li>\n<p>For STM32F446ZET6:</p>\n</li>\n<li>\n<p>For GD32F4xx:</p>\n</li>\n</ul>\n<p><a href=\"https://tronxyglobal.com/pages/firmware-of-crux-series\">Official Firmware</a></p>\n</li>\n<li>\n<p><strong>Open-Source Firmware:</strong> If you prefer open-source options, Tronxy offers firmware on their GitHub repository.</p>\n<ul>\n<li>\n<p>For STM32F446ZET6:</p>\n</li>\n<li>\n<p>For GD32F4xx:</p>\n</li>\n</ul>\n<p><a href=\"https://github.com/tronxy3d/F4xx-SIM240x320\">TronXY Github Repo for Crux 1</a></p>\n</li>\n</ul>\n<p><strong>3. Prepare for the Update:</strong></p>\n<ul>\n<li>\n<p><strong>Format an SD Card:</strong> Use a blank SD card formatted to FAT32.</p>\n</li>\n<li>\n<p><strong>Create an 'update' Folder:</strong> On the SD card, create a folder named <code>update</code>.</p>\n</li>\n<li>\n<p><strong>Copy Firmware Files:</strong> Place the downloaded firmware files into the <code>update</code> folder.</p>\n</li>\n</ul>\n<p><strong>4. Update the Firmware:</strong></p>\n<ul>\n<li>\n<p><strong>Power Off the Printer:</strong> Ensure the printer is completely turned off.</p>\n</li>\n<li>\n<p><strong>Insert the SD Card:</strong> Place the prepared SD card into the printer's SD card slot.</p>\n</li>\n<li>\n<p><strong>Power On the Printer:</strong> Turn the printer back on. It should automatically detect the new firmware and initiate the update process.</p>\n</li>\n<li>\n<p><strong>Wait for Completion:</strong> Allow the printer to complete the update. Once finished, it will restart, and the new firmware will be active.</p>\n</li>\n</ul>\n<p><strong>5. 
Verify the Update:</strong></p>\n<ul>\n<li><strong>Check Firmware Version:</strong> After the update, navigate to <code>System</code> &gt; <code>Info</code> to confirm that the firmware version matches the one you installed.</li>\n</ul>\n<p><strong>Important Considerations:</strong></p>\n<ul>\n<li>\n<p><strong>Backup Settings:</strong> Before updating, note your current printer settings, as the update may reset them to defaults.</p>\n</li>\n<li>\n<p><strong>Compatibility:</strong> Ensure the firmware version matches your printer model and mainboard chip to prevent potential issues.</p>\n</li>\n</ul>\n<h2 id=\"seek-assistance-if-needed\">Seek Assistance if Needed</h2>\n<p>If you encounter difficulties during the update process, consult Tronxy's support resources or contact their technical support for guidance.</p>\n<p>Here are several online platforms where you can ask questions and engage with communities about 3D printing:</p>\n<p><strong>Email:</strong></p>\n<ul>\n<li><strong>Technical Support:</strong> <a href=\"mailto:support@tronxy.com\">support@tronxy.com</a></li>\n</ul>\n<p><strong>Phone:</strong></p>\n<ul>\n<li><strong>Customer Support:</strong> <a href=\"tel:+8675589968500\">Call Customer Support</a></li>\n</ul>\n<p><strong>WhatsApp:</strong></p>\n<ul>\n<li><strong>Customer Support:</strong> <a href=\"https://wa.me/8618123972792\">Chat on WhatsApp</a></li>\n</ul>\n<p><strong>Online Contact Form:</strong></p>\n<ul>\n<li>Visit Tronxy's <a href=\"https://www.tronxy.com/contact/\">Contact Us</a> page to fill out an online form with your inquiry.</li>\n</ul>\n<p><strong>Support Center:</strong></p>\n<ul>\n<li>For technical support, you can also visit Tronxy's <a href=\"https://www.tronxy.com/technical-support/\">Technical Support</a> page, where you can fill out a form to receive assistance.</li>\n</ul>\n<p>When reaching out, it's helpful to provide your order number, serial number, purchase channel, and a detailed description of the issue, including any relevant photos or videos. 
This information will assist the support team in addressing your concerns more efficiently.</p>\n<p>Tronxy's business hours are Monday to Friday, 9:00 AM to 6:30 PM (UTC+08:00, Beijing Time).</p>\n<p>For more information, you can visit Tronxy's official website: <a href=\"https://www.tronxy.com\">https://www.tronxy.com</a>.</p>\n<p><strong>Reddit Communities:</strong></p>\n<ul>\n<li>\n<p><a href=\"https://www.reddit.com/r/3DPrinting/\">r/3DPrinting</a>: A subreddit dedicated to all aspects of 3D printing, including discussions, questions, and sharing projects.</p>\n</li>\n<li>\n<p><a href=\"https://www.reddit.com/r/FixMyPrint/\">r/FixMyPrint</a>: Focused on troubleshooting 3D printing issues, this community helps users resolve printing problems.</p>\n</li>\n<li>\n<p><a href=\"https://www.reddit.com/r/functionalprint/\">r/functionalprint</a>: Dedicated to 3D prints that have practical applications in daily life.</p>\n</li>\n</ul>\n<p><strong>Online Forums:</strong></p>\n<ul>\n<li>\n<p><a href=\"https://3dprintboard.com/\">3D Print Board</a>: A forum for discussions on 3D printers, design, scanning, and related technologies.</p>\n</li>\n<li>\n<p><a href=\"https://www.3dprintingforum.us/\">3D Printing Forum</a>: A platform covering various topics, including hardware, software, and specific printer models.</p>\n</li>\n<li>\n<p><a href=\"https://forum.3dprintbeginner.com/\">3DPrintBeginner Forum</a>: A place for discussing 3D printing-related topics, including hardware, firmware, and software.</p>\n</li>\n<li>\n<p><a href=\"https://www.3dprintingforum.org/\">3D Printing Forum - 3DPrintingForum.org</a>: A community for 3D printing enthusiasts to discuss various aspects of 3D printing.</p>\n</li>\n<li>\n<p><a href=\"https://soliforum.com/\">SoliForum - 3D Printing Community</a>: A forum for discussions on 3D printers, materials, and related topics.</p>\n</li>\n</ul>\n<p><strong>Specialized Communities:</strong></p>\n<ul>\n<li>\n<p><a href=\"https://www.thingiverse.com/\">Thingiverse</a>: A design-sharing website where users can share and discuss 3D printable models.</p>\n</li>\n<li>\n<p><a href=\"https://www.myminifactory.com/\">MyMiniFactory</a>: A platform for sharing 3D printable objects, with an active community for discussions and support.</p>\n</li>\n<li>\n<p><a href=\"https://cults3d.com/\">Cults</a>: A 3D printing marketplace and social network where users can share and discuss 3D models.</p>\n</li>\n<li>\n<p><a href=\"https://www.hackster.io/3d-printing\">Hackster.io 3D Printing Community</a>: A community dedicated to learning hardware, including 3D printing, where you can share projects and ask questions.</p>\n</li>\n<li>\n<p><a href=\"https://forum.makerforums.info/c/3d-printing/5\">Maker Forums - 3D Printing</a>: A forum for discussing 3D printing hardware, software, firmware, and designs.</p>\n</li>\n<li>\n<p><a href=\"https://forum.creality.com/\">Creality Community Forum</a>: A hub for discussions related to Creality 3D printers, scanners, and accessories.</p>\n</li>\n</ul>\n<p>These platforms offer a wealth of information and support for both beginners and experienced 3D printing enthusiasts.</p>\n",
      summary: null,
      date: "2024-12-24T00:00:00Z",
      metadata: {},
      tags: ["guide","tronxy","crux1","lina"],
      categories: ["lab"],
      series: [],
      projects: ["printing"]
  };



  
  
  
  

  
    
  
  
    
  
  
  
    
  

  CREATE post CONTENT {
      title: "Lina's christmas 3d printing survival guide",
      slug: "lina-3d-printer-survival-guide",
      path: "https://parkerjones.dev/posts/lina-3d-printer-survival-guide/",
      content: "<p>Welcome to Lina’s Christmas 3D Printing Survival Guide! This special guide is designed to make your Christmas morning magical by introducing you to the fascinating world of 3D printing. With the <a href=\"https://cdn.shopify.com/s/files/1/0506/3996/2290/files/CRUX_1_Installation_Manual-EN_V2.0.pdf?v=1680076369\">Tronxy Crux1</a> and some creativity, you’ll be crafting custom ornaments, toys, and keepsakes in no time.</p>\n<p>This guide is dedicated to Lina, our star maker-in-the-making! Lina, if you’re reading this, we’re so proud of you and can’t wait to see all the amazing creations you’ll bring to life. Your imagination is your superpower, and this guide is here to help you unleash it.</p>\n<p>Let’s get started!</p>\n<h2 id=\"step-1-setting-up-the-tronxy-crux1\"><strong>Step 1: Setting Up the Tronxy Crux1</strong></h2>\n<ol>\n<li>\n<p><strong>Unboxing and Assembly</strong>:</p>\n<ul>\n<li>Carefully unpack the printer. Most Tronxy Crux1 printers come pre-assembled. If any assembly is required, refer to the manual.</li>\n<li>Make sure the printer is placed on a stable and level surface.</li>\n</ul>\n</li>\n<li>\n<p><strong>Level the Print Bed</strong>:</p>\n<ul>\n<li>Power on the printer.</li>\n<li>Use the manual leveling knobs under the bed to adjust each corner.</li>\n<li>Slide a piece of paper between the nozzle and the bed. Adjust until there’s slight friction as the nozzle moves.</li>\n</ul>\n</li>\n<li>\n<p><strong>Load Filament</strong>:</p>\n<ul>\n<li>Preheat the nozzle to the filament’s recommended temperature (usually 190–210°C for PLA).</li>\n<li>Insert the PLA filament into the extruder until it extrudes smoothly from the nozzle.</li>\n</ul>\n</li>\n<li>\n<p><strong>Test Print</strong>:</p>\n<ul>\n<li>Load the included SD card and select a preloaded test model. This ensures the printer is working correctly.</li>\n</ul>\n</li>\n</ol>\n<h2 id=\"step-2-tinkercad-designing-a-christmas-themed-ornament\"><strong>Step 2: TinkerCAD - Designing a Christmas-Themed Ornament</strong></h2>\n<ol>\n<li>\n<p><strong>Create a Free TinkerCAD Account</strong>:</p>\n<ul>\n<li>Go to <a href=\"https://www.tinkercad.com\">TinkerCAD</a> and create a free account.</li>\n</ul>\n</li>\n<li>\n<p><strong>Start a New Project</strong>:</p>\n<ul>\n<li>Open the TinkerCAD editor and select “Create New Design.”</li>\n</ul>\n</li>\n<li>\n<p><strong>Design a Christmas Ornament</strong>:</p>\n<ul>\n<li><strong>Step 1: Add a Base Shape</strong>:\n<ul>\n<li>Drag a sphere from the shapes menu to the workspace. 
This will be the base of your ornament.</li>\n<li>Resize it to about 50mm in diameter.</li>\n</ul>\n</li>\n<li><strong>Step 2: Add a Hole for Hanging</strong>:\n<ul>\n<li>Drag a cylinder into the workspace and mark it as a \"hole\" in the shape menu.</li>\n<li>Resize it to about 3mm in diameter and place it near the top of the sphere.</li>\n<li>Group the shapes to subtract the hole.</li>\n</ul>\n</li>\n<li><strong>Step 3: Add Festive Decorations</strong>:\n<ul>\n<li>Drag stars, hearts, or other shapes onto the sphere and attach them as decorations.</li>\n<li>Optionally, add text (e.g., “Merry Christmas” or the child’s name) using the text tool.</li>\n</ul>\n</li>\n</ul>\n</li>\n<li>\n<p><strong>Export the Design</strong>:</p>\n<ul>\n<li>Once the design is complete, click <strong>Export</strong> &gt; <strong>STL</strong>.</li>\n</ul>\n</li>\n</ol>\n<h2 id=\"step-3-printing-the-ornament\"><strong>Step 3: Printing the Ornament</strong></h2>\n<ol>\n<li>\n<p><strong>Prepare the File in a Slicer</strong>:</p>\n<ul>\n<li>Install slicing software (e.g., Ultimaker Cura).</li>\n<li>Import the STL file into Cura and select the Tronxy Crux1 as the printer model.</li>\n<li>Adjust settings:\n<ul>\n<li>Material: PLA</li>\n<li>Layer height: 0.2mm</li>\n<li>Infill: 20%</li>\n<li>Support: None (unless your design requires it)</li>\n</ul>\n</li>\n<li>Save the file as a G-code file.</li>\n</ul>\n</li>\n<li>\n<p><strong>Print the Ornament</strong>:</p>\n<ul>\n<li>Transfer the G-code to the printer via an SD card.</li>\n<li>Start the print and watch the magic happen!</li>\n</ul>\n</li>\n</ol>\n<h2 id=\"step-4-decorating-the-ornament\"><strong>Step 4: Decorating the Ornament</strong></h2>\n<ol>\n<li>\n<p><strong>Paint or Add Glitter</strong>:</p>\n<ul>\n<li>Use acrylic paints or markers to add color.</li>\n<li>Apply a small amount of glue and sprinkle glitter for extra sparkle.</li>\n</ul>\n</li>\n<li>\n<p><strong>Add a String</strong>:</p>\n<ul>\n<li>Thread a piece of string or ribbon through the hole and tie it into a loop for hanging.</li>\n</ul>\n</li>\n</ol>\n<h2 id=\"step-5-create-a-keepsake-memory\"><strong>Step 5: Create a Keepsake Memory</strong></h2>\n<ul>\n<li>Write the date and the child’s name on the ornament for a lifelong keepsake.</li>\n<li>Capture photos of the process to commemorate the fun Christmas morning activity.</li>\n</ul>\n<h3 id=\"safety-tips\"><strong>Safety Tips</strong></h3>\n<ul>\n<li>Supervise the child when handling the printer.</li>\n<li>Keep the nozzle area away from small hands as it can get extremely hot.</li>\n<li>Ensure good ventilation while printing.</li>\n</ul>\n<h1 id=\"3d-printing-vocabulary-with-links-and-references\">3D Printing Vocabulary with Links and References</h1>\n<p><strong>3D Printer</strong><br />\nA machine that creates three-dimensional objects by building them layer by layer from a digital design.<br />\n<a href=\"https://en.wikipedia.org/wiki/3D_printing\">Learn more about 3D printing</a></p>\n<p><strong>Filament</strong><br />\nThe \"ink\" for the 3D printer, usually made of plastic like PLA or ABS.<br />\n<a href=\"https://all3dp.com/1/3d-printer-filament-types-3d-printing-3d-filament/\">What is filament in 3D printing?</a></p>\n<p><strong>PLA</strong><br />\nA common type of filament made from plants. 
It's safe and easy to use for beginners.<br />\n<a href=\"https://all3dp.com/1/pla-filament-3d-printing-what-is-it/\">PLA filament explained</a></p>\n<p><strong>Extruder</strong><br />\nThe part of the printer that heats up the filament and pushes it out to create layers.<br />\n<a href=\"https://all3dp.com/2/3d-printer-extruder-basics/\">How does a 3D printer extruder work?</a></p>\n<p><strong>Nozzle</strong><br />\nThe small opening at the end of the extruder where the melted filament comes out.<br />\n<a href=\"https://all3dp.com/2/3d-printer-nozzle-size-comparison-best-guide/\">Choosing a nozzle for 3D printing</a></p>\n<p><strong>Build Plate (Bed)</strong><br />\nThe flat surface where the printer creates the object.<br />\n<a href=\"https://all3dp.com/2/3d-printer-bed-adhesion/\">How to maintain a 3D printer bed</a></p>\n<p><strong>Leveling</strong><br />\nAdjusting the printer bed to make sure it is flat and even with the nozzle.<br />\n<a href=\"https://all3dp.com/2/3d-printer-bed-leveling-best-tips/\">A guide to bed leveling</a></p>\n<p><strong>Layer Height</strong><br />\nThe thickness of each layer of filament that the printer lays down.<br />\n<a href=\"https://all3dp.com/2/layer-height-3d-printing-guide/\">Understanding layer height</a></p>\n<p><strong>Infill</strong><br />\nThe pattern and density of material inside the object. Makes the object strong without using too much filament.<br />\n<a href=\"https://all3dp.com/2/3d-printing-infill-guide/\">What is infill in 3D printing?</a></p>\n<p><strong>Slicer</strong><br />\nSoftware that turns a 3D design into instructions (G-code) the printer can understand.<br />\n<a href=\"https://all3dp.com/1/best-3d-slicer-software-3d-printer/\">Best slicer software for 3D printing</a></p>\n<p><strong>G-code</strong><br />\nThe file type that tells the 3D printer how to move and what to do.<br />\n<a href=\"https://www.matterhackers.com/articles/g-code-explained\">Introduction to G-code</a></p>\n<p><strong>STL File</strong><br />\nA type of file used to store 3D designs.<br />\n<a href=\"https://all3dp.com/what-is-stl-file-format-extension-3d-printing/\">What is an STL file?</a></p>\n<p><strong>Support Material</strong><br />\nExtra material printed to support parts of the design that hang in the air. 
Removed after printing.<br />\n<a href=\"https://all3dp.com/2/3d-printing-supports-all-you-need-to-know/\">How to use support materials</a></p>\n<p><strong>Raft/Brim</strong><br />\nA base layer of material that helps the object stick to the print bed.<br />\n<a href=\"https://all3dp.com/2/raft-vs-brim-vs-skirt-3d-printing/\">Difference between raft and brim</a></p>\n<p><strong>Overhang</strong><br />\nParts of a 3D print that stick out and may need support to print properly.<br />\n<a href=\"https://all3dp.com/2/3d-printing-overhang-all-you-need-to-know/\">Tips for handling overhangs</a></p>\n<p><strong>Cooling Fan</strong><br />\nA fan that cools the filament as it comes out of the nozzle to make it set faster.<br />\n<a href=\"https://all3dp.com/2/3d-printer-cooling-fan-guide/\">Cooling fans in 3D printing</a></p>\n<h3 id=\"tinkercad-vocabulary-with-links-and-references\">TinkerCAD Vocabulary with Links and References</h3>\n<p><strong>3D Design</strong><br />\nA digital model of something you want to print in 3D.<br />\n<a href=\"https://www.tinkercad.com/learn\">Getting started with TinkerCAD</a></p>\n<p><strong>Shape</strong><br />\nBasic building blocks like cubes, spheres, and cylinders used to create designs.<br />\n<a href=\"https://blog.tinkercad.com/shape-generator\">Creating with shapes in TinkerCAD</a></p>\n<p><strong>Workspace</strong><br />\nThe area where you build your 3D designs.<br />\n<a href=\"https://www.instructables.com/Tinkercad-Tutorial-Basics/\">TinkerCAD workspace basics</a></p>\n<p><strong>Resize</strong><br />\nChanging the size of a shape by dragging its edges or corners.<br />\n<a href=\"https://blog.tinkercad.com/tinkertip-precision-resize\">How to resize shapes in TinkerCAD</a></p>\n<p><strong>Align</strong><br />\nA tool to make shapes line up neatly.<br />\n<a href=\"https://blog.tinkercad.com/tinkertip-align-tool\">Aligning shapes in TinkerCAD</a></p>\n<p><strong>Group</strong><br />\nA tool that combines two or more shapes into one.<br />\n<a href=\"https://blog.tinkercad.com/tinkertip-grouping-shapes\">Using the group tool in TinkerCAD</a></p>\n<p><strong>Hole</strong><br />\nA shape used to create empty spaces or cut-outs in your design.<br />\n<a href=\"https://blog.tinkercad.com/tinkertip-holes\">Creating holes in TinkerCAD</a></p>\n<p><strong>Text Tool</strong><br />\nA feature that lets you add words or letters to your design.<br />\n<a href=\"https://blog.tinkercad.com/tinkertip-adding-text\">Adding text in TinkerCAD</a></p>\n<p><strong>Export</strong><br />\nSaving your 3D design as a file (like an STL) so it can be printed.<br />\n<a href=\"https://www.tinkercad.com/learn/exporting-your-design\">How to export files in TinkerCAD</a></p>\n<p>You can copy this directly into your notes for quick access! 
The links provide additional detailed resources for deeper learning.</p>\n<h3 id=\"free-3d-design-repositories\"><strong>Free 3D Design Repositories</strong></h3>\n<ol>\n<li>\n<p><strong><a href=\"https://www.thingiverse.com/\">Thingiverse</a></strong></p>\n<ul>\n<li>One of the largest repositories of free 3D models.</li>\n<li>Great for beginners and features a variety of categories.</li>\n</ul>\n</li>\n<li>\n<p><strong><a href=\"https://www.printables.com/\">Printables by Prusa</a></strong></p>\n<ul>\n<li>Community-driven platform by Prusa.</li>\n<li>Offers high-quality and curated designs.</li>\n</ul>\n</li>\n<li>\n<p><strong><a href=\"https://www.myminifactory.com/\">MyMiniFactory</a></strong></p>\n<ul>\n<li>Focuses on verified and tested models.</li>\n<li>Ideal for high-quality prints.</li>\n</ul>\n</li>\n<li>\n<p><strong><a href=\"https://cults3d.com/\">Cults</a></strong></p>\n<ul>\n<li>Offers free and premium designs.</li>\n<li>Features unique and artistic models.</li>\n</ul>\n</li>\n<li>\n<p><strong><a href=\"https://www.youmagine.com/\">YouMagine</a></strong></p>\n<ul>\n<li>Free designs shared by a collaborative community.</li>\n<li>Focuses on simplicity and easy downloads.</li>\n</ul>\n</li>\n<li>\n<p><strong><a href=\"https://www.stlfinder.com/\">STLFinder</a></strong></p>\n<ul>\n<li>A search engine for STL files across multiple repositories.</li>\n<li>Helps discover designs from various sources in one place.</li>\n</ul>\n</li>\n<li>\n<p><strong><a href=\"https://pinshape.com/\">Pinshape</a></strong></p>\n<ul>\n<li>Free and paid designs.</li>\n<li>User-friendly interface with ratings for designs.</li>\n</ul>\n</li>\n<li>\n<p><strong><a href=\"https://3dexport.com/free-3d-models/\">3DExport</a></strong></p>\n<ul>\n<li>A mix of free and paid models, including STL files for printing.</li>\n<li>Good for detailed and professional models.</li>\n</ul>\n</li>\n</ol>\n<h3 id=\"premium-3d-design-platforms\"><strong>Premium 3D Design Platforms</strong></h3>\n<ol>\n<li>\n<p><strong><a href=\"https://www.turbosquid.com/\">TurboSquid</a></strong></p>\n<ul>\n<li>High-quality models for professionals.</li>\n<li>Offers some free options alongside premium designs.</li>\n</ul>\n</li>\n<li>\n<p><strong><a href=\"https://www.cgtrader.com/\">CGTrader</a></strong></p>\n<ul>\n<li>A marketplace for 3D designs, including print-ready files.</li>\n<li>Both free and paid options available.</li>\n</ul>\n</li>\n<li>\n<p><strong><a href=\"https://www.gambody.com/\">Gambody</a></strong></p>\n<ul>\n<li>Specializes in high-detail collectible models, especially for gaming.</li>\n<li>Premium designs with intricate details.</li>\n</ul>\n</li>\n</ol>\n<h3 id=\"niche-3d-model-repositories\"><strong>Niche 3D Model Repositories</strong></h3>\n<ol>\n<li>\n<p><strong><a href=\"https://nasa3d.arc.nasa.gov/\">NASA 3D Resources</a></strong></p>\n<ul>\n<li>Free 3D models related to space and astronomy.</li>\n<li>Perfect for educational or thematic projects.</li>\n</ul>\n</li>\n<li>\n<p><strong><a href=\"https://3d.si.edu/\">Smithsonian 3D Digitization</a></strong></p>\n<ul>\n<li>Free models of artifacts from museums.</li>\n<li>Focuses on cultural and historical objects.</li>\n</ul>\n</li>\n<li>\n<p><strong><a href=\"https://3dwarehouse.sketchup.com/\">3D Warehouse</a></strong></p>\n<ul>\n<li>A repository for SketchUp designs.</li>\n<li>Includes many 3D-printable models.</li>\n</ul>\n</li>\n</ol>\n<h3 id=\"community-platforms-for-collaboration\"><strong>Community Platforms for 
Collaboration</strong></h3>\n<ol>\n<li>\n<p><strong><a href=\"https://grabcad.com/library\">GrabCAD</a></strong></p>\n<ul>\n<li>Features engineering-focused designs.</li>\n<li>Community-driven platform with CAD and STL files.</li>\n</ul>\n</li>\n<li>\n<p><strong><a href=\"https://www.tinkercad.com/gallery/\">TinkerCAD Community Gallery</a></strong></p>\n<ul>\n<li>Browse designs shared by other TinkerCAD users.</li>\n<li>Perfect for simple and beginner-friendly projects.</li>\n</ul>\n</li>\n</ol>\n<h3 id=\"educational-resources-for-learning-and-customization\"><strong>Educational Resources for Learning and Customization</strong></h3>\n<ol>\n<li>\n<p><strong><a href=\"https://www.openscad.org/\">OpenSCAD Libraries</a></strong></p>\n<ul>\n<li>Great for learning to create parametric designs.</li>\n<li>Models that can be customized with code.</li>\n</ul>\n</li>\n<li>\n<p><strong><a href=\"https://www.instructables.com/3D-Printing/\">Instructables 3D Printing Projects</a></strong></p>\n<ul>\n<li>A collection of step-by-step guides with downloadable files.</li>\n</ul>\n</li>\n<li>\n<p><strong><a href=\"https://sketchfab.com/\">Sketchfab</a></strong></p>\n<ul>\n<li>Focused on viewing and downloading 3D models.</li>\n<li>Some models are free for 3D printing.</li>\n</ul>\n</li>\n</ol>\n<h3 id=\"search-aggregators\"><strong>Search Aggregators</strong></h3>\n<ol>\n<li>\n<p><strong><a href=\"https://www.yeggi.com/\">Yeggi</a></strong></p>\n<ul>\n<li>Search engine for 3D printable models from multiple platforms.</li>\n<li>Filters for free and paid designs.</li>\n</ul>\n</li>\n<li>\n<p><strong><a href=\"https://thangs.com/\">Thangs</a></strong></p>\n<ul>\n<li>Search, share, and collaborate on 3D designs.</li>\n<li>Includes a geometric search feature for finding similar designs.</li>\n</ul>\n</li>\n</ol>\n<h1 id=\"more-project-ideas\">More Project Ideas</h1>\n<h3 id=\"christmas-themed-3d-printable-designs\">Christmas-Themed 3D Printable Designs</h3>\n<ol>\n<li>\n<p><strong>Cults3D - Christmas Ornaments</strong><br />\nA collection of free 3D models for Christmas ornaments, including snowflakes, baubles, and festive decorations.<br />\n<a href=\"https://cults3d.com/en/tags/christmas%2Bornaments?only_free=true\">Explore designs</a></p>\n</li>\n<li>\n<p><strong>Printables - Winter &amp; Christmas &amp; New Year's</strong><br />\nOffers various decorations like snowflakes, bells, and Santa Claus miniatures.<br />\n<a href=\"https://www.printables.com/model?category=70\">Browse models</a></p>\n</li>\n<li>\n<p><strong>Thingiverse - Christmas Collection</strong><br />\nA diverse range of Christmas-themed designs, from ornaments to festive gadgets.<br />\n<a href=\"https://www.thingiverse.com/search?q=christmas&amp;dwh=0e8b1b\">View collection</a></p>\n</li>\n<li>\n<p><strong>Yeggi - Free Christmas Ornaments</strong><br />\nAggregates free Christmas ornament designs from various sources.<br />\n<a href=\"https://www.yeggi.com/q/free%2Bchristmas%2Bornaments/\">Find models</a></p>\n</li>\n</ol>\n<h3 id=\"3d-printable-toys-for-kids\">3D Printable Toys for Kids</h3>\n<ol>\n<li>\n<p><strong>Cults3D - Toys</strong><br />\nA vast selection of free toy designs, including action figures, puzzles, and educational models.<br />\n<a href=\"https://cults3d.com/en/tags/toys?only_free=true\">Explore toys</a></p>\n</li>\n<li>\n<p><strong>Printables - Toys &amp; Games</strong><br />\nFeatures a variety of 3D printed games and toys for both indoor and outdoor play.<br />\n<a 
href=\"https://www.printables.com/model?category=30\">Browse toys</a></p>\n</li>\n<li>\n<p><strong>Yeggi - Free Kid Toy Models</strong><br />\nSearches multiple repositories for free 3D printable toy designs suitable for children.<br />\n<a href=\"https://www.yeggi.com/q/free%2Bkid%2Btoy/\">Find toys</a></p>\n</li>\n<li>\n<p><strong>All3DP - 3D Printed Toys</strong><br />\nAn article featuring a curated list of 3D printed toys that can engage and entertain kids.<br />\n<a href=\"https://all3dp.com/2/3d-printed-toys-distract-kids/\">Read more</a></p>\n</li>\n</ol>\n<h3 id=\"tips-for-printing\">Tips for Printing</h3>\n<ul>\n<li>\n<p><strong>Check Print Settings</strong>: Ensure your printer settings match the requirements of the chosen design, including layer height, infill, and support structures.</p>\n</li>\n<li>\n<p><strong>Material Selection</strong>: PLA is a common and safe filament choice for printing toys and ornaments.</p>\n</li>\n<li>\n<p><strong>Supervision</strong>: Always supervise children when handling 3D printed toys, especially if they contain small parts.</p>\n</li>\n</ul>\n<p>These resources should provide a great starting point for creating festive decorations and fun toys with your 3D printer. Happy printing!</p>\n",
      summary: null,
      date: "2024-12-24T00:00:00Z",
      metadata: {},
      tags: ["guide","tronxy","crux1","lina"],
      categories: ["lab"],
      series: [],
      projects: ["printing"]
  };



  
  
  
  

  
    
  
  
    
  
  
  

  CREATE post CONTENT {
      title: "Rust Memory Cheatsheet",
      slug: "rust-memory-cheatsheet",
      path: "https://parkerjones.dev/posts/rust-memory-cheatsheet/",
      content: "<p><strong>Rust Memory Container Cheat Sheet</strong> and the <strong>Rust Container Cheat Sheet</strong>. These visual aids provide concise and clear representations of Rust's memory containers, aiding in understanding their structures and relationships.</p>\n<h2 id=\"rust-memory-container-cheat-sheet\">Rust Memory Container Cheat Sheet</h2>\n<img src=\"/rust-container-cheat-sheet.png\" alt=\"Rust Memory Container Cheat Sheet\" width=\"800\" height=\"600\" >\n<p><em>Image Source: <a href=\"https://github.com/usagi/rust-memory-container-cs\">Rust Memory Container Cheat Sheet by Usagi Ito</a></em></p>\n<p>This cheat sheet, created by Usagi Ito, offers a comprehensive overview of Rust's memory containers, illustrating their layouts and how they manage memory. It's an excellent reference for both beginners and seasoned Rustaceans aiming to deepen their understanding of Rust's memory management.</p>\n<p>For more details and additional formats, visit the <a href=\"https://github.com/usagi/rust-memory-container-cs\">GitHub repository</a>.</p>\n<h2 id=\"rust-container-cheat-sheet\">Rust Container Cheat Sheet</h2>\n<img src=\"/rust-memory-container-cs.png\" alt=\"Rust Memory Container Cheat Sheet\" width=\"800\" height=\"600\" >\n<p><em>Image Source: <a href=\"https://handbook.dataland.engineering/assets/rust-container-cheat-sheet.pdf\">Rust Container Cheat Sheet by Raph Levien</a></em></p>\n<p>This cheat sheet, designed by Raph Levien, provides a detailed look at Rust's container types, their memory layouts, and associated methods. It's a handy tool for developers seeking to grasp the nuances of Rust's standard library containers.</p>\n<p>You can download the PDF version <a href=\"https://handbook.dataland.engineering/assets/rust-container-cheat-sheet.pdf\">here</a>.</p>\n<p><em>Note: All credit for these cheat sheets goes to their respective authors. Please refer to the original sources for the most up-to-date versions and additional information.</em></p>\n<h2 id=\"rust-memory-container-types\">Rust Memory Container Types</h2>\n<p>Rust's standard library offers a variety of memory container types, each designed for specific use cases. Here are some commonly used containers:</p>\n<ul>\n<li>\n<p><strong>Vectors (<code>Vec&lt;T&gt;</code>):</strong> A dynamic array that can grow or shrink in size. <a href=\"https://doc.rust-lang.org/std/vec/\">Documentation</a>.</p>\n</li>\n<li>\n<p><strong>Strings (<code>String</code>):</strong> A growable, mutable, UTF-8 encoded string. <a href=\"https://doc.rust-lang.org/std/string/struct.String.html\">Documentation</a>.</p>\n</li>\n<li>\n<p><strong>Hash Maps (<code>HashMap&lt;K, V&gt;</code>):</strong> A hash map implemented with quadratic probing and SIMD lookup. <a href=\"https://doc.rust-lang.org/std/collections/struct.HashMap.html\">Documentation</a>.</p>\n</li>\n<li>\n<p><strong>B-Trees (<code>BTreeMap&lt;K, V&gt;</code>):</strong> An ordered map based on a B-Tree. <a href=\"https://doc.rust-lang.org/std/collections/struct.BTreeMap.html\">Documentation</a>.</p>\n</li>\n<li>\n<p><strong>Hash Sets (<code>HashSet&lt;T&gt;</code>):</strong> A hash set implemented as a <code>HashMap</code> where the value is <code>()</code>. <a href=\"https://doc.rust-lang.org/std/collections/struct.HashSet.html\">Documentation</a>.</p>\n</li>\n<li>\n<p><strong>B-Tree Sets (<code>BTreeSet&lt;T&gt;</code>):</strong> An ordered set based on a B-Tree. 
<a href=\"https://doc.rust-lang.org/std/collections/struct.BTreeSet.html\">Documentation</a>.</p>\n</li>\n<li>\n<p><strong>Linked Lists (<code>LinkedList&lt;T&gt;</code>):</strong> A doubly-linked list. <a href=\"https://doc.rust-lang.org/std/collections/struct.LinkedList.html\">Documentation</a>.</p>\n</li>\n<li>\n<p><strong>Vec Deques (<code>VecDeque&lt;T&gt;</code>):</strong> A double-ended queue implemented with a growable ring buffer. <a href=\"https://doc.rust-lang.org/std/collections/struct.VecDeque.html\">Documentation</a>.</p>\n</li>\n<li>\n<p><strong>Binary Heaps (<code>BinaryHeap&lt;T&gt;</code>):</strong> A priority queue implemented with a binary heap. <a href=\"https://doc.rust-lang.org/std/collections/struct.BinaryHeap.html\">Documentation</a>.</p>\n</li>\n<li>\n<p><strong>Reference Counting (<code>Rc&lt;T&gt;</code>):</strong> A single-threaded reference-counting pointer. <a href=\"https://doc.rust-lang.org/std/rc/struct.Rc.html\">Documentation</a>.</p>\n</li>\n<li>\n<p><strong>Atomic Reference Counting (<code>Arc&lt;T&gt;</code>):</strong> A thread-safe reference-counting pointer. <a href=\"https://doc.rust-lang.org/std/sync/struct.Arc.html\">Documentation</a>.</p>\n</li>\n<li>\n<p><strong>Boxes (<code>Box&lt;T&gt;</code>):</strong> A pointer type for heap allocation. <a href=\"https://doc.rust-lang.org/std/boxed/struct.Box.html\">Documentation</a>.</p>\n</li>\n<li>\n<p><strong>Cells (<code>Cell&lt;T&gt;</code>):</strong> A mutable memory location with 'interior mutability'. <a href=\"https://doc.rust-lang.org/std/cell/struct.Cell.html\">Documentation</a>.</p>\n</li>\n<li>\n<p><strong>RefCells (<code>RefCell&lt;T&gt;</code>):</strong> A single-threaded mutable memory location with dynamic borrow checking. <a href=\"https://doc.rust-lang.org/std/cell/struct.RefCell.html\">Documentation</a>.</p>\n</li>\n<li>\n<p><strong>Mutexes (<code>Mutex&lt;T&gt;</code>):</strong> A mutual exclusion primitive useful for protecting shared data. <a href=\"https://doc.rust-lang.org/std/sync/struct.Mutex.html\">Documentation</a>.</p>\n</li>\n<li>\n<p><strong>Read-Write Locks (<code>RwLock&lt;T&gt;</code>):</strong> A reader-writer lock allowing multiple readers or one writer. <a href=\"https://doc.rust-lang.org/std/sync/struct.RwLock.html\">Documentation</a>.</p>\n</li>\n</ul>\n<h2 id=\"identifying-performance-issues-from-unnecessary-clones\">Identifying Performance Issues from Unnecessary Clones</h2>\n<p>Unnecessary <code>clone</code> operations can lead to performance degradation due to redundant memory allocations. To detect and address these issues:</p>\n<ol>\n<li>\n<p><strong>Profiling Tools:</strong></p>\n<ul>\n<li>\n<p><strong><code>cargo flamegraph</code>:</strong> Generates flamegraphs to visualize CPU usage, helping identify performance bottlenecks, including excessive cloning. <a href=\"https://github.com/flamegraph-rs/flamegraph\">GitHub Repository</a>.</p>\n</li>\n<li>\n<p><strong><code>perf</code>:</strong> A powerful performance analyzing tool on Linux that can profile Rust applications to detect inefficient code paths. <a href=\"https://nnethercote.github.io/perf-book/profiling.html\">Rust Performance Book - Profiling</a>.</p>\n</li>\n</ul>\n</li>\n<li>\n<p><strong>Static Analysis:</strong></p>\n<ul>\n<li><strong><code>cargo clippy</code>:</strong> A linter that provides warnings about common mistakes, including unnecessary clones. Running <code>cargo clippy</code> can highlight instances where cloning is avoidable. 
<a href=\"https://rust-unofficial.github.io/patterns/anti_patterns/borrow_clone.html\">Rust Design Patterns - Clone to Satisfy the Borrow Checker</a>.</li>\n</ul>\n</li>\n<li>\n<p><strong>Code Review:</strong></p>\n<ul>\n<li><strong>Manual Inspection:</strong> Review your codebase to identify <code>clone</code> calls. Assess whether ownership transfer or borrowing (<code>&amp;T</code> or <code>&amp;mut T</code>) is more appropriate.</li>\n</ul>\n</li>\n<li>\n<p><strong>Benchmarking:</strong></p>\n<ul>\n<li><strong><code>criterion.rs</code>:</strong> A benchmarking tool to measure and compare the performance of Rust code, useful for assessing the impact of removing unnecessary clones. <a href=\"https://github.com/bheisler/criterion.rs\">Criterion.rs</a>.</li>\n</ul>\n</li>\n</ol>\n<p>By utilizing these tools and practices, you can effectively identify and mitigate performance issues arising from unnecessary cloning in your Rust applications.</p>\n",
      summary: null,
      date: "2024-12-23T00:00:00Z",
      metadata: {},
      tags: ["rust","memory","systems"],
      categories: ["misc"],
      series: [],
      projects: []
  };



  
  
  
  

  
    
  
  
    
  
  
    
  
  

  CREATE post CONTENT {
      title: "Mastering C with Effective C: Introduction to Makefiles",
      slug: "mastering-c-makefiles",
      path: "https://parkerjones.dev/posts/mastering-c-makefiles/",
      content: "<h3 id=\"mastering-c-with-effective-c-introduction-to-makefiles\">Mastering C with Effective C: Introduction to Makefiles</h3>\n<h4 id=\"introduction\">Introduction</h4>\n<p>Welcome to the first post in the <a href=\"/mastering-c\">\"Mastering C with Effective C\" series</a>! In this post, we'll explore the basics of Makefiles and how they can help you manage and automate the build process of your C projects. Makefiles are an essential tool for any C programmer, simplifying the compilation process, managing dependencies, and ensuring efficient builds.</p>\n<h4 id=\"what-is-a-makefile\">What is a Makefile?</h4>\n<p>A Makefile is a special file, containing a set of directives used by the <code>make</code> build automation tool to compile and link a program. It defines rules on how to build different parts of your project, making it easier to manage larger projects with multiple source files.</p>\n<h4 id=\"benefits-of-using-makefiles\">Benefits of Using Makefiles</h4>\n<ul>\n<li><strong>Automation</strong>: Automates the compilation process, reducing the chances of human error.</li>\n<li><strong>Efficiency</strong>: Only rebuilds the parts of the project that have changed, saving time during development.</li>\n<li><strong>Organization</strong>: Keeps build instructions in a single, easy-to-read file.</li>\n<li><strong>Portability</strong>: Ensures that your project can be built consistently across different environments.</li>\n</ul>\n<h4 id=\"basic-structure-of-a-makefile\">Basic Structure of a Makefile</h4>\n<p>A Makefile typically consists of rules. Each rule defines how to build a target from its dependencies. The general structure is:</p>\n<pre data-lang=\"makefile\" class=\"language-makefile \"><code class=\"language-makefile\" data-lang=\"makefile\">target: dependencies\n    command\n</code></pre>\n<ul>\n<li><strong>target</strong>: The file to be generated (e.g., executable or object file).</li>\n<li><strong>dependencies</strong>: The files that the target depends on (e.g., source files).</li>\n<li><strong>command</strong>: The command to generate the target from the dependencies (e.g., compile command).</li>\n</ul>\n<h4 id=\"example-a-simple-makefile\">Example: A Simple Makefile</h4>\n<p>Let's start with a simple example. Suppose we have a project with three source files: <code>main.c</code>, <code>file1.c</code>, and <code>file2.c</code>. 
Here's a basic Makefile to compile these files into a single executable:</p>\n<pre data-lang=\"makefile\" class=\"language-makefile \"><code class=\"language-makefile\" data-lang=\"makefile\"># the compiler to use\nCC = clang\n\n# compiler flags:\n#  -g    adds debugging information to the executable file\n#  -Wall turns on most, but not all, compiler warnings\nCFLAGS  = -g -Wall\n  \n# files to link:\nLFLAGS = #-lcs50\n  \n# the name to use for the output file:\nTARGET = my_program\n  \n# the list of source files\nSRCS = main.c file1.c file2.c\n  \n# the list of object files (derived from the source files)\nOBJS = $(SRCS:.c=.o)\n  \nall: $(TARGET)\n  \n$(TARGET): $(OBJS)\n\t$(CC) $(CFLAGS) -o $(TARGET) $(OBJS) $(LFLAGS)\n  \n%.o: %.c\n\t$(CC) $(CFLAGS) -c $&lt; -o $@\n  \nclean:\n\trm -f $(OBJS) $(TARGET)\n</code></pre>\n<h4 id=\"breaking-down-the-makefile\">Breaking Down the Makefile</h4>\n<ul>\n<li><strong>Compiler and Flags</strong>: We specify the compiler (<code>clang</code>) and the compiler flags (<code>CFLAGS</code>).</li>\n<li><strong>Source and Object Files</strong>: <code>SRCS</code> lists the source files, and <code>OBJS</code> lists the corresponding object files.</li>\n<li><strong>Build Rules</strong>:\n<ul>\n<li><code>all: $(TARGET)</code>: The default target, which builds the executable.</li>\n<li><code>$(TARGET): $(OBJS)</code>: The rule to link the object files into the final executable.</li>\n<li><code>%.o: %.c</code>: A pattern rule to compile source files into object files.</li>\n</ul>\n</li>\n<li><strong>Clean Rule</strong>: The <code>clean</code> rule removes the generated object files and the executable.</li>\n</ul>\n<h4 id=\"using-the-makefile\">Using the Makefile</h4>\n<p>To use the Makefile, simply run the <code>make</code> command in the terminal within the project directory. This will compile and link the source files into the <code>my_program</code> executable.</p>\n<pre data-lang=\"sh\" class=\"language-sh \"><code class=\"language-sh\" data-lang=\"sh\">make\n</code></pre>\n<p>To clean up the generated files, run:</p>\n<pre data-lang=\"sh\" class=\"language-sh \"><code class=\"language-sh\" data-lang=\"sh\">make clean\n</code></pre>\n<h4 id=\"additional-resources\">Additional Resources</h4>\n<p>For more information and deeper dives into Makefiles, check out these resources:</p>\n<ul>\n<li><a href=\"https://www.gnu.org/software/make/manual/make.html\">GNU Make Manual</a></li>\n<li><a href=\"https://en.wikipedia.org/wiki/Make_(software)\">GNU Make on Wikipedia</a></li>\n<li><a href=\"https://www.tutorialspoint.com/makefile/index.htm\">Makefile Tutorial - TutorialsPoint</a></li>\n<li><a href=\"https://makefiletutorial.com/\">Makefile Tutorial by Example</a></li>\n<li><a href=\"https://cmake.org/documentation/\">CMake Documentation</a></li>\n</ul>\n<h4 id=\"conclusion\">Conclusion</h4>\n<p>Makefiles are a powerful tool for managing the build process of your C projects. By automating compilation and linking, they save time and reduce errors. In this post, we covered the basics of Makefiles, their benefits, and how to create a simple Makefile for a C project.</p>\n<h4 id=\"next-up\">Next Up</h4>\n<p>Stay tuned for the next post in the \"Mastering C with Effective C\" series, where we'll dive into data types and variables in C. We will explore different data types, how to declare and use variables, and the importance of understanding data representation.</p>\n<hr />\n<p>Happy coding!</p>\n",
      summary: null,
      date: "2024-06-14T00:00:00Z",
      metadata: {},
      tags: ["make","tutorial","ai","series","learning","c"],
      categories: ["software"],
      series: ["c"],
      projects: []
  };



  
  
  
  

  
    
  
  
    
  
  
    
  
  

  CREATE post CONTENT {
      title: "Mastering C: Building a Project with Nix Flakes",
      slug: "mastering-c-nix-flake",
      path: "https://parkerjones.dev/posts/mastering-c-nix-flake/",
      content: "<h1 id=\"upgrading-your-c-project-build-to-use-nix-flakes-a-modern-guide\">Upgrading Your C Project Build to Use Nix Flakes: A Modern Guide</h1>\n<p>Hello again, fellow C enthusiasts! In our previous blog post, we explored building a C program with external library dependencies using Nix. Today, we’re taking it a step further by upgrading our Nix setup to use Nix flakes, a more structured and modern way to manage dependencies and ensure reproducibility. This post will guide you through updating your existing Nix build to a Nix flake and explain each component in detail. Let’s dive in!</p>\n<h2 id=\"what-are-nix-flakes\">What Are Nix Flakes?</h2>\n<p>Nix flakes introduce a standardized way to define Nix projects, providing better dependency management, versioning, and reproducibility. They offer:</p>\n<ul>\n<li><strong>Consistency</strong>: Ensures builds are reproducible across different environments.</li>\n<li><strong>Ease of Use</strong>: Simplifies the setup and management of dependencies.</li>\n<li><strong>Modularity</strong>: Allows sharing and composing Nix expressions easily.</li>\n</ul>\n<p>For more details, you can check out the <a href=\"https://nixos.wiki/wiki/Flakes\">Nix Flakes documentation</a>.</p>\n<h2 id=\"transitioning-from-a-classic-nix-setup-to-nix-flakes\">Transitioning from a Classic Nix Setup to Nix Flakes</h2>\n<p>We’ll start with our <code>simple-parse-example</code> C program, which uses the <code>jansson</code> library for JSON parsing. We’ll convert our existing <code>default.nix</code> to a <code>flake.nix</code> file.</p>\n<h3 id=\"original-default-nix\">Original <code>default.nix</code></h3>\n<p>Here’s a quick look at our original <code>default.nix</code>:</p>\n<pre data-lang=\"nix\" class=\"language-nix \"><code class=\"language-nix\" data-lang=\"nix\">{ pkgs ? 
import &lt;nixpkgs&gt; {} }:\n\nlet\n  jansson = pkgs.jansson;\nin\npkgs.stdenv.mkDerivation {\n  pname = &quot;simple-parse-example&quot;;\n  version = &quot;1.0&quot;;\n\n  src = .&#x2F;.;\n\n  nativeBuildInputs = [ pkgs.clang ];\n  buildInputs = [ jansson ];\n\n  buildPhase = &#x27;&#x27;\n    clang -g -Wall -o simple-parse-example simple-parse-example.c -ljansson\n  &#x27;&#x27;;\n\n  installPhase = &#x27;&#x27;\n    mkdir -p $out&#x2F;bin\n    cp simple-parse-example $out&#x2F;bin&#x2F;\n  &#x27;&#x27;;\n\n  meta = with pkgs.lib; {\n    description = &quot;A simple program using jansson&quot;;\n    license = licenses.mit;\n    maintainers = [ maintainers.yourname ];\n    platforms = platforms.unix;\n  };\n}\n</code></pre>\n<h3 id=\"converting-to-flake-nix\">Converting to <code>flake.nix</code></h3>\n<p>We’ll now convert this setup to use Nix flakes.</p>\n<h4 id=\"step-1-creating-flake-nix\">Step 1: Creating <code>flake.nix</code></h4>\n<p>Create a file named <code>flake.nix</code> in your project directory with the following content:</p>\n<pre data-lang=\"nix\" class=\"language-nix \"><code class=\"language-nix\" data-lang=\"nix\">{\n  description = &quot;A simple C program using jansson built with Nix flakes&quot;;\n\n  inputs = {\n    nixpkgs.url = &quot;github:NixOS&#x2F;nixpkgs&#x2F;nixpkgs-unstable&quot;;\n  };\n\n  outputs = { self, nixpkgs }: {\n    packages = nixpkgs.lib.genAttrs [ &quot;aarch64-darwin&quot; &quot;x86_64-linux&quot; ] (system:\n    let\n      pkgs = import nixpkgs { inherit system; };\n    in\n    rec {\n      simple-parse-example = pkgs.stdenv.mkDerivation {\n        pname = &quot;simple-parse-example&quot;;\n        version = &quot;1.0&quot;;\n\n        src = .&#x2F;.;\n\n        nativeBuildInputs = [ pkgs.clang ];\n        buildInputs = [ pkgs.jansson ];\n\n        buildPhase = &#x27;&#x27;\n          clang -g -Wall -o simple-parse-example simple-parse-example.c -ljansson\n        &#x27;&#x27;;\n\n        installPhase = &#x27;&#x27;\n          mkdir -p $out&#x2F;bin\n          cp simple-parse-example $out&#x2F;bin&#x2F;\n        &#x27;&#x27;;\n\n        meta = with pkgs.lib; {\n          description = &quot;A simple program using jansson&quot;;\n          license = licenses.mit;\n          maintainers = [ maintainers.yourname ];\n          platforms = platforms.unix;\n        };\n      };\n    });\n\n    defaultPackage = {\n      aarch64-darwin = self.packages.aarch64-darwin.simple-parse-example;\n      x86_64-linux = self.packages.x86_64-linux.simple-parse-example;\n    };\n\n    defaultApp = {\n      forAllSystems = nixpkgs.lib.mapAttrs&#x27; (system: pkg: {\n        inherit system;\n        defaultApp = {\n          type = &quot;app&quot;;\n          program = &quot;${pkg.simple-parse-example}&#x2F;bin&#x2F;simple-parse-example&quot;;\n        };\n      }) self.packages;\n    };\n  };\n}\n</code></pre>\n<h3 id=\"breaking-down-the-flake-configuration\">Breaking Down the Flake Configuration</h3>\n<p>Let’s break down each component of the <code>flake.nix</code> file to understand what it does.</p>\n<h4 id=\"1-description\">1. <strong>description</strong></h4>\n<pre data-lang=\"nix\" class=\"language-nix \"><code class=\"language-nix\" data-lang=\"nix\">description = &quot;A simple C program using jansson built with Nix flakes&quot;;\n</code></pre>\n<p>This provides a brief description of the flake. It’s useful for documentation and understanding the purpose of the flake.</p>\n<h4 id=\"2-inputs\">2. 
<strong>inputs</strong></h4>\n<pre data-lang=\"nix\" class=\"language-nix \"><code class=\"language-nix\" data-lang=\"nix\">inputs = {\n  nixpkgs.url = &quot;github:NixOS&#x2F;nixpkgs&#x2F;nixpkgs-unstable&quot;;\n};\n</code></pre>\n<p>The <code>inputs</code> section defines dependencies for the flake. Here, we are specifying that we want to use the unstable branch of the Nixpkgs repository. This is where we’ll get our packages like <code>clang</code> and <code>jansson</code>.</p>\n<ul>\n<li><strong>Documentation</strong>: <a href=\"https://nixos.wiki/wiki/Nixpkgs\">Nixpkgs Input</a></li>\n</ul>\n<h4 id=\"3-outputs\">3. <strong>outputs</strong></h4>\n<pre data-lang=\"nix\" class=\"language-nix \"><code class=\"language-nix\" data-lang=\"nix\">outputs = { self, nixpkgs }: {\n  packages = nixpkgs.lib.genAttrs [ &quot;aarch64-darwin&quot; &quot;x86_64-linux&quot; ] (system:\n  let\n    pkgs = import nixpkgs { inherit system; };\n  in\n  rec {\n    simple-parse-example = pkgs.stdenv.mkDerivation {\n      pname = &quot;simple-parse-example&quot;;\n      version = &quot;1.0&quot;;\n\n      src = .&#x2F;.;\n\n      nativeBuildInputs = [ pkgs.clang ];\n      buildInputs = [ pkgs.jansson ];\n\n      buildPhase = &#x27;&#x27;\n        clang -g -Wall -o simple-parse-example simple-parse-example.c -ljansson\n      &#x27;&#x27;;\n\n      installPhase = &#x27;&#x27;\n        mkdir -p $out&#x2F;bin\n        cp simple-parse-example $out&#x2F;bin&#x2F;\n      &#x27;&#x27;;\n\n      meta = with pkgs.lib; {\n        description = &quot;A simple program using jansson&quot;;\n        license = licenses.mit;\n        maintainers = [ maintainers.yourname ];\n        platforms = platforms.unix;\n      };\n    };\n  });\n\n  defaultPackage = {\n    aarch64-darwin = self.packages.aarch64-darwin.simple-parse-example;\n    x86_64-linux = self.packages.x86_64-linux.simple-parse-example;\n  };\n\n  defaultApp = {\n    forAllSystems = nixpkgs.lib.mapAttrs&#x27; (system: pkg: {\n      inherit system;\n      defaultApp = {\n        type = &quot;app&quot;;\n        program = &quot;${pkg.simple-parse-example}&#x2F;bin&#x2F;simple-parse-example&quot;;\n      };\n    }) self.packages;\n  };\n};\n</code></pre>\n<ul>\n<li>\n<p><strong>packages</strong>: This section uses <code>nixpkgs.lib.genAttrs</code> to define packages for multiple systems (<code>aarch64-darwin</code> and <code>x86_64-linux</code>). For each system, we import the appropriate version of <code>nixpkgs</code> and define a derivation for <code>simple-parse-example</code>.</p>\n</li>\n<li>\n<p><strong>defaultPackage</strong>: Specifies the default package for each system. 
This is used to tell Nix which package to build by default for the current system.</p>\n</li>\n<li>\n<p><strong>defaultApp</strong>: Specifies the default application for each system using <code>mapAttrs'</code> to iterate over systems and create default apps.</p>\n</li>\n<li>\n<p><strong>Documentation</strong>:</p>\n<ul>\n<li><a href=\"https://nixos.org/manual/nixpkgs/stable/#sec-functions-lib-genAttrs\">genAttrs</a></li>\n<li><a href=\"https://nixos.org/manual/nixpkgs/stable/#chap-stdenv\">mkDerivation</a></li>\n</ul>\n</li>\n</ul>\n<h3 id=\"building-and-running-with-nix-flakes\">Building and Running with Nix Flakes</h3>\n<p>To ensure that everything works as expected, follow these steps:</p>\n<ol>\n<li>\n<p><strong>Navigate to your project directory</strong>:</p>\n<pre data-lang=\"sh\" class=\"language-sh \"><code class=\"language-sh\" data-lang=\"sh\">cd ~&#x2F;dev&#x2F;learning-c&#x2F;parsing-json\n</code></pre>\n</li>\n<li>\n<p><strong>Enable Flakes</strong>:\nMake sure flakes are enabled in your Nix configuration:</p>\n<pre data-lang=\"sh\" class=\"language-sh \"><code class=\"language-sh\" data-lang=\"sh\">echo &quot;experimental-features = nix-command flakes&quot; &gt;&gt; ~&#x2F;.config&#x2F;nix&#x2F;nix.conf\n</code></pre>\n</li>\n<li>\n<p><strong>Build the project</strong>:\nBuild the project using:</p>\n<pre data-lang=\"sh\" class=\"language-sh \"><code class=\"language-sh\" data-lang=\"sh\">nix build .#simple-parse-example\n</code></pre>\n</li>\n<li>\n<p><strong>Run the binary</strong>:\nAfter building, you can run the binary with:</p>\n<pre data-lang=\"sh\" class=\"language-sh \"><code class=\"language-sh\" data-lang=\"sh\">.&#x2F;result&#x2F;bin&#x2F;simple-parse-example\n</code></pre>\n</li>\n</ol>\n<h3 id=\"conclusion\">Conclusion</h3>\n<p>Congratulations! You've successfully updated your C project to use Nix flakes. This modern approach ensures your builds are reproducible and dependencies are well-managed. Nix flakes provide a powerful way to handle complex projects with ease, making your development process smoother and more efficient.</p>\n<h3 id=\"exercise-prompt\">Exercise Prompt</h3>\n<p>Try converting another one of your existing projects to use Nix flakes. Start by writing a <code>flake.nix</code> file that includes all your dependencies and build instructions. Share your experience, any challenges you encountered, and how Nix flakes improved your build process.</p>\n<p>Happy coding, and may your builds always be reproducible!</p>\n",
      summary: null,
      date: "2024-06-14T00:00:00Z",
      metadata: {},
      tags: ["tutorial","ai","series","nix","tutorial","learning","c","mastering-c"],
      categories: ["software"],
      series: ["mastering c","nix"],
      projects: []
  };



  
  
  
  

  
    
  
  
    
  
  
    
  
  

  CREATE post CONTENT {
      title: "Mastering C: Building a Project with Nix",
      slug: "mastering-c-nix",
      path: "https://parkerjones.dev/posts/mastering-c-nix/",
      content: "<p>Welcome, fellow C enthusiasts, to a whimsical journey through the world of library dependencies in C programming! This is the second post in the series <a href=\"/mastering-c\">\"Mastering C with Effective C\" series</a>!  Not at all suprising to me, I've yet to write anything at all from the book, and I'm still poking around with build systems.  I've been learning to use nix as a build system, and some things are becoming clear when learning C, which has no built in package manager...</p>\n<p>Today, we will dive into the exciting realm of building C programs with external libraries, and we’ll showcase how to do it using Nix, a modern build system that promises reproducibility and simplicity. But don't worry, we’ll also touch upon the classic Makefile approach for comparison. Let’s get started!</p>\n<p>The code from this post can be found <a href=\"https://github.com/parallaxisjones/c/tree/main/parsing-json\">here</a></p>\n<h2 id=\"the-challenge-of-library-dependencies-in-c\">The Challenge of Library Dependencies in C</h2>\n<p>Before we embark on our adventure, let's acknowledge the challenge: managing external libraries in C. While modern languages often come with robust package managers, C developers often find themselves navigating the rugged terrain of manual downloads and builds. But fear not! With the power of Nix, we can transform this daunting task into a delightful experience.</p>\n<h2 id=\"introducing-our-hero-the-simple-json-parsing-example\">Introducing Our Hero: The Simple JSON Parsing Example</h2>\n<p>To illustrate our journey, we'll create a C program called <code>simple-parse-example</code>. This program will demonstrate basic JSON parsing using the <code>jansson</code> library. Here’s what it will do:</p>\n<ul>\n<li>Create a JSON object.</li>\n<li>Serialize the JSON object to a string.</li>\n<li>Parse a JSON string back into an object.</li>\n<li>Print the values to prove we’ve successfully navigated the JSON landscape.</li>\n</ul>\n<h3 id=\"the-c-program-simple-parse-example\">The C Program: <code>simple-parse-example</code></h3>\n<p>Here’s our simple C program using the <code>jansson</code> library:</p>\n<pre data-lang=\"c\" class=\"language-c \"><code class=\"language-c\" data-lang=\"c\">&#x2F;&#x2F; simple-parse-example.c\n#include &lt;stdio.h&gt;\n#include &lt;jansson.h&gt;\n\nint main() {\n    &#x2F;&#x2F; Creating a JSON object\n    json_t *object = json_object();\n    json_object_set_new(object, &quot;name&quot;, json_string(&quot;Candlehopper&quot;));\n    json_object_set_new(object, &quot;level&quot;, json_integer(5));\n    json_object_set_new(object, &quot;score&quot;, json_integer(12345));\n\n    &#x2F;&#x2F; Serialize JSON object to string\n    char *json_str = json_dumps(object, JSON_INDENT(2));\n    if (!json_str) {\n        fprintf(stderr, &quot;Error serializing JSON object.\\n&quot;);\n        return 1;\n    }\n    printf(&quot;Serialized JSON:\\n%s\\n&quot;, json_str);\n\n    &#x2F;&#x2F; Free the serialized string\n    free(json_str);\n\n    &#x2F;&#x2F; JSON string to parse\n    const char *json_input = &quot;{\\&quot;name\\&quot;: \\&quot;Candlehopper\\&quot;, \\&quot;level\\&quot;: 5, \\&quot;score\\&quot;: 12345}&quot;;\n\n    &#x2F;&#x2F; Parse JSON string\n    json_error_t error;\n    json_t *parsed_object = json_loads(json_input, 0, &amp;error);\n    if (!parsed_object) {\n        fprintf(stderr, &quot;Error parsing JSON string: %s\\n&quot;, error.text);\n        return 1;\n    }\n\n    &#x2F;&#x2F; Extract values\n    json_t 
*name = json_object_get(parsed_object, &quot;name&quot;);\n    json_t *level = json_object_get(parsed_object, &quot;level&quot;);\n    json_t *score = json_object_get(parsed_object, &quot;score&quot;);\n\n    if (json_is_string(name) &amp;&amp; json_is_integer(level) &amp;&amp; json_is_integer(score)) {\n        printf(&quot;Parsed JSON:\\n&quot;);\n        printf(&quot;name: %s\\n&quot;, json_string_value(name));\n        printf(&quot;level: %lld\\n&quot;, json_integer_value(level));\n        printf(&quot;score: %lld\\n&quot;, json_integer_value(score));\n    } else {\n        fprintf(stderr, &quot;Error extracting values from JSON object.\\n&quot;);\n        json_decref(parsed_object);\n        return 1;\n    }\n\n    &#x2F;&#x2F; Decrement reference counts to free memory\n    json_decref(object);\n    json_decref(parsed_object);\n\n    return 0;\n}\n</code></pre>\n<h2 id=\"the-classic-approach-using-makefile\">The Classic Approach: Using Makefile</h2>\n<p>First, let’s explore how to build this program using a Makefile. Here’s a simple Makefile to compile <code>simple-parse-example</code> with the <code>jansson</code> library:</p>\n<h3 id=\"makefile\">Makefile</h3>\n<pre data-lang=\"makefile\" class=\"language-makefile \"><code class=\"language-makefile\" data-lang=\"makefile\"># the compiler to use\nCC = clang\n\n# compiler flags:\n#  -g    adds debugging information to the executable file\n#  -Wall turns on most, but not all, compiler warnings\nCFLAGS  = -g -Wall\n  \n# files to link:\nLFLAGS = -ljansson\n  \n# the names to use for both the target source files, and the output files:\nTARGETS = simple-parse-example\n  \nall: $(TARGETS)\n  \nsimple-parse-example: simple-parse-example.c\n\t$(CC) $(CFLAGS) -o simple-parse-example simple-parse-example.c $(LFLAGS)\n\nclean:\n\trm -f $(TARGETS)\n</code></pre>\n<p>To build the project:</p>\n<pre data-lang=\"sh\" class=\"language-sh \"><code class=\"language-sh\" data-lang=\"sh\">make\n</code></pre>\n<p>To clean up the build files:</p>\n<pre data-lang=\"sh\" class=\"language-sh \"><code class=\"language-sh\" data-lang=\"sh\">make clean\n</code></pre>\n<h3 id=\"pros-and-cons-of-the-makefile-approach\">Pros and Cons of the Makefile Approach</h3>\n<p><strong>Pros:</strong></p>\n<ul>\n<li>Simple and straightforward.</li>\n<li>Easily integrates with various compilers and tools.</li>\n</ul>\n<p><strong>Cons:</strong></p>\n<ul>\n<li>Manual management of dependencies.</li>\n<li>Non-reproducible builds.</li>\n<li>Platform-specific issues.</li>\n</ul>\n<h2 id=\"the-modern-approach-using-nix\">The Modern Approach: Using Nix</h2>\n<p>Now, let's switch gears and harness the power of Nix to build our project. Nix provides reproducibility and isolation, ensuring our builds are consistent across different environments.</p>\n<h3 id=\"nix-setup\">Nix Setup</h3>\n<p>Here’s how you can set up a <code>default.nix</code> file to build <code>simple-parse-example</code>:</p>\n<pre data-lang=\"nix\" class=\"language-nix \"><code class=\"language-nix\" data-lang=\"nix\"># default.nix\n{ pkgs ? 
import &lt;nixpkgs&gt; {} }:\n\nlet\n  jansson = pkgs.jansson;\nin\npkgs.stdenv.mkDerivation {\n  pname = &quot;simple-parse-example&quot;;\n  version = &quot;1.0&quot;;\n\n  src = .&#x2F;.;\n\n  nativeBuildInputs = [ pkgs.clang ];\n  buildInputs = [ jansson ];\n\n  buildPhase = &#x27;&#x27;\n    clang -g -Wall -o simple-parse-example simple-parse-example.c -ljansson\n  &#x27;&#x27;;\n\n  installPhase = &#x27;&#x27;\n    mkdir -p $out&#x2F;bin\n    cp simple-parse-example $out&#x2F;bin&#x2F;\n  &#x27;&#x27;;\n\n  meta = with pkgs.lib; {\n    description = &quot;A simple program using jansson&quot;;\n    license = licenses.mit;\n    maintainers = [ maintainers.yourname ];\n    platforms = platforms.unix;\n  };\n}\n</code></pre>\n<h3 id=\"building-with-nix\">Building with Nix</h3>\n<p>To build your project using Nix:</p>\n<ol>\n<li>\n<p><strong>Navigate to your project directory</strong>:</p>\n<pre data-lang=\"sh\" class=\"language-sh \"><code class=\"language-sh\" data-lang=\"sh\">cd myproject\n</code></pre>\n</li>\n<li>\n<p><strong>Build the project</strong>:</p>\n<pre data-lang=\"sh\" class=\"language-sh \"><code class=\"language-sh\" data-lang=\"sh\">nix-build\n</code></pre>\n</li>\n<li>\n<p><strong>Run the binary</strong>:</p>\n<pre data-lang=\"sh\" class=\"language-sh \"><code class=\"language-sh\" data-lang=\"sh\">.&#x2F;result&#x2F;bin&#x2F;simple-parse-example\n</code></pre>\n</li>\n</ol>\n<h3 id=\"pros-and-cons-of-the-nix-approach\">Pros and Cons of the Nix Approach</h3>\n<p><strong>Pros:</strong></p>\n<ul>\n<li>Reproducible builds.</li>\n<li>Easy dependency management.</li>\n<li>Environment isolation.</li>\n</ul>\n<p><strong>Cons:</strong></p>\n<ul>\n<li>Steeper learning curve.</li>\n<li>Requires Nix installation.</li>\n</ul>\n<h2 id=\"exploring-other-build-tools\">Exploring Other Build Tools</h2>\n<p>While Makefiles and Nix are fantastic tools, there are other build systems and package managers worth exploring:</p>\n<ol>\n<li><strong>CMake</strong>: A cross-platform build system that automates the configuration process. <a href=\"https://cmake.org/\">Learn more</a>.</li>\n<li><strong>vcpkg</strong>: A C/C++ library manager from Microsoft. <a href=\"https://github.com/microsoft/vcpkg\">Learn more</a>.</li>\n<li><strong>Conan</strong>: A decentralized package manager for C/C++. <a href=\"https://conan.io/\">Learn more</a>.</li>\n</ol>\n<h2 id=\"conclusion\">Conclusion</h2>\n<p>In this quirky adventure, we’ve explored the classic Makefile approach and the modern Nix approach to building a C program with external library dependencies. We’ve seen how Nix can bring reproducibility and ease of use to the table, making it a fantastic choice for C developers looking to streamline their build processes.</p>\n<!-- ### Exercise Prompt -->\n<!---->\n<!-- Try converting one of your existing C projects to use Nix for its build process. Start by writing a simple `default.nix` file that includes all your dependencies and build instructions. Share your experience and any challenges you encountered along the way. -->\n<p>Happy coding, and may your builds always be reproducible!</p>\n",
      summary: null,
      date: "2024-06-14T00:00:00Z",
      metadata: {},
      tags: ["c","tutorial","ai","dependencies","systems"],
      categories: ["software"],
      series: ["c","nix"],
      projects: []
  };



  
  
  
  

  
    
  
  
    
  
  
    
  
  

  CREATE post CONTENT {
      title: "Blog Post Series: Mastering C with Effective C",
      slug: "mastering-c",
      path: "https://parkerjones.dev/posts/mastering-c/",
      content: "<h2 id=\"introduction\">Introduction</h2>\n<p>Welcome to my blog series, \"Mastering C with Effective C\"! This series is dedicated to exploring the fundamental concepts and advanced techniques of the C programming language. Inspired by the book <a href=\"https://www.amazon.com/Effective-Introduction-Professional-Robert-Seacord/dp/1718501048\">Effective C</a> by <a href=\"https://en.wikipedia.org/wiki/Robert_C._Seacord\">Robert C. Seacord</a>, each post will delve into different aspects of C as I work through this book and make an effort to write a blog post every day.</p>\n<p>My ultimate goal is to become comfortable with the C programming language, but ultimately use <a href=\"https://zig.guide/\">Zig</a>.... but first we're going to touch the stove. You have to get burned to understand why you need the safety and quality of life improvements.</p>\n<p>For the past few years I've gone pretty hard learning and working every day in Rust. I began my Rust journey as a (primarily) Typescript and Javascript develper. I began my software engineer as a web developer writing PHP, JavaScript, Typescript, HTML,and CSS.</p>\n<p>As a self taught developer, I've built my career on learning side quests, which I've always considered par for the course as a software engineer.</p>\n<p>I'll be working through examples and sharing the code from the series in the following <a href=\"https://github.com/parallaxisjones/c\">Git Repo</a></p>\n<h2 id=\"what-to-expect\">What to Expect</h2>\n<p>In this series, we'll cover a wide range of topics, starting from the basics and gradually moving towards more advanced concepts. Here are some of the key areas we'll explore:</p>\n<ol>\n<li><strong>Getting Started with C</strong>: Understanding the basic structure of a C program, setting up the development environment, and writing your first C program.</li>\n<li><strong>Data Types and Variables</strong>: Exploring different data types in C, how to declare and use variables, and the importance of understanding data representation.</li>\n<li><strong>Control Structures</strong>: Learning about various control structures such as loops, conditional statements, and how to control the flow of your program.</li>\n<li><strong>Functions and Modular Programming</strong>: Understanding the role of functions in C, how to define and call them, and the benefits of modular programming.</li>\n<li><strong>Pointers and Memory Management</strong>: Delving into pointers, dynamic memory allocation, and techniques for effective memory management in C.</li>\n<li><strong>Advanced Topics</strong>: Covering topics such as file I/O, multi-threading, and network programming.</li>\n</ol>\n<p>Each post will include practical examples, exercises, and explanations to help solidify your understanding of the concepts.</p>\n<h4 id=\"link-to-subsequent-posts\">Link to Subsequent Posts</h4>\n<p>This section will be updated with links to each post in the series as they are published. 
Stay tuned for new content and be sure to check back regularly!</p>\n<ol>\n<li><strong>Introduction to Makefiles</strong>: <a href=\"/mastering-c-makefiles\">Read Post</a></li>\n<li><strong>Converting Makefile to a nix build</strong>: <a href=\"/mastering-c-nix\">Read Post</a></li>\n<li>Neovim Configuration for Writing C: [Coming Soon]</li>\n<li>Data Types and Variables: [Coming Soon]</li>\n<li>Control Structures: [Coming Soon]</li>\n<li>Functions and Modular Programming: [Coming Soon]</li>\n<li>Pointers and Memory Management: [Coming Soon]</li>\n<li>Advanced Topics: [Coming Soon]</li>\n</ol>\n<h4 id=\"next-up-introduction-to-makefiles\">Next Up: Introduction to Makefiles</h4>\n<p>In the next post, we'll dive into the world of Makefiles. We'll explore how to automate the build process, manage dependencies, and simplify your workflow using Makefiles. This will include practical examples and a step-by-step guide to creating your own Makefile for a C project.</p>\n<p>Stay tuned and happy coding!</p>\n",
      summary: null,
      date: "2024-06-14T00:00:00Z",
      metadata: {},
      tags: ["tutorial","ai","series"],
      categories: ["software"],
      series: ["c"],
      projects: []
  };



  
  
  
  

  
    
  
  
    
  
  
    
  
  

  CREATE post CONTENT {
      title: "Drawing a Rotating Cube in C: A Beginner-Friendly Guide",
      slug: "cube-beginner",
      path: "https://parkerjones.dev/posts/cube-beginner/",
      content: "<video width=\"320\" height=\"240\" controls>\n  <source src=\"/Screen Recording 2024-06-12 at 8.38.01 PM.mov\" type=\"video/mp4\">\n</video>\n<p><a href=\"https://github.com/parallaxisjones/c/blob/main/c3.c\">github repo</a></p>\n<p>Have you ever wondered how to create simple animations on your computer screen? In this post, we'll walk through how to draw a rotating cube using the C programming language. Don't worry if you're not familiar with coding or computer science – we'll break it down step-by-step.</p>\n<h2 id=\"introduction\">Introduction</h2>\n<p>This project will involve drawing a 3D cube on a 2D terminal screen and making it rotate to create an animation effect. We'll use some math and programming concepts, but we'll explain them in simple terms.</p>\n<h2 id=\"ingredients-the-building-blocks\">Ingredients: The Building Blocks</h2>\n<h3 id=\"including-essential-libraries\">Including Essential Libraries</h3>\n<p>First, we include some standard libraries that provide tools for our code:</p>\n<pre data-lang=\"c\" class=\"language-c \"><code class=\"language-c\" data-lang=\"c\">#include &lt;stdio.h&gt;    &#x2F;&#x2F; For standard input and output functions\n#include &lt;string.h&gt;   &#x2F;&#x2F; For manipulating text strings\n#include &lt;unistd.h&gt;   &#x2F;&#x2F; For creating delays\n#include &lt;math.h&gt;     &#x2F;&#x2F; For mathematical functions like cosine and sine\n#include &lt;stdlib.h&gt;   &#x2F;&#x2F; For general functions like absolute value\n</code></pre>\n<h3 id=\"setting-up-the-canvas\">Setting Up the Canvas</h3>\n<p>We define the size of our canvas (screen):</p>\n<pre data-lang=\"c\" class=\"language-c \"><code class=\"language-c\" data-lang=\"c\">#define W 80          &#x2F;&#x2F; Width of the canvas\n#define H 22          &#x2F;&#x2F; Height of the canvas\n</code></pre>\n<h3 id=\"defining-3d-points-and-projections\">Defining 3D Points and Projections</h3>\n<p>We'll use macros (short snippets of code) to handle 3D points and project them onto a 2D screen:</p>\n<pre data-lang=\"c\" class=\"language-c \"><code class=\"language-c\" data-lang=\"c\">#define A(x, y, z, a, b, c) float a = x, b = y, c = z\n#define M(a, b, x, y, z) float f = 2 &#x2F; (z + 5); a = 40 + 15 * x * f; b = 10 + 15 * y * f\n#define P(i, j) (m[i][0] * x + m[i][1] * y + m[i][2] * z)\n#define R(a, b, c, d) float c = cos(a), d = sin(a); m[0][0] = 1; m[0][1] = 0; m[0][2] = 0; m[1][0] = 0; m[1][1] = c; m[1][2] = -d; m[2][0] = 0; m[2][1] = d; m[2][2] = c\n</code></pre>\n<ul>\n<li><strong>A</strong>: Defines a 3D point.</li>\n<li><strong>M</strong>: Projects a 3D point onto the 2D screen.</li>\n<li><strong>P</strong>: Multiplies coordinates by a rotation matrix.</li>\n<li><strong>R</strong>: Sets up a rotation matrix for rotating the cube.</li>\n</ul>\n<h2 id=\"step-by-step-animation-creation\">Step-by-Step Animation Creation</h2>\n<h3 id=\"initial-setup\">Initial Setup</h3>\n<p>We start by defining some variables and initializing our canvas:</p>\n<pre data-lang=\"c\" class=\"language-c \"><code class=\"language-c\" data-lang=\"c\">int main() {\n    float m[3][3], t = 0; &#x2F;&#x2F; Rotation matrix and time variable\n    int x, y, z, i, j, k; &#x2F;&#x2F; Loop counters and temporary variables\n    char c[W * H]; &#x2F;&#x2F; Screen buffer\n    float vertices[8][3], transformed[8][2]; &#x2F;&#x2F; Arrays for 3D and 2D points\n    int faces[6][4] = {\n        {0, 1, 3, 2},\n        {4, 5, 7, 6},\n        {0, 1, 5, 4},\n        {2, 3, 7, 6},\n        {0, 2, 6, 4},\n        {1, 3,7, 5}\n 
   }; &#x2F;&#x2F; Definitions of cube faces\n    char shades[] = &quot; .:-=+*#%@&quot;; &#x2F;&#x2F; Shading characters\n\n    memset(c, &#x27; &#x27;, sizeof(c)); &#x2F;&#x2F; Initialize the screen buffer with spaces\n</code></pre>\n<h3 id=\"main-loop-drawing-frames\">Main Loop: Drawing Frames</h3>\n<p>We continuously draw new frames to create the animation:</p>\n<pre data-lang=\"c\" class=\"language-c \"><code class=\"language-c\" data-lang=\"c\">    for (;;) {\n        t += 0.05; &#x2F;&#x2F; Increment time\n        R(t, 1, c1, s1); &#x2F;&#x2F; Set up the rotation matrix\n</code></pre>\n<h3 id=\"calculate-3d-to-2d-projection\">Calculate 3D to 2D Projection</h3>\n<p>For each corner of the cube, we calculate its new position after rotating and project it onto the 2D screen:</p>\n<pre data-lang=\"c\" class=\"language-c \"><code class=\"language-c\" data-lang=\"c\">        for (i = 0; i &lt; 8; i++) {\n            A((i &amp; 1) * 2 - 1, ((i &gt;&gt; 1) &amp; 1) * 2 - 1, ((i &gt;&gt; 2) &amp; 1) * 2 - 1, x, y, z);\n            float px = P(0, 1), py = P(1, 1), pz = P(2, 1);\n            vertices[i][0] = px;\n            vertices[i][1] = py;\n            vertices[i][2] = pz;\n            float a, b;\n            M(a, b, px, py, pz);\n            transformed[i][0] = a;\n            transformed[i][1] = b;\n        }\n</code></pre>\n<h3 id=\"draw-cube-faces-with-shading\">Draw Cube Faces with Shading</h3>\n<p>We calculate the shading for each face and draw the edges:</p>\n<pre data-lang=\"c\" class=\"language-c \"><code class=\"language-c\" data-lang=\"c\">        for (i = 0; i &lt; 6; i++) {\n            float normal_x = 0, normal_y = 0, normal_z = 0;\n            for (j = 0; j &lt; 4; j++) {\n                int next = (j + 1) % 4;\n                normal_x += (vertices[faces[i][j]][1] - vertices[faces[i][next]][1]) * (vertices[faces[i][j]][2] + vertices[faces[i][next]][2]);\n                normal_y += (vertices[faces[i][j]][2] - vertices[faces[i][next]][2]) * (vertices[faces[i][j]][0] + vertices[faces[i][next]][0]);\n                normal_z += (vertices[faces[i][j]][0] - vertices[faces[i][next]][0]) * (vertices[faces[i][j]][1] + vertices[faces[i][next]][1]);\n            }\n            float shade = normal_z &#x2F; sqrt(normal_x * normal_x + normal_y * normal_y + normal_z * normal_z);\n            char shade_char = shades[(int)((shade + 1) * 4.5)];\n\n            for (j = 0; j &lt; 4; j++) {\n                int next = (j + 1) % 4;\n                int x0 = (int)transformed[faces[i][j]][0], y0 = (int)transformed[faces[i][j]][1];\n                int x1 = (int)transformed[faces[i][next]][0], y1 = (int)transformed[faces[i][next]][1];\n                int dx = abs(x1 - x0), dy = abs(y1 - y0);\n                int sx = x0 &lt; x1 ? 1 : -1, sy = y0 &lt; y1 ? 1 : -1;\n                int err = (dx &gt; dy ? 
dx : -dy) &#x2F; 2, e2;\n                while (1) {\n                    if (x0 &gt;= 0 &amp;&amp; x0 &lt; W &amp;&amp; y0 &gt;= 0 &amp;&amp; y0 &lt; H) c[y0 * W + x0] = shade_char;\n                    if (x0 == x1 &amp;&amp; y0 == y1) break;\n                    e2 = err;\n                    if (e2 &gt; -dx) { err -= dy; x0 += sx; }\n                    if (e2 &lt; dy) { err += dx; y0 += sy; }\n                }\n            }\n        }\n</code></pre>\n<h3 id=\"display-the-frame\">Display the Frame</h3>\n<p>We print the current frame and set up for the next:</p>\n<pre data-lang=\"c\" class=\"language-c \"><code class=\"language-c\" data-lang=\"c\">        for (i = 0; i &lt; H; i++) {\n            for (j = 0; j &lt; W; j++) putchar(c[i * W + j]);\n            putchar(&#x27;\\n&#x27;);\n        }\n\n        usleep(100000); &#x2F;&#x2F; Wait for 100 milliseconds\n        printf(&quot;\\x1b[%dA&quot;, H); &#x2F;&#x2F; Move the cursor back to the top of the screen\n        memset(c, &#x27; &#x27;, sizeof(c)); &#x2F;&#x2F; Clear the screen buffer\n    }\n}\n</code></pre>\n<h2 id=\"understanding-the-math\">Understanding the Math</h2>\n<h3 id=\"3d-to-2d-projection\">3D to 2D Projection</h3>\n<p>To convert a 3D point to a 2D point:</p>\n<pre><code>f = 2 &#x2F; (z + 5)\na = 40 + 15 * x * f\nb = 10 + 15 * y * f\n</code></pre>\n<p>Here, <code>(x, y, z)</code> are the 3D coordinates, and <code>(a, b)</code> are the 2D coordinates on the screen.</p>\n<h3 id=\"rotation-matrix\">Rotation Matrix</h3>\n<p>For rotating around the z-axis:</p>\n<pre><code>[ cos(θ) -sin(θ) 0 ]\n[ sin(θ)  cos(θ) 0 ]\n[    0       0    1 ]\n</code></pre>\n<p>This matrix multiplies with the coordinates to get the new rotated coordinates.</p>\n<h3 id=\"normal-vector\">Normal Vector</h3>\n<p>The normal vector is a vector that is perpendicular to a surface. It helps in determining the shading:</p>\n<pre><code>Normal = (v1.y * v2.z - v1.z * v2.y, v1.z * v2.x - v1.x * v2.z, v1.x * v2.y - v1.y * v2.x)\n</code></pre>\n<p>Here, <code>v1</code> and <code>v2</code> are vectors on the face of</p>\n<p>the cube.</p>\n<h2 id=\"conclusion\">Conclusion</h2>\n<p>By following these steps, you can create a simple but fascinating rotating cube animation on your terminal. This project is a great way to get started with graphics programming and understand how 3D objects can be represented on 2D screens. 
Happy coding!</p>\n<h2 id=\"full-code-example\">Full Code Example</h2>\n<pre data-lang=\"c\" class=\"language-c \"><code class=\"language-c\" data-lang=\"c\">\n#include &lt;stdio.h&gt;\n#include &lt;string.h&gt;\n#include &lt;unistd.h&gt;\n#include &lt;math.h&gt;\n#include &lt;stdlib.h&gt; &#x2F;&#x2F; For abs function\n\n#define W 80\n#define H 22\n#define A(x, y, z, a, b, c) float a = x, b = y, c = z\n#define M(a, b, x, y, z) float f = 2 &#x2F; (z + 5); a = 40 + 15 * x * f; b = 10 + 15 * y * f\n#define P(i, j) (m[i][0] * x + m[i][1] * y + m[i][2] * z)\n#define R(a, b, c, d) float c = cos(a), d = sin(a); m[0][0] = 1; m[0][1] = 0; m[0][2] = 0; m[1][0] = 0; m[1][1] = c; m[1][2] = -d; m[2][0] = 0; m[2][1] = d; m[2][2] = c\n\nint main() {\n    float m[3][3], t = 0;\n    int x, y, z, i, j, k;\n    char c[W * H];\n    float vertices[8][3], transformed[8][2];\n    int faces[6][4] = {\n        {0, 1, 3, 2},\n        {4, 5, 7, 6},\n        {0, 1, 5, 4},\n        {2, 3, 7, 6},\n        {0, 2, 6, 4},\n        {1, 3, 7, 5}\n    };\n    char shades[] = &quot; .:-=+*#%@&quot;;\n\n    memset(c, &#x27; &#x27;, sizeof(c));\n\n    for (;;) {\n        t += 0.05;\n        R(t, 1, c1, s1);\n\n        for (i = 0; i &lt; 8; i++) {\n            A((i &amp; 1) * 2 - 1, ((i &gt;&gt; 1) &amp; 1) * 2 - 1, ((i &gt;&gt; 2) &amp; 1) * 2 - 1, x, y, z);\n            float px = P(0, 1), py = P(1, 1), pz = P(2, 1);\n            vertices[i][0] = px;\n            vertices[i][1] = py;\n            vertices[i][2] = pz;\n            float a, b;\n            M(a, b, px, py, pz);\n            transformed[i][0] = a;\n            transformed[i][1] = b;\n        }\n\n        for (i = 0; i &lt; 6; i++) {\n            float normal_x = 0, normal_y = 0, normal_z = 0;\n            for (j = 0; j &lt; 4; j++) {\n                int next = (j + 1) % 4;\n                normal_x += (vertices[faces[i][j]][1] - vertices[faces[i][next]][1]) * (vertices[faces[i][j]][2] + vertices[faces[i][next]][2]);\n                normal_y += (vertices[faces[i][j]][2] - vertices[faces[i][next]][2]) * (vertices[faces[i][j]][0] + vertices[faces[i][next]][0]);\n                normal_z += (vertices[faces[i][j]][0] - vertices[faces[i][next]][0]) * (vertices[faces[i][j]][1] + vertices[faces[i][next]][1]);\n            }\n            float shade = normal_z &#x2F; sqrt(normal_x * normal_x + normal_y * normal_y + normal_z * normal_z);\n            char shade_char = shades[(int)((shade + 1) * 4.5)];\n\n            for (j = 0; j &lt; 4; j++) {\n                int next = (j + 1) % 4;\n                int x0 = (int)transformed[faces[i][j]][0], y0 = (int)transformed[faces[i][j]][1];\n                int x1 = (int)transformed[faces[i][next]][0], y1 = (int)transformed[faces[i][next]][1];\n                int dx = abs(x1 - x0), dy = abs(y1 - y0);\n                int sx = x0 &lt; x1 ? 1 : -1, sy = y0 &lt; y1 ? 1 : -1;\n                int err = (dx &gt; dy ? 
dx : -dy) &#x2F; 2, e2;\n                while (1) {\n                    if (x0 &gt;= 0 &amp;&amp; x0 &lt; W &amp;&amp; y0 &gt;= 0 &amp;&amp; y0 &lt; H) c[y0 * W + x0] = shade_char;\n                    if (x0 == x1 &amp;&amp; y0 == y1) break;\n                    e2 = err;\n                    if (e2 &gt; -dx) { err -= dy; x0 += sx; }\n                    if (e2 &lt; dy) { err += dx; y0 += sy; }\n                }\n            }\n        }\n\n        for (i = 0; i &lt; H; i++) {\n            for (j = 0; j &lt; W; j++) putchar(c[i * W + j]);\n            putchar(&#x27;\\n&#x27;);\n        }\n\n        usleep(100000);\n        printf(&quot;\\x1b[%dA&quot;, H);\n        memset(c, &#x27; &#x27;, sizeof(c));\n    }\n}\n\n</code></pre>\n",
      summary: null,
      date: "2024-06-12T00:00:00Z",
      metadata: {},
      tags: ["tutorial","games","c","projects","ai"],
      categories: ["post"],
      series: ["c"],
      projects: []
  };



  
  
  
  

  
  
  
  

  CREATE post CONTENT {
      title: "upgrade bookmark tool",
      slug: "upgrading-lambda-bookmarker",
      path: "https://parkerjones.dev/posts/upgrading-lambda-bookmarker/",
      content: "<pre data-lang=\"rust\" class=\"language-rust \"><code class=\"language-rust\" data-lang=\"rust\">&#x2F;&#x2F; Placeholder for main Rust implementation\nuse chrono::Local;\nuse aws_sdk_lambda::{Client, Error};\n\n&#x2F;&#x2F; Placeholder for the module to collect Lambda functions\nmod lambda_collector {\n    use aws_sdk_lambda::Client;\n    use aws_sdk_lambda::types::SdkError;\n    use aws_sdk_lambda::operation::list_functions::ListFunctionsError;\n    use serde::Deserialize;\n    use std::error::Error;\n\n    #[derive(Debug, Deserialize, Clone)]\n    pub struct LambdaFunction {\n        pub function_name: String,\n    }\n\n    pub async fn collect_lambda_functions(client: &amp;Client, search_term: &amp;str) -&gt; Result&lt;Vec&lt;LambdaFunction&gt;, Box&lt;dyn Error&gt;&gt; {\n        let mut functions = Vec::new();\n        let resp = client.list_functions().send().await?;\n\n        if let Some(lambda_functions) = resp.functions {\n            for function in lambda_functions {\n                if let Some(function_name) = function.function_name {\n                    if function_name.contains(search_term) {\n                        functions.push(LambdaFunction { function_name });\n                    }\n                }\n            }\n        }\n\n        Ok(functions)\n    }\n}\n\n&#x2F;&#x2F; Placeholder for the module to handle bookmark file generation\nmod bookmark_generator {\n    use std::fs::File;\n    use std::io::{BufWriter, Write};\n\n    pub struct BookmarkEntry {\n        pub title: String,\n        pub url: String,\n        pub add_date: i64,\n    }\n\n    pub fn generate_bookmark_file(bookmark_entries: Vec&lt;BookmarkEntry&gt;, output_file: &amp;str) -&gt; std::io::Result&lt;()&gt; {\n        let file = File::create(output_file)?;\n        let mut writer = BufWriter::new(file);\n\n        &#x2F;&#x2F; Start Netscape Bookmark File\n        writeln!(writer, &quot;&lt;!DOCTYPE NETSCAPE-Bookmark-file-1&gt;&quot;)?;\n        writeln!(writer, &quot;&lt;!-- This is an automatically generated file. 
--&gt;&quot;)?;\n        writeln!(writer, &quot;&lt;META HTTP-EQUIV=\\&quot;Content-Type\\&quot; CONTENT=\\&quot;text&#x2F;html; charset=UTF-8\\&quot;&gt;&quot;)?;\n        writeln!(writer, &quot;&lt;TITLE&gt;Bookmarks&lt;&#x2F;TITLE&gt;&quot;)?;\n        writeln!(writer, &quot;&lt;H1&gt;Bookmarks&lt;&#x2F;H1&gt;&quot;)?;\n        writeln!(writer, &quot;&lt;DL&gt;&lt;p&gt;&quot;)?;\n\n        for entry in bookmark_entries {\n            writeln!(writer, &quot;    &lt;DT&gt;&lt;A HREF=\\&quot;{}\\&quot; ADD_DATE=\\&quot;{}\\&quot;&gt;{}&lt;&#x2F;A&gt;&quot;, entry.url, entry.add_date, entry.title)?;\n        }\n\n        &#x2F;&#x2F; Close Netscape Bookmark File\n        writeln!(writer, &quot;&lt;&#x2F;DL&gt;&lt;p&gt;&quot;)\n    }\n}\n\n#[tokio::main]\nasync fn main() -&gt; Result&lt;(), Box&lt;dyn std::error::Error&gt;&gt; {\n    &#x2F;&#x2F; Placeholder for AWS SDK client initialization and function invocation\n    let shared_config = aws_config::load_from_env().await;\n    let client = Client::new(&amp;shared_config);\n    let search_term = std::env::args().nth(1).expect(&quot;Please provide a search term&quot;);\n\n    let lambda_functions = lambda_collector::collect_lambda_functions(&amp;client, &amp;search_term).await?;\n\n    let timestamp = Local::now().format(&quot;%Y-%m-%d_%H-%M-%S&quot;).to_string();\n    let output_file = format!(&quot;lambda_bookmarks_{}.html&quot;, timestamp);\n\n    let bookmark_entries: Vec&lt;bookmark_generator::BookmarkEntry&gt; = lambda_functions\n        .into_iter()\n        .map(|function| {\n            let region = std::env::var(&quot;AWS_REGION&quot;).unwrap_or_else(|_| &quot;us-east-1&quot;.to_string());\n            let log_group = format!(&quot;&#x2F;aws&#x2F;lambda&#x2F;{}&quot;, function.function_name);\n            let cloudwatch_logs_url = format!(\n                &quot;https:&#x2F;&#x2F;{}.console.aws.amazon.com&#x2F;cloudwatch&#x2F;home?region={}#logsV2:log-groups&#x2F;log-group&#x2F;{}&quot;,\n                region,\n                region,\n                log_group.replace(&quot;&#x2F;&quot;, &quot;%2F&quot;)\n            );\n            let lambda_function_url = format!(\n                &quot;https:&#x2F;&#x2F;{}.console.aws.amazon.com&#x2F;lambda&#x2F;home?region={}#&#x2F;functions&#x2F;{}?tab=code&quot;,\n                region,\n                region,\n                function.function_name\n            );\n\n            vec![\n                bookmark_generator::BookmarkEntry {\n                    title: format!(&quot;{} - CloudWatch Logs&quot;, function.function_name),\n                    url: cloudwatch_logs_url,\n                    add_date: Local::now().timestamp(),\n                },\n                bookmark_generator::BookmarkEntry {\n                    title: format!(&quot;{} - Lambda Function&quot;, function.function_name),\n                    url: lambda_function_url,\n                    add_date: Local::now().timestamp(),\n                },\n            ]\n        })\n        .flatten()\n        .collect();\n\n    bookmark_generator::generate_bookmark_file(bookmark_entries, &amp;output_file)?;\n    \n    println!(&quot;Bookmarks saved to {}&quot;, output_file);\n    Ok(())\n}\n\n\n</code></pre>\n<pre data-lang=\"rust\" class=\"language-rust \"><code class=\"language-rust\" data-lang=\"rust\">&#x2F;&#x2F; API Module for Rust Bookmark Tool\n\nuse axum::{routing::get, Router, Json};\nuse aws_sdk_lambda::{Client, Error};\nuse serde::Serialize;\nuse std::net::SocketAddr;\nuse 
lambda_collector::{collect_lambda_functions, LambdaFunction};\nuse bookmark_generator::BookmarkEntry;\nuse chrono::Local;\nuse tokio;\n\nmod lambda_collector {\n    use aws_sdk_lambda::Client;\n    use aws_sdk_lambda::types::SdkError;\n    use aws_sdk_lambda::operation::list_functions::ListFunctionsError;\n    use serde::Deserialize;\n    use std::error::Error;\n\n    #[derive(Debug, Deserialize, Clone)]\n    pub struct LambdaFunction {\n        pub function_name: String,\n    }\n\n    pub async fn collect_lambda_functions(client: &amp;Client, search_term: &amp;str) -&gt; Result&lt;Vec&lt;LambdaFunction&gt;, Box&lt;dyn Error&gt;&gt; {\n        let mut functions = Vec::new();\n        let resp = client.list_functions().send().await?;\n\n        if let Some(lambda_functions) = resp.functions {\n            for function in lambda_functions {\n                if let Some(function_name) = function.function_name {\n                    if function_name.contains(search_term) {\n                        functions.push(LambdaFunction { function_name });\n                    }\n                }\n            }\n        }\n\n        Ok(functions)\n    }\n}\n\nmod bookmark_generator {\n    use serde::Serialize;\n    use std::io::{Write};\n\n    #[derive(Serialize)]\n    pub struct BookmarkEntry {\n        pub title: String,\n        pub url: String,\n        pub add_date: i64,\n    }\n\n    pub fn generate_bookmark_file(bookmark_entries: Vec&lt;BookmarkEntry&gt;, output: &amp;mut dyn Write) -&gt; std::io::Result&lt;()&gt; {\n        &#x2F;&#x2F; Start Netscape Bookmark File\n        writeln!(output, &quot;&lt;!DOCTYPE NETSCAPE-Bookmark-file-1&gt;&quot;)?;\n        writeln!(output, &quot;&lt;!-- This is an automatically generated file. --&gt;&quot;)?;\n        writeln!(output, &quot;&lt;META HTTP-EQUIV=\\&quot;Content-Type\\&quot; CONTENT=\\&quot;text&#x2F;html; charset=UTF-8\\&quot;&gt;&quot;)?;\n        writeln!(output, &quot;&lt;TITLE&gt;Bookmarks&lt;&#x2F;TITLE&gt;&quot;)?;\n        writeln!(output, &quot;&lt;H1&gt;Bookmarks&lt;&#x2F;H1&gt;&quot;)?;\n        writeln!(output, &quot;&lt;DL&gt;&lt;p&gt;&quot;)?;\n\n        for entry in bookmark_entries {\n            writeln!(output, &quot;    &lt;DT&gt;&lt;A HREF=\\&quot;{}\\&quot; ADD_DATE=\\&quot;{}\\&quot;&gt;{}&lt;&#x2F;A&gt;&quot;, entry.url, entry.add_date, entry.title)?;\n        }\n\n        &#x2F;&#x2F; Close Netscape Bookmark File\n        writeln!(output, &quot;&lt;&#x2F;DL&gt;&lt;p&gt;&quot;)\n    }\n}\n\n#[derive(Serialize)]\nstruct ApiResponse {\n    lambda_functions: Vec&lt;LambdaFunction&gt;,\n    bookmark_entries: Vec&lt;BookmarkEntry&gt;,\n}\n\n#[tokio::main]\nasync fn main() -&gt; Result&lt;(), Error&gt; {\n    let app = Router::new().route(&quot;&#x2F;api&#x2F;lambda-bookmarks&quot;, get(get_lambda_bookmarks));\n\n    let addr = SocketAddr::from(([127, 0, 0, 1], 3000));\n    println!(&quot;Listening on {}&quot;, addr);\n    axum::Server::bind(&amp;addr)\n        .serve(app.into_make_service())\n        .await\n        .unwrap();\n\n    Ok(())\n}\n\nasync fn get_lambda_bookmarks() -&gt; Result&lt;Json&lt;ApiResponse&gt;, Error&gt; {\n    let shared_config = aws_config::load_from_env().await;\n    let client = Client::new(&amp;shared_config);\n    let search_term = &quot;example&quot;; &#x2F;&#x2F; placeholder search term\n\n    let lambda_functions = collect_lambda_functions(&amp;client, search_term).await?;\n\n    let bookmark_entries: Vec&lt;BookmarkEntry&gt; = lambda_functions\n        .iter()\n        .flat_map(|function| {\n     
       let region = std::env::var(&quot;AWS_REGION&quot;).unwrap_or_else(|_| &quot;us-east-1&quot;.to_string());\n            let log_group = format!(&quot;&#x2F;aws&#x2F;lambda&#x2F;{}&quot;, function.function_name);\n            let cloudwatch_logs_url = format!(\n                &quot;https:&#x2F;&#x2F;{}.console.aws.amazon.com&#x2F;cloudwatch&#x2F;home?region={}#logsV2:log-groups&#x2F;log-group&#x2F;{}&quot;,\n                region,\n                region,\n                log_group.replace(&quot;&#x2F;&quot;, &quot;%2F&quot;)\n            );\n            let lambda_function_url = format!(\n                &quot;https:&#x2F;&#x2F;{}.console.aws.amazon.com&#x2F;lambda&#x2F;home?region={}#&#x2F;functions&#x2F;{}?tab=code&quot;,\n                region,\n                region,\n                function.function_name\n            );\n\n            vec![\n                BookmarkEntry {\n                    title: format!(&quot;{} - CloudWatch Logs&quot;, function.function_name),\n                    url: cloudwatch_logs_url,\n                    add_date: Local::now().timestamp(),\n                },\n                BookmarkEntry {\n                    title: format!(&quot;{} - Lambda Function&quot;, function.function_name),\n                    url: lambda_function_url,\n                    add_date: Local::now().timestamp(),\n                },\n            ]\n        })\n        .collect();\n\n    Ok(Json(ApiResponse {\n        lambda_functions,\n        bookmark_entries,\n    }))\n}\n\n\n</code></pre>\n",
      summary: null,
      date: "2024-01-01T00:00:00Z",
      metadata: {},
      tags: [],
      categories: [],
      series: [],
      projects: []
  };



  
  
  
  

  
    
  
  
    
  
  
  
    
  

  CREATE post CONTENT {
      title: "Eatable Typescript and Rust",
      slug: "eatable",
      path: "https://parkerjones.dev/posts/eatable/",
      content: "<p>Idea: build a website of \"recipes that do not exist\"</p>\n<p>Build a website and training data, generate a recipe page with a hero image</p>\n<h1 id=\"the-website\">The Website</h1>\n<p>https://medium.com/technest/build-a-food-blog-with-next-js-mdx-tailwind-css-and-typescript-24380c1c6ed3</p>\n<h1 id=\"data-collection-tools\">Data Collection Tools</h1>\n<p>https://rolisz.ro/2020/03/01/web-crawler-in-rust/</p>\n<h1 id=\"gan-resources\">Gan Resources</h1>\n<p>https://towardsdatascience.com/generative-adversarial-network-gan-for-dummies-a-step-by-step-tutorial-fdefff170391</p>\n<p>http://marksabini.com/files/cs236__GAN-stronomy_Generative_Cooking_with_DCGANs__report.pdf</p>\n<p>https://openaccess.thecvf.com/content_WACV_2020/papers/Han_CookGAN_Meal_Image_Synthesis_from_Ingredients_WACV_2020_paper.pdf</p>\n<h1 id=\"recipe-ipsum\">recipe ipsum</h1>\n<p>https://github.com/recipe-ipsum/recipe-ipsum/blob/develop/src/js/index.js\nhttps://recipe-ipsum.com/</p>\n",
      summary: null,
      date: "2021-08-20T00:00:00Z",
      metadata: {},
      tags: ["graph","nextjs","rust","web"],
      categories: ["web"],
      series: [],
      projects: ["sites"]
  };



  
  
  
  

  
    
  
  
    
  
  
    
  
  

  CREATE post CONTENT {
      title: "Typescript Graph Algorithms: BFS",
      slug: "graph-bfs-rust",
      path: "https://parkerjones.dev/posts/graph-bfs-rust/",
      content: "<h1 id=\"graph-algorithms-in-rust-breadth-first-search-bfs\">Graph Algorithms in Rust: Breadth First Search (BFS)</h1>\n<p>coming soon</p>\n<p>Reference Implementations</p>\n<ol>\n<li>HTTrack</li>\n<li>Cyotek WebCopy</li>\n<li>Content Grabber</li>\n<li>ParseHub</li>\n<li>OutWit Hub</li>\n<li>spidy</li>\n<li>Evine</li>\n</ol>\n<p>https://github.com/rivermont/spidy\nhttps://hakin9.org/evine-interactive-cli-web-crawler/\nhttps://docs.scrapy.org/en/latest/</p>\n<h1 id=\"web-crawler\">Web crawler</h1>\n<h2 id=\"features\">features</h2>\n<ul>\n<li>Nice Logging Features</li>\n<li>Portalbility</li>\n<li>Browser Spoofing: Make requests using User Agents from 4 popular web browsers, use a custom spidy bot one, or create your own!</li>\n<li>zip file output?</li>\n</ul>\n<h2 id=\"api\">api</h2>\n<h2 id=\"performance-comparison\">Performance comparison</h2>\n",
      summary: null,
      date: "2021-08-18T00:00:00Z",
      metadata: {},
      tags: ["graph","rust","algorithms"],
      categories: ["software"],
      series: ["algorithms"],
      projects: []
  };



  
  
  
  

  
    
  
  
    
  
  
    
  
  

  CREATE post CONTENT {
      title: "Graph Algorithms in Typescript: Breadth First Search (BFS)",
      slug: "graph-bfs-ts",
      path: "https://parkerjones.dev/posts/graph-bfs-ts/",
      content: "<p>Part 2 in the series Graph Algorithms, Implementing Breadth First Search (BFS) in Typescript. For a primer on BFS and it's applications and procedure, read <a href=\"/graph-bfs\">Part 1: Graph Algorithms: Breadth First Search (BFS)</a> here.</p>\n<h1 id=\"data-structures\">Data Structures</h1>\n<p>For convenience and clarity of code, the following types were implmenented for use in BFS</p>\n<pre data-lang=\"ts\" class=\"language-ts \"><code class=\"language-ts\" data-lang=\"ts\">&#x2F;&#x2F; data structure that represents an edge,\n&#x2F;&#x2F; this really does nothing for the algorithm directly but provides type safety.\nexport type Edge = [src: number, dest: number];\n\nexport class Graph {\n  &#x2F;&#x2F; array of arrays to represent adjacency list\n  private adjList: number[][];\n  constructor(edges: Edge[], N: number) {\n    &#x2F;&#x2F;initialize the adjacency list with the size of the graph (number of nodes)\n    this.adjList = Array.from({ length: N }, () =&gt; []);\n\n    for (const [src, dest] of edges) {\n      this.adjList[src].push(dest);\n      this.adjList[dest].push(src);\n    }\n  }\n\n  public getAdjacent(index: number): number[] | null {\n    return this.adjList[index] ? this.adjList[index] : null;\n  }\n}\n&#x2F;&#x2F; this is not truly necessary to do for the algorith,\n&#x2F;&#x2F; moreso a nicety becuase there is no such thing as a queue in javascript,\n&#x2F;&#x2F; only arrays.\n&#x2F;&#x2F; This class encapsulates FIFO behavior to clean up the BFS implementation\nexport class Queue&lt;T&gt; {\n  private stack: T[] = [];\n  public push(element: T): void {\n    this.stack.push(element);\n  }\n  public pop(): T | null {\n    const el = this.stack.shift();\n\n    return el ?? null;\n  }\n\n  public empty(): boolean {\n    return this.stack.length === 0;\n  }\n}\n</code></pre>\n<h1 id=\"iterative-implementation-of-bfs\">Iterative Implementation of BFS</h1>\n<pre data-lang=\"ts\" class=\"language-ts \"><code class=\"language-ts\" data-lang=\"ts\">export const iterativeBFS = (\n  graph: Graph,\n  v: number,\n  discovered: boolean[]\n): void =&gt; {\n  &#x2F;&#x2F;create the queue\n  const q = new Queue&lt;number&gt;();\n  &#x2F;&#x2F; mark the root as having been discovered\n  discovered[v] = true;\n\n  &#x2F;&#x2F; push the starting index into the queue\n  q.push(v);\n\n  while (q.empty() !== true) {\n    &#x2F;&#x2F; while the queue isn&#x27;t empty, take from it\n    v = q.pop()!;\n\n    &#x2F;&#x2F;print the node to the console\n    console.log(&quot;node:&quot;, v);\n\n    &#x2F;&#x2F; get the adjacent nodes to v\n    const adjacent = graph.getAdjacent(v);\n    if (adjacent) {\n      adjacent.forEach((u) =&gt; {\n        if (!discovered[u]) {\n          discovered[u] = true;\n          q.push(u);\n        }\n      });\n    }\n  }\n};\n</code></pre>\n<h2 id=\"output\">Output</h2>\n<pre data-lang=\"ts\" class=\"language-ts \"><code class=\"language-ts\" data-lang=\"ts\">const edges: Edge[] = [\n  [1, 2],\n  [1, 3],\n  [1, 4],\n  [2, 5],\n  [2, 6],\n  [5, 9],\n  [5, 10],\n  [4, 7],\n  [4, 8],\n  [7, 11],\n  [7, 12],\n];\n&#x2F;&#x2F; vertex 0, 13, and 14 are single nodes\nconst N = 15;\n\n(() =&gt; {\n  let graph = new Graph(edges, N);\n  let discovered = Array.from({ length: N }, () =&gt; false);\n  Array.from({ length: N }, (_, idx) =&gt; {\n    if (!discovered[idx]) {\n      iterativeBFS(graph, idx, discovered);\n    }\n  });\n})();\n</code></pre>\n<pre data-lang=\"sh\" class=\"language-sh \"><code class=\"language-sh\" data-lang=\"sh\">$ npx tsc 
&amp;&amp; node dist\nnode: 0\nnode: 1\nnode: 2\nnode: 3\nnode: 4\nnode: 5\nnode: 6\nnode: 7\nnode: 8\nnode: 9\nnode: 10\nnode: 11\nnode: 12\nnode: 13\nnode: 14\n</code></pre>\n<h1 id=\"recursive-implementation-of-bfs\">Recursive Implementation of BFS</h1>\n<pre data-lang=\"ts\" class=\"language-ts \"><code class=\"language-ts\" data-lang=\"ts\">export const recursiveBFS = (\n  graph: Graph,\n  queue: Queue&lt;number&gt;,\n  discovered: boolean[]\n) =&gt; {\n  &#x2F;&#x2F; if our queue is empty, bail\n  if (queue.empty()) {\n    return;\n  }\n  &#x2F;&#x2F; retrieve the node off of the top of the queue\n  const v = queue.pop()!;\n\n  &#x2F;&#x2F; print the node to the console\n  console.log(&quot;node:&quot;, v);\n\n  &#x2F;&#x2F; get the adjacent nodes to the current index\n  const adjacent = graph.getAdjacent(v);\n\n  &#x2F;&#x2F; if there are adjacent nodes...\n  if (adjacent) {\n    &#x2F;&#x2F; filter down to the nodes that have not yet been discovered\n    adjacent\n      .filter((u) =&gt; !discovered[u])\n      .forEach((u) =&gt; {\n        &#x2F;&#x2F; for each undiscovered node, mark it &quot;visited&quot;&#x2F;discovered\n        discovered[u] = true;\n        &#x2F;&#x2F;push the discovered node onto the queue for it&#x27;s edges to be traced\n        queue.push(u);\n      });\n  }\n  recursiveBFS(graph, queue, discovered);\n};\n</code></pre>\n<h2 id=\"output-1\">output</h2>\n<pre data-lang=\"ts\" class=\"language-ts \"><code class=\"language-ts\" data-lang=\"ts\">const edges: Edge[] = [\n  [1, 2],\n  [1, 3],\n  [1, 4],\n  [2, 5],\n  [2, 6],\n  [5, 9],\n  [5, 10],\n  [4, 7],\n  [4, 8],\n  [7, 11],\n  [7, 12],\n];\n&#x2F;&#x2F; vertex 0, 13, and 14 are single nodes\nconst N = 15;\n\n(() =&gt; {\n  &#x2F;&#x2F; create the graph from edges\n  let graph = new Graph(edges, N);\n\n  &#x2F;&#x2F; create an array to track discovered nodes\n  let recursiveDiscovered = Array.from({ length: N }, () =&gt; false);\n\n  &#x2F;&#x2F;queue of nodes that need neighbor discovery\n  let recursiveQueue = new Queue&lt;number&gt;();\n  Array.from({ length: N }, (_, idx) =&gt; {\n    &#x2F;&#x2F; for each of our nodes, we&#x27;re not yet discovered, call the recursive bfs\n    if (!recursiveDiscovered[idx]) {\n      recursiveDiscovered[idx] = true;\n      recursiveQueue.push(idx);\n      recursiveBFS(graph, recursiveQueue, recursiveDiscovered);\n    }\n  });\n})();\n</code></pre>\n<pre><code>recursive approach:\nnode: 0\nnode: 1\nnode: 2\nnode: 3\nnode: 4\nnode: 5\nnode: 6\nnode: 7\nnode: 8\nnode: 9\nnode: 10\nnode: 11\nnode: 12\nnode: 13\nnode: 14\n</code></pre>\n",
      summary: null,
      date: "2021-08-18T00:00:00Z",
      metadata: {},
      tags: ["graph","ts","bfs","algorithms"],
      categories: ["software"],
      series: ["algorithms","ts"],
      projects: []
  };



  
  
  
  

  
    
  
  
    
  
  
    
  
  

  CREATE post CONTENT {
      title: "Graph Algorithms: BFS",
      slug: "graph-bfs",
      path: "https://parkerjones.dev/posts/graph-bfs/",
      content: "<h1 id=\"graph-algorithms-bredth-first-search-bfs\">Graph Algorithms: Bredth First Search (BFS)</h1>\n<p>In preparation for programming interviews, and generally improving my own understanding of algorithms I've decided to put together a series of blog posts about popular algorithm interview questions.</p>\n<p>This is an introduction to Graph Algorithms written in Typescript. This is due to the fact that JavaScript is my strongst language, and that there is a deficit of examples in Typescript.</p>\n<p>I may do a second pass with in Rust.</p>\n<p>From the <a href=\"https://en.wikipedia.org/wiki/Breadth-first_search\">wikipedia</a>:</p>\n<p>Breadth-first search (BFS) is an algorithm for searching a tree data structure for a node that satisfies a given property. It starts at the tree root and explores all nodes at the present depth prior to moving on to the nodes at the next depth level. Extra memory, usually a queue, is needed to keep track of the child nodes that were encountered but not yet explored.</p>\n<h1 id=\"procedure\">Procedure</h1>\n<p><em>Input</em>: A graph <em>G</em> and a starting vertex (node) <em>root</em> of <em>G</em></p>\n<p><em>Output</em>: Goal State, the verticies that provide a traced shorted path back to the root node</p>\n<pre><code>Given G and root\n    let Q be a queue\n    mark the root as visited\n    enqueue root\n    while Q is not empty do:\n        let v equal a dequeued index value\n        if v is the target goal node then\n            return v\n        for all edges from v (current)\n            to u (next node) in adjoining adjacent edges to v\n        do:\n            if u has not been visited then\n                set u as visited\n                enqueue u\n</code></pre>\n<h1 id=\"applications\">Applications</h1>\n<ul>\n<li>Copying garbage collection, <a href=\"https://en.wikipedia.org/wiki/Cheney%27s_algorithm\">Cheney’s algorithm</a>.</li>\n<li>Finding the shortest path between two nodes <code>u</code> and <code>v</code>, with a path length measured by the total number of edges</li>\n<li>Testing a graph for <a href=\"https://mathworld.wolfram.com/BipartiteGraph.html\">bipartiteness</a></li>\n<li><a href=\"https://en.wikipedia.org/wiki/Minimum_spanning_tree\">Minimum Spanning Tree</a> for an unweighted graph.</li>\n<li>Web Crawler</li>\n<li>Finding nodes in any connected component graph</li>\n<li><a href=\"https://parkerjones.dev/posts/graph-bfs/Ford-Fulkerson\">Ford-Fulkerson</a> method for computing the maximum flow in a flow network (aka <a href=\"https://en.wikipedia.org/wiki/Edmonds%E2%80%93Karp_algorithm\">Edmonds-Karp</a>)</li>\n<li>Serialization/Deserialization of a binary tree</li>\n</ul>\n<p>For more concrete example problems, <a href=\"https://medium.com/techie-delight/top-20-breadth-first-search-bfs-practice-problems-ac2812283ab1\">this blog post</a> contains a list of the top 20 problems.</p>\n<p>Next in the Series:</p>\n<ol>\n<li><a href=\"/graph-bfs-ts\">Graph Algorithms in Typescript: Breadth-First Search</a></li>\n<li><a href=\"/graph-bfs-rust\">Graph Algorithms in Rust: Breadth-First Search</a></li>\n</ol>\n",
      summary: null,
      date: "2021-08-18T00:00:00Z",
      metadata: {},
      tags: ["graph","ts","algorithms"],
      categories: ["lab"],
      series: ["algorithms"],
      projects: []
  };



  
  
  
  

  
    
  
  
    
  
  
  

  CREATE post CONTENT {
      title: "Rust NES System: Part 1",
      slug: "nes1",
      path: "https://parkerjones.dev/posts/nes1/",
      content: "<p>The companion repo for this post can be found <a href=\"https://github.com/parallaxisjones/rustnes\" title=\"Companion Repo\">here</a>:</p>\n<p>This is a post detailing my progress on building an Emulator in Rust. This isn't something I'm building from scratch, but in pursuit of learning rust, and finding something interesting and approachable to work on I came across this <a href=\"https://bugzmanov.github.io/nes_ebook/chapter_1.html\" title=\"NES Rust E-Book\">NES Ebook</a>.</p>\n<span id=\"continue-reading\"></span>\n<p>This post is going to have notes about both learning Rust, setting up the project as well as resources and notes about the NES architecture, which apart from a lifelong love of this platform, the underlying system components are a new venture. Unlike the author, most of my working experience is in Full Stack Javascript development, and this blog and it's content is documentation of my journey \"closer to the metal\" and deeper into distributed systems.</p>\n<p>Goals:</p>\n<ol>\n<li>Better familiarity with Rust and WASM</li>\n<li><a href=\"https://www.confluent.io/learn/distributed-systems/\" title=\"What is a Distributed System\">Distributed Systems</a> content for this blog</li>\n<li>Build a web based NES emulator</li>\n</ol>\n<p>I'm not going to regurgitate the points in the NES Ebook, it's pretty clearly easy to follow. Instead, however, I want to point out the pieces I found interesting or want to expand on. Namely where I can go a little deeper into rust or software architecture points, since that's what this blog is primarily focused with.</p>\n<p>Where I want to expand upon the content in the Rust book is primarily testing, project structure and porting the emulator to run in the browser with web assembly.</p>\n<p>Notes on The NES. The NES is a distributed system.</p>\n<blockquote>\n<p>What's interesting is that CPU, PPU, and APU are independent of each other. This fact makes NES a distributed system in which separate components have to coordinate to generate one seamless gaming experience.</p>\n</blockquote>\n<blockquote>\n<p>With every company becoming software, any process that can be moved to software, will be. With computing systems growing in complexity, modern applications no longer run in isolation. The vast majority of products and applications rely on distributed systems</p>\n</blockquote>\n<h1 id=\"getting-started\">Getting Started</h1>\n<ol>\n<li>Bootstrapping the application</li>\n</ol>\n<pre data-lang=\"bash\" class=\"language-bash \"><code class=\"language-bash\" data-lang=\"bash\">$ mkdir rustnes &amp;&amp; cd rustnes\n$ cargo init\n</code></pre>\n<ol start=\"2\">\n<li>Project Module heirarchy basics</li>\n</ol>\n<pre data-lang=\"bash\" class=\"language-bash \"><code class=\"language-bash\" data-lang=\"bash\">$ tree .\n.\n├── Cargo.lock\n├── Cargo.toml\n├── src\n│   ├── apu\n│   │   └── mod.rs\n│   ├── bus\n│   │   └── mod.rs\n│   ├── cpu\n│   │   └── mod.rs\n│   ├── joypads\n│   │   └── mod.rs\n│   ├── main.rs\n│   ├── ppu\n│   │   └── mod.rs\n│   └── rom\n│       └── mod.rs\n</code></pre>\n<h1 id=\"cpu-implementation\">CPU Implementation</h1>\n<blockquote>\n<p>NES implements typical <a href=\"https://en.wikipedia.org/wiki/Von_Neumann_architecture\" title=\"von Neumann architecture\">von Neumann architecture</a>: both data and the instructions are stored in memory. The executed code is data from the CPU perspective, and any data can potentially be interpreted as executable code. There is no way CPU can tell the difference. 
The only mechanism the CPU has is a <code>program_counter</code> register that keeps track of a position in the instructions stream.</p>\n</blockquote>\n<p>The NES CPU can address 65536 memory cells.</p>\n<p>The NES CPU uses little-endian addressing rather than big-endian: that means the 8 least significant bits of an address are stored before the 8 most significant bits.</p>\n<p>There are no opcodes that occupy more than 3 bytes. CPU instruction size can be either 1, 2, or 3 bytes.</p>\n<p>Relevant resources for this section:\n<a href=\"http://www.obelisk.me.uk/6502/reference.html\" title=\"6502 Instruction Reference\">6502 Instruction Reference</a>\n<a href=\"http://www.6502.org/tutorials/6502opcodes.html\" title=\"6502 OpCode tutorial\">6502 OpCode tutorial</a>\n<a href=\"https://bugzmanov.github.io/nes_ebook/chapter_3_1.html\" title=\"NES Ebook: Emulating the CPU\">Emulating the CPU</a></p>\n<p>The CPU works in a constant cycle:</p>\n<ul>\n<li>Fetch next execution instruction from the instruction memory</li>\n<li>Decode the instruction</li>\n<li>Execute the instruction</li>\n<li>Repeat the cycle</li>\n</ul>\n<h2 id=\"cpu-registers\">CPU Registers</h2>\n<p>There are 7 registers on the NES CPU; they are described in the <a href=\"http://www.obelisk.me.uk/6502/reference.html\" title=\"6502 Instruction Reference\">6502 Instruction Reference</a>.</p>\n<h3 id=\"program-counter-pc\">Program Counter (PC)</h3>\n<p>The Program Counter holds the address for the next machine language instruction to be executed.</p>\n<h3 id=\"stack-pointer\">Stack Pointer</h3>\n<p>The stack pointer holds the address of the top of the memory space allocated for the stack. The NES stack has 256 addresses allotted to it, from 0x0100 to 0x01FF.</p>\n<h3 id=\"accumulator\">Accumulator</h3>\n<p>This register stores the result of arithmetic, logic, and memory access operations. It's used as an input parameter for some operations.</p>\n<h3 id=\"index-register-x-x\">Index Register X (X)</h3>\n<p>Used as an offset in specific memory addressing modes. Can be used for auxiliary storage needs such as holding temporary values or being used as a counter.</p>\n<h3 id=\"index-register-y-y\">Index Register Y (Y)</h3>\n<p>Similar use case to X.</p>\n<h3 id=\"processor-status-p\">Processor Status (P)</h3>\n<p>An 8-bit register that represents 7 status flags, each set or unset depending on the result of the last executed instruction.</p>\n<p>As instructions are executed, a set of processor flags is set or cleared to record the results of the operation. These flags and some additional control flags are held in a special status register. Each flag has a single bit within the register.</p>\n<h4 id=\"flags\">Flags</h4>\n<p><a href=\"http://wiki.nesdev.com/w/index.php/Status_flags\">NESDEV - Status Flags</a></p>\n<pre><code>    &#x2F;&#x2F;&#x2F;\n    &#x2F;&#x2F;&#x2F;  7 6 5 4 3 2 1 0\n    &#x2F;&#x2F;&#x2F;  N V _ B D I Z C\n    &#x2F;&#x2F;&#x2F;  | |   | | | | +--- Carry Flag\n    &#x2F;&#x2F;&#x2F;  | |   | | | +----- Zero Flag\n    &#x2F;&#x2F;&#x2F;  | |   | | +------- Interrupt Disable\n    &#x2F;&#x2F;&#x2F;  | |   | +--------- Decimal Mode (not used on NES)\n    &#x2F;&#x2F;&#x2F;  | |   +----------- Break Command\n    &#x2F;&#x2F;&#x2F;  | +--------------- Overflow Flag\n    &#x2F;&#x2F;&#x2F;  +----------------- Negative Flag\n    &#x2F;&#x2F;&#x2F;\n</code></pre>\n<h5 id=\"c-carry-flag\">(C) Carry Flag</h5>\n<p>The carry flag is set if the last operation caused an overflow from bit 7 of the result or an underflow from bit 0. 
This condition is set during arithmetic, comparison, and logical shift operations. It can be explicitly set using the <a href=\"http://www.obelisk.me.uk/6502/reference.html#SEC\">Set Carry Flag</a> instruction and cleared with <a href=\"http://www.obelisk.me.uk/6502/reference.html#CLC\">Clear Carry Flag</a>.</p>\n<h5 id=\"z-zero-flag\">(Z) Zero Flag</h5>\n<p>The zero flag is set if the result of the last operation was zero.</p>\n<h5 id=\"i-interrupt-disable\">(I) Interrupt Disable</h5>\n<p>The interrupt disable flag is set if the program has executed a <a href=\"http://www.obelisk.me.uk/6502/reference.html#SEI\">Set Interrupt Disable</a> instruction. While this flag is set the processor will not respond to interrupts from devices until it is cleared by a <a href=\"http://www.obelisk.me.uk/6502/reference.html#CLI\">Clear Interrupt Disable</a> instruction.</p>\n<h5 id=\"d-decimal-mode\">(D) Decimal Mode</h5>\n<p>According to the NESDEV wiki, this flag is not used on the NES.</p>\n<p>While the decimal mode flag is set the processor will obey the rules of <a href=\"https://en.wikipedia.org/wiki/Binary-coded_decimal\" title=\"Binary Coded Decimal Wiki Article\">Binary Coded Decimal (BCD)</a> arithmetic during addition and subtraction. The flag can be explicitly set using <a href=\"http://www.obelisk.me.uk/6502/reference.html#SED\">Set Decimal Flag</a> and cleared with <a href=\"http://www.obelisk.me.uk/6502/reference.html#CLD\">Clear Decimal Flag</a>.</p>\n<h5 id=\"b-break-command\">(B) Break Command</h5>\n<p>The break command bit is set when a <a href=\"http://www.obelisk.me.uk/6502/reference.html#BRK\">BRK</a> instruction has been executed and an interrupt has been generated to process it.</p>\n<h5 id=\"v-overflow-flag\">(V) oVerflow Flag</h5>\n<p>The overflow flag is set during arithmetic operations if the result has yielded an invalid 2's complement result (e.g. adding two positive numbers and ending up with a negative result: 64 + 64 =&gt; -128). 
It is determined by looking at the carry between bits 6 and 7 and between bit 7 and the carry flag.</p>\n<h5 id=\"n-negative-flag\">(N) Negative Flag</h5>\n<p>The negative flag is set if the result of the last operation had bit 7 set to a one.</p>\n<h2 id=\"adressing-modes\">Addressing Modes</h2>\n<p>In short, the addressing mode is a property of an instruction that defines how the CPU should interpret the next 1 or 2 bytes in the instruction stream.</p>\n<h1 id=\"code-highlights\">Code Highlights</h1>\n<p>Getting started</p>\n<pre data-lang=\"rust\" class=\"language-rust \"><code class=\"language-rust\" data-lang=\"rust\">mod cpu;\n\npub struct CPU {\n    pub register_a: u8,\n    pub status: u8,\n    pub program_counter: u16,\n}\n\nimpl CPU {\n    pub fn new() -&gt; Self {\n        CPU {\n            register_a: 0,\n            status: 0,\n            program_counter: 0,\n        }\n    }\n\n    pub fn interpret(&amp;mut self, program: Vec&lt;u8&gt;) {\n        self.program_counter = 0;\n\n        loop {\n            let opscode = program[self.program_counter as usize];\n            self.program_counter += 1;\n\n            match opscode {\n                _ =&gt; todo!(&quot;ops code todos&quot;)\n            }\n        }\n    }\n}\n</code></pre>\n<h1 id=\"rust-references\">Rust References</h1>\n<p>Rust feature highlights that were interesting along the way:\n<a href=\"https://doc.rust-lang.org/std/macro.todo.html\" title=\"Todo Macro\">Rust Todo Macro</a></p>\n<p><a href=\"https://doc.rust-lang.org/rust-by-example/mod/split.html\" title=\"Rust Modules\">Rust Modules</a></p>\n",
      summary: "<p>The companion repo for this post can be found <a href=\"https://github.com/parallaxisjones/rustnes\" title=\"Companion Repo\">here</a>:</p>\n<p>This is a post detailing my progress on building an Emulator in Rust. This isn't something I'm building from scratch, but in pursuit of learning rust, and finding something interesting and approachable to work on I came across this <a href=\"https://bugzmanov.github.io/nes_ebook/chapter_1.html\" title=\"NES Rust E-Book\">NES Ebook</a>.</p>\n",
      date: "2021-04-25T00:00:00Z",
      metadata: {},
      tags: ["emulators","games","rust","projects"],
      categories: ["software"],
      series: [],
      projects: []
  };
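
-- Illustrative query (not part of the generated export, and left commented out so
-- that importing this file is unchanged): once the data is loaded, records such as
-- the post above can be filtered on the tag array stored with each record.
-- SELECT title, slug, date FROM post WHERE tags CONTAINS "rust" ORDER BY date DESC;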



  CREATE post CONTENT {
      title: "Personal Website",
      slug: "personal-website",
      path: "https://parkerjones.dev/posts/personal-website/",
      content: "<p><a href=\"https://parkerjones.website/\">Link in Bio</a></p>\n<span id=\"continue-reading\"></span>\n<p>Not a terrible amount to say about this, it's a ripoff of the instagram style straight to the point link in bio style website. It's a landing page.</p>\n<p>This first iteration has information for HF contacts as well as verification information through keybase, payment information and a link to my devlog.</p>\n<p>When I launch the Parallax Labs sites or any software product I might link to that. The QR code doesn't make much sense but it's a start.</p>\n",
      summary: "<p><a href=\"https://parkerjones.website/\">Link in Bio</a></p>\n",
      date: "2021-04-25T00:00:00Z",
      metadata: {},
      tags: ["domains","sites","meta","link in bio","ham","radio"],
      categories: ["software"],
      series: [],
      projects: ["sites"]
  };
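
-- Illustrative query (not part of the export; commented out): the record above
-- stores projects: ["sites"], so a project page could collect its posts like this.
-- SELECT title, path FROM post WHERE projects CONTAINS "sites" ORDER BY date DESC;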



  CREATE post CONTENT {
      title: "New Blog",
      slug: "hello-world",
      path: "https://parkerjones.dev/posts/hello-world/",
      content: "<p>I decided I had to pull the trigger on my web domains.</p>\n<span id=\"continue-reading\"></span>\n<p>Especially since I just paid for renewal on some of them. These are the journeys of launching content on them. More to come</p>\n<p>For now these need ideas. Over the next few months each of these are going to get some site or app at least, and if not I need to release them... It's already ridiculous that I have paid hundreds of dollars for domains that have, now I need to pull the trigger on making at least one of them profitable. I cannot buy any more. These are going to launch or be deleted ☠️</p>\n<ol>\n<li>abotme.dev</li>\n<li>baneposting.com</li>\n<li>botme.dev</li>\n<li>eatable.us</li>\n<li>gnvdaos.app</li>\n<li>gnvdaos.com</li>\n<li>gnvdaos.dev</li>\n<li>gnvdaos.info</li>\n<li>gnvdaos.me</li>\n<li>gnvdaos.org</li>\n<li>gnvdaos.page</li>\n<li>gnvdaos.us</li>\n<li>groupodaos.org</li>\n<li>groupodaos.us</li>\n<li>parallax.financial</li>\n<li>parallax.vision</li>\n<li>parallax.website</li>\n<li>parallaxair.com</li>\n<li>parallaxairsurvey.com</li>\n<li>parallaxis.financial</li>\n<li>parallaxisfinance.com</li>\n<li>parallaxisfinancial.com</li>\n<li>parkerjones.website</li>\n<li>roasting.us</li>\n<li>voluntarily.app</li>\n<li>voluntarily.dev</li>\n<li>voluntarily.us</li>\n</ol>\n",
      summary: "<p>I decided I had to pull the trigger on my web domains.</p>\n",
      date: "2021-04-24T00:00:00Z",
      metadata: {},
      tags: ["domains","web"],
      categories: ["web"],
      series: [],
      projects: ["sites"]
  };
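
-- Illustrative sketch (not part of the export; commented out, and the analyzer and
-- index names are assumptions): full-text search over the content field would need
-- a search analyzer and index defined once after import, for example:
-- DEFINE ANALYZER IF NOT EXISTS post_analyzer TOKENIZERS class FILTERS lowercase, snowball(english);
-- DEFINE INDEX IF NOT EXISTS post_content_search ON post FIELDS content SEARCH ANALYZER post_analyzer BM25;
-- SELECT title, slug FROM post WHERE content @@ "domains";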



  CREATE post CONTENT {
      title: "Custom Jotform Widget",
      slug: "jotform-eg",
      path: "https://parkerjones.dev/posts/jotform-eg/",
      content: "<style>\n#parcel-widget {\n    margin: 20px;\n}\n\n#parcel-id {\n    width: 200px;\n    padding: 5px;\n    margin-right: 10px;\n}\n\n#fetch-btn {\n    padding: 5px 10px;\n}\n\n#loading {\n    margin-top: 20px;\n    font-size: 14px;\n    color: #fdfdfd;\n}\n\n#result {\n    margin-top: 20px;\n}\n\n\n</style>\n<div id=\"parcel-widget\">\n    <label for=\"parcel-id\">Parcel ID:</label>\n    <input type=\"text\" id=\"parcel-id\" name=\"parcel-id\" placeholder=\"20070-003-004\">\n    <button id=\"fetch-btn\">Fetch Data</button> \n    <div id=\"loading\" style=\"display:none;\">Loading...</div>\n    <div id=\"result\" style=\"display:none;\">\n        <p>Latitude: <span id=\"lat\"></span></p>\n        <p>Longitude: <span id=\"long\"></span></p>\n        <p>Google Maps: <a id=\"googlemap\" href=\"\" target=\"_blank\">View on Google Maps</a></p>\n        <p>Map Image:</p>\n        <img id=\"map-image\" src=\"\" alt=\"Map centered at the queried location\">\n    </div>\n</div>\n<script>\n\ndocument.getElementById('fetch-btn').addEventListener('click', function() {\n    const parcelId = document.getElementById('parcel-id').value;\n    const regex = /^[0-9]{5}-[0-9]{3}-[0-9]{3}$/;\n    \n    if (!regex.test(parcelId)) {\n        alert('Please enter a valid Parcel ID.');\n        return;\n    }\n    \n    document.getElementById('loading').style.display = 'block';\n    document.getElementById('result').style.display = 'none';\n\n    fetch(`https://taurus.at.geoplan.ufl.edu/arcgis/rest/services/fgdl/FDOR/Mapserver/0/query?where=PARCELID='${parcelId}'&outFields=LAT_DD,LONG_DD,GOOGLEMAP&returnGeometry=false&f=json`)\n        .then(response => response.json())\n        .then(data => {\n            document.getElementById('loading').style.display = 'none';\n            if (data.features && data.features.length > 0) {\n                const { LAT_DD, LONG_DD, GOOGLEMAP } = data.features[0].attributes;\n                document.getElementById('lat').innerText = LAT_DD;\n                document.getElementById('long').innerText = LONG_DD;\n                const googleMapLink = document.getElementById('googlemap');\n                googleMapLink.href = GOOGLEMAP;\n                googleMapLink.innerText = GOOGLEMAP;\n                \n                const mapImageUrl = `https://us-east1-map-demo-429019.cloudfunctions.net/static-map?parcelId=${parcelId}`;\n                document.getElementById('map-image').src = mapImageUrl;\n\n                document.getElementById('result').style.display = 'block';\n                \n                // Add hidden input fields to submit these values with the form\n                addHiddenField('latitude', LAT_DD);\n                addHiddenField('longitude', LONG_DD);\n                addHiddenField('googlemap_url', GOOGLEMAP);\n                addHiddenField('map_image_url', mapImageUrl);\n            } else {\n                alert('No data found for the given Parcel ID.');\n            }\n        })\n        .catch(error => {\n            document.getElementById('loading').style.display = 'none';\n            console.error('Error fetching data:', error);\n            alert('An error occurred while fetching data.');\n        });\n});\n\nfunction addHiddenField(name, value) {\n    let field = document.querySelector(`input[name=\"${name}\"]`);\n    if (!field) {\n        field = document.createElement('input');\n        field.type = 'hidden';\n        field.name = name;\n        document.getElementById('parcel-widget').appendChild(field);\n    }\n    
field.value = value;\n}\n</script>\n<!-- <div id=\"main\"> -->\n<!--     <h3>This is my first widget.</h3> -->\n<!--     <span id=\"labelText\"></span> -->\n<!--     <input type=\"text\" id=\"userInput\"> -->\n<!-- </div> -->\n<!-- <script src=\"//js.jotform.com/JotFormCustomWidget.min.js\"></script> -->\n<!---->\n<!-- <script src='https://cdn.jotfor.ms/s/umd/latest/for-form-embed-handler.js'></script>  -->\n<!---->\n<!---->\n<!--  <iframe id=\"JotFormIFrame-241915920355154\" title=\"Appointment Request Form\" onload=\"window.parent.scrollTo(0,0)\" allowtransparency=\"true\" allow=\"geolocation; microphone; camera; fullscreen\" src=\"https://form.jotform.com/241915920355154\" frameborder=\"0\" style=\"min-width:100%;max-width:100%;height:539px;border:none;\" scrolling=\"no\" > </iframe> -->\n<!---->\n<!---->\n<!-- <script>window.jotformEmbedHandler(\"iframe[id='JotFormIFrame-241915920355154']\", \"https://form.jotform.com/\")</script> -->\n",
      summary: null,
      date: "2021-04-24T00:00:00Z",
      metadata: {},
      tags: ["demo","jotform","arcgis"],
      categories: ["web"],
      series: [],
      projects: []
  };
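
-- Illustrative query (not part of the export; commented out): rendering a single
-- page client-side could pull its stored HTML by slug, e.g. the widget post above.
-- SELECT title, content FROM post WHERE slug = "jotform-eg";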