Diffstat
125 files changed, 2724 insertions, 3681 deletions
@@ -8,4 +8,4 @@ server: clean bundle exec jekyll server --watch publish: build - @rsync -avz --exclude Makefile --exclude README.md _site/ franck@lumberjaph.net:~/sites/lumberjaph.net + @rsync -avz --exclude Makefile --exclude README.md _site/ fcuny@lumberjaph.net:~/sites/lumberjaph.net diff --git a/_assets/stylesheets/foundation.css b/_assets/stylesheets/foundation.css deleted file mode 100644 index 33883cf..0000000 --- a/_assets/stylesheets/foundation.css +++ /dev/null @@ -1,1055 +0,0 @@ -*, -*:before, -*:after { - -moz-box-sizing: border-box; - -webkit-box-sizing: border-box; - box-sizing: border-box; } - -html, -body { - font-size: 100%; } - -body { - background: white; - color: #222222; - padding: 0; - margin: 0; - font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; - font-weight: normal; - font-style: normal; - line-height: 1; - position: relative; - cursor: default; } - -a:hover { - cursor: pointer; } - -a:focus { - outline: none; } - -img, -object, -embed { - max-width: 100%; - height: auto; } - -object, -embed { - height: 100%; } - -img { - -ms-interpolation-mode: bicubic; } - -#map_canvas img, -#map_canvas embed, -#map_canvas object, -.map_canvas img, -.map_canvas embed, -.map_canvas object { - max-width: none !important; } - -.left { - float: left !important; } - -.right { - float: right !important; } - -.text-left { - text-align: left !important; } - -.text-right { - text-align: right !important; } - -.text-center { - text-align: center !important; } - -.text-justify { - text-align: justify !important; } - -.hide { - display: none; } - -.antialiased { - -webkit-font-smoothing: antialiased; } - -img { - display: inline-block; - vertical-align: middle; } - -textarea { - height: auto; - min-height: 50px; } - -select { - width: 100%; } - -/* Grid HTML Classes */ -.row { - width: 100%; - margin-left: auto; - margin-right: auto; - margin-top: 0; - margin-bottom: 0; - max-width: 52em; - *zoom: 1; } - .row:before, .row:after { - content: " "; - display: table; } - .row:after { - clear: both; } - .row.collapse .column, - .row.collapse .columns { - position: relative; - padding-left: 0; - padding-right: 0; - float: left; } - .row .row { - width: auto; - margin-left: -0.9375em; - margin-right: -0.9375em; - margin-top: 0; - margin-bottom: 0; - max-width: none; - *zoom: 1; } - .row .row:before, .row .row:after { - content: " "; - display: table; } - .row .row:after { - clear: both; } - .row .row.collapse { - width: auto; - margin: 0; - max-width: none; - *zoom: 1; } - .row .row.collapse:before, .row .row.collapse:after { - content: " "; - display: table; } - .row .row.collapse:after { - clear: both; } - -.column, -.columns { - position: relative; - padding-left: 0.9375em; - padding-right: 0.9375em; - width: 100%; - float: left; } - -@media only screen { - .column, - .columns { - position: relative; - padding-left: 0.9375em; - padding-right: 0.9375em; - float: left; } - - .small-1 { - position: relative; - width: 8.33333%; } - - .small-2 { - position: relative; - width: 16.66667%; } - - .small-3 { - position: relative; - width: 25%; } - - .small-4 { - position: relative; - width: 33.33333%; } - - .small-5 { - position: relative; - width: 41.66667%; } - - .small-6 { - position: relative; - width: 50%; } - - .small-7 { - position: relative; - width: 58.33333%; } - - .small-8 { - position: relative; - width: 66.66667%; } - - .small-9 { - position: relative; - width: 75%; } - - .small-10 { - position: relative; - width: 83.33333%; } - - .small-11 { - position: relative; - 
width: 91.66667%; } - - .small-12 { - position: relative; - width: 100%; } - - .small-offset-0 { - position: relative; - margin-left: 0%; } - - .small-offset-1 { - position: relative; - margin-left: 8.33333%; } - - .small-offset-2 { - position: relative; - margin-left: 16.66667%; } - - .small-offset-3 { - position: relative; - margin-left: 25%; } - - .small-offset-4 { - position: relative; - margin-left: 33.33333%; } - - .small-offset-5 { - position: relative; - margin-left: 41.66667%; } - - .small-offset-6 { - position: relative; - margin-left: 50%; } - - .small-offset-7 { - position: relative; - margin-left: 58.33333%; } - - .small-offset-8 { - position: relative; - margin-left: 66.66667%; } - - .small-offset-9 { - position: relative; - margin-left: 75%; } - - .small-offset-10 { - position: relative; - margin-left: 83.33333%; } - - [class*="column"] + [class*="column"]:last-child { - float: right; } - - [class*="column"] + [class*="column"].end { - float: left; } - - .column.small-centered, - .columns.small-centered { - position: relative; - margin-left: auto; - margin-right: auto; - float: none !important; } } -/* Styles for screens that are atleast 768px; */ -@media only screen and (min-width: 768px) { - .large-1 { - position: relative; - width: 8.33333%; } - - .large-2 { - position: relative; - width: 16.66667%; } - - .large-3 { - position: relative; - width: 25%; } - - .large-4 { - position: relative; - width: 33.33333%; } - - .large-5 { - position: relative; - width: 41.66667%; } - - .large-6 { - position: relative; - width: 50%; } - - .large-7 { - position: relative; - width: 58.33333%; } - - .large-8 { - position: relative; - width: 66.66667%; } - - .large-9 { - position: relative; - width: 75%; } - - .large-10 { - position: relative; - width: 83.33333%; } - - .large-11 { - position: relative; - width: 91.66667%; } - - .large-12 { - position: relative; - width: 100%; } - - .row .large-offset-0 { - position: relative; - margin-left: 0%; } - - .row .large-offset-1 { - position: relative; - margin-left: 8.33333%; } - - .row .large-offset-2 { - position: relative; - margin-left: 16.66667%; } - - .row .large-offset-3 { - position: relative; - margin-left: 25%; } - - .row .large-offset-4 { - position: relative; - margin-left: 33.33333%; } - - .row .large-offset-5 { - position: relative; - margin-left: 41.66667%; } - - .row .large-offset-6 { - position: relative; - margin-left: 50%; } - - .row .large-offset-7 { - position: relative; - margin-left: 58.33333%; } - - .row .large-offset-8 { - position: relative; - margin-left: 66.66667%; } - - .row .large-offset-9 { - position: relative; - margin-left: 75%; } - - .row .large-offset-10 { - position: relative; - margin-left: 83.33333%; } - - .row .large-offset-11 { - position: relative; - margin-left: 91.66667%; } - - .push-1 { - position: relative; - left: 8.33333%; - right: auto; } - - .pull-1 { - position: relative; - right: 8.33333%; - left: auto; } - - .push-2 { - position: relative; - left: 16.66667%; - right: auto; } - - .pull-2 { - position: relative; - right: 16.66667%; - left: auto; } - - .push-3 { - position: relative; - left: 25%; - right: auto; } - - .pull-3 { - position: relative; - right: 25%; - left: auto; } - - .push-4 { - position: relative; - left: 33.33333%; - right: auto; } - - .pull-4 { - position: relative; - right: 33.33333%; - left: auto; } - - .push-5 { - position: relative; - left: 41.66667%; - right: auto; } - - .pull-5 { - position: relative; - right: 41.66667%; - left: auto; } - - .push-6 { - position: relative; - 
left: 50%; - right: auto; } - - .pull-6 { - position: relative; - right: 50%; - left: auto; } - - .push-7 { - position: relative; - left: 58.33333%; - right: auto; } - - .pull-7 { - position: relative; - right: 58.33333%; - left: auto; } - - .push-8 { - position: relative; - left: 66.66667%; - right: auto; } - - .pull-8 { - position: relative; - right: 66.66667%; - left: auto; } - - .push-9 { - position: relative; - left: 75%; - right: auto; } - - .pull-9 { - position: relative; - right: 75%; - left: auto; } - - .push-10 { - position: relative; - left: 83.33333%; - right: auto; } - - .pull-10 { - position: relative; - right: 83.33333%; - left: auto; } - - .push-11 { - position: relative; - left: 91.66667%; - right: auto; } - - .pull-11 { - position: relative; - right: 91.66667%; - left: auto; } - - .column.large-centered, - .columns.large-centered { - position: relative; - margin-left: auto; - margin-right: auto; - float: none !important; } - - .column.large-uncentered, - .columns.large-uncentered { - margin-left: 0; - margin-right: 0; - float: left !important; } - - .column.large-uncentered.opposite, - .columns.large-uncentered.opposite { - float: right !important; } } -/* Foundation Block Grids for below small breakpoint */ -@media only screen { - [class*="block-grid-"] { - display: block; - padding: 0; - margin: 0 -0.625em; - *zoom: 1; } - [class*="block-grid-"]:before, [class*="block-grid-"]:after { - content: " "; - display: table; } - [class*="block-grid-"]:after { - clear: both; } - [class*="block-grid-"] > li { - display: inline; - height: auto; - float: left; - padding: 0 0.625em 1.25em; } - - .small-block-grid-1 > li { - width: 100%; - padding: 0 0.625em 1.25em; } - .small-block-grid-1 > li:nth-of-type(n) { - clear: none; } - .small-block-grid-1 > li:nth-of-type(1n+1) { - clear: both; } - - .small-block-grid-2 > li { - width: 50%; - padding: 0 0.625em 1.25em; } - .small-block-grid-2 > li:nth-of-type(n) { - clear: none; } - .small-block-grid-2 > li:nth-of-type(2n+1) { - clear: both; } - - .small-block-grid-3 > li { - width: 33.33333%; - padding: 0 0.625em 1.25em; } - .small-block-grid-3 > li:nth-of-type(n) { - clear: none; } - .small-block-grid-3 > li:nth-of-type(3n+1) { - clear: both; } - - .small-block-grid-4 > li { - width: 25%; - padding: 0 0.625em 1.25em; } - .small-block-grid-4 > li:nth-of-type(n) { - clear: none; } - .small-block-grid-4 > li:nth-of-type(4n+1) { - clear: both; } - - .small-block-grid-5 > li { - width: 20%; - padding: 0 0.625em 1.25em; } - .small-block-grid-5 > li:nth-of-type(n) { - clear: none; } - .small-block-grid-5 > li:nth-of-type(5n+1) { - clear: both; } - - .small-block-grid-6 > li { - width: 16.66667%; - padding: 0 0.625em 1.25em; } - .small-block-grid-6 > li:nth-of-type(n) { - clear: none; } - .small-block-grid-6 > li:nth-of-type(6n+1) { - clear: both; } - - .small-block-grid-7 > li { - width: 14.28571%; - padding: 0 0.625em 1.25em; } - .small-block-grid-7 > li:nth-of-type(n) { - clear: none; } - .small-block-grid-7 > li:nth-of-type(7n+1) { - clear: both; } - - .small-block-grid-8 > li { - width: 12.5%; - padding: 0 0.625em 1.25em; } - .small-block-grid-8 > li:nth-of-type(n) { - clear: none; } - .small-block-grid-8 > li:nth-of-type(8n+1) { - clear: both; } - - .small-block-grid-9 > li { - width: 11.11111%; - padding: 0 0.625em 1.25em; } - .small-block-grid-9 > li:nth-of-type(n) { - clear: none; } - .small-block-grid-9 > li:nth-of-type(9n+1) { - clear: both; } - - .small-block-grid-10 > li { - width: 10%; - padding: 0 0.625em 1.25em; } - 
.small-block-grid-10 > li:nth-of-type(n) { - clear: none; } - .small-block-grid-10 > li:nth-of-type(10n+1) { - clear: both; } - - .small-block-grid-11 > li { - width: 9.09091%; - padding: 0 0.625em 1.25em; } - .small-block-grid-11 > li:nth-of-type(n) { - clear: none; } - .small-block-grid-11 > li:nth-of-type(11n+1) { - clear: both; } - - .small-block-grid-12 > li { - width: 8.33333%; - padding: 0 0.625em 1.25em; } - .small-block-grid-12 > li:nth-of-type(n) { - clear: none; } - .small-block-grid-12 > li:nth-of-type(12n+1) { - clear: both; } } -/* Foundation Block Grids for above small breakpoint */ -@media only screen and (min-width: 768px) { - /* Remove small grid clearing */ - .small-block-grid-1 > li:nth-of-type(1n+1) { - clear: none; } - - .small-block-grid-2 > li:nth-of-type(2n+1) { - clear: none; } - - .small-block-grid-3 > li:nth-of-type(3n+1) { - clear: none; } - - .small-block-grid-4 > li:nth-of-type(4n+1) { - clear: none; } - - .small-block-grid-5 > li:nth-of-type(5n+1) { - clear: none; } - - .small-block-grid-6 > li:nth-of-type(6n+1) { - clear: none; } - - .small-block-grid-7 > li:nth-of-type(7n+1) { - clear: none; } - - .small-block-grid-8 > li:nth-of-type(8n+1) { - clear: none; } - - .small-block-grid-9 > li:nth-of-type(9n+1) { - clear: none; } - - .small-block-grid-10 > li:nth-of-type(10n+1) { - clear: none; } - - .small-block-grid-11 > li:nth-of-type(11n+1) { - clear: none; } - - .small-block-grid-12 > li:nth-of-type(12n+1) { - clear: none; } - - .large-block-grid-1 > li { - width: 100%; - padding: 0 0.625em 1.25em; } - .large-block-grid-1 > li:nth-of-type(n) { - clear: none; } - .large-block-grid-1 > li:nth-of-type(1n+1) { - clear: both; } - - .large-block-grid-2 > li { - width: 50%; - padding: 0 0.625em 1.25em; } - .large-block-grid-2 > li:nth-of-type(n) { - clear: none; } - .large-block-grid-2 > li:nth-of-type(2n+1) { - clear: both; } - - .large-block-grid-3 > li { - width: 33.33333%; - padding: 0 0.625em 1.25em; } - .large-block-grid-3 > li:nth-of-type(n) { - clear: none; } - .large-block-grid-3 > li:nth-of-type(3n+1) { - clear: both; } - - .large-block-grid-4 > li { - width: 25%; - padding: 0 0.625em 1.25em; } - .large-block-grid-4 > li:nth-of-type(n) { - clear: none; } - .large-block-grid-4 > li:nth-of-type(4n+1) { - clear: both; } - - .large-block-grid-5 > li { - width: 20%; - padding: 0 0.625em 1.25em; } - .large-block-grid-5 > li:nth-of-type(n) { - clear: none; } - .large-block-grid-5 > li:nth-of-type(5n+1) { - clear: both; } - - .large-block-grid-6 > li { - width: 16.66667%; - padding: 0 0.625em 1.25em; } - .large-block-grid-6 > li:nth-of-type(n) { - clear: none; } - .large-block-grid-6 > li:nth-of-type(6n+1) { - clear: both; } - - .large-block-grid-7 > li { - width: 14.28571%; - padding: 0 0.625em 1.25em; } - .large-block-grid-7 > li:nth-of-type(n) { - clear: none; } - .large-block-grid-7 > li:nth-of-type(7n+1) { - clear: both; } - - .large-block-grid-8 > li { - width: 12.5%; - padding: 0 0.625em 1.25em; } - .large-block-grid-8 > li:nth-of-type(n) { - clear: none; } - .large-block-grid-8 > li:nth-of-type(8n+1) { - clear: both; } - - .large-block-grid-9 > li { - width: 11.11111%; - padding: 0 0.625em 1.25em; } - .large-block-grid-9 > li:nth-of-type(n) { - clear: none; } - .large-block-grid-9 > li:nth-of-type(9n+1) { - clear: both; } - - .large-block-grid-10 > li { - width: 10%; - padding: 0 0.625em 1.25em; } - .large-block-grid-10 > li:nth-of-type(n) { - clear: none; } - .large-block-grid-10 > li:nth-of-type(10n+1) { - clear: both; } - - .large-block-grid-11 > li { 
- width: 9.09091%; - padding: 0 0.625em 1.25em; } - .large-block-grid-11 > li:nth-of-type(n) { - clear: none; } - .large-block-grid-11 > li:nth-of-type(11n+1) { - clear: both; } - - .large-block-grid-12 > li { - width: 8.33333%; - padding: 0 0.625em 1.25em; } - .large-block-grid-12 > li:nth-of-type(n) { - clear: none; } - .large-block-grid-12 > li:nth-of-type(12n+1) { - clear: both; } } -p.lead { - font-size: 1.21875em; - line-height: 1.6; } - -.subheader { - line-height: 1.4; - color: #6f6f6f; - font-weight: 300; - margin-top: 0.2em; - margin-bottom: 0.5em; } - -/* Typography resets */ -div, -dl, -dt, -dd, -ul, -ol, -li, -h1, -h2, -h3, -h4, -h5, -h6, -pre, -form, -p, -blockquote, -th, -td { - margin: 0; - padding: 0; - direction: ltr; } - -/* Default Link Styles */ -a { - color: #2ba6cb; - text-decoration: none; - line-height: inherit; } - a:hover, a:focus { - color: #2795b6; } - a img { - border: none; } - -/* Default paragraph styles */ -p { - font-family: inherit; - font-weight: normal; - font-size: 1em; - line-height: 1.6; - margin-bottom: 1.25em; - text-rendering: optimizeLegibility; } - p aside { - font-size: 0.875em; - line-height: 1.35; - font-style: italic; } - -/* Default header styles */ -h1, h2, h3, h4, h5, h6 { - font-family: "Helvetica Neue", "Helvetica", Helvetica, Arial, sans-serif; - font-weight: bold; - font-style: normal; - color: #222222; - text-rendering: optimizeLegibility; - margin-top: 0.2em; - margin-bottom: 0.5em; - line-height: 1.2125em; } - h1 small, h2 small, h3 small, h4 small, h5 small, h6 small { - font-size: 60%; - color: #6f6f6f; - line-height: 0; } - -h1 { - font-size: 2.125em; } - -h2 { - font-size: 1.6875em; } - -h3 { - font-size: 1.375em; } - -h4 { - font-size: 1.125em; } - -h5 { - font-size: 1.125em; } - -h6 { - font-size: 1em; } - -hr { - border: solid #dddddd; - border-width: 1px 0 0; - clear: both; - margin: 1.25em 0 1.1875em; - height: 0; } - -/* Helpful Typography Defaults */ -em, -i { - font-style: italic; - line-height: inherit; } - -strong, -b { - font-weight: bold; - line-height: inherit; } - -small { - font-size: 60%; - line-height: inherit; } - -code { - font-family: Consolas, "Liberation Mono", Courier, monospace; - font-weight: bold; - color: #7f0a0c; } - -/* Lists */ -ul, -ol, -dl { - font-size: 1em; - line-height: 1.6; - margin-bottom: 1.25em; - list-style-position: outside; - font-family: inherit; } - -ul, ol { - margin-left: 0; } - -/* Unordered Lists */ -ul li ul, -ul li ol { - margin-left: 1.25em; - margin-bottom: 0; - font-size: 1em; - /* Override nested font-size change */ } -ul.square li ul, ul.circle li ul, ul.disc li ul { - list-style: inherit; } -ul.square { - list-style-type: square; } -ul.circle { - list-style-type: circle; } -ul.disc { - list-style-type: disc; } -ul.no-bullet { - list-style: none; } - -/* Ordered Lists */ -ol li ul, -ol li ol { - margin-left: 1.25em; - margin-bottom: 0; } - -/* Definition Lists */ -dl dt { - margin-bottom: 0.3em; - font-weight: bold; } -dl dd { - margin-bottom: 0.75em; } - -/* Abbreviations */ -abbr, -acronym { - text-transform: uppercase; - font-size: 90%; - color: #222222; - border-bottom: 1px dotted #dddddd; - cursor: help; } - -abbr { - text-transform: none; } - -/* Blockquotes */ -blockquote { - margin: 0 0 1.25em; - padding: 0.5625em 1.25em 0 1.1875em; - border-left: 1px solid #dddddd; } - blockquote cite { - display: block; - font-size: 0.8125em; - color: #555555; } - blockquote cite:before { - content: "\2014 \0020"; } - blockquote cite a, - blockquote cite a:visited { - color: 
#555555; } - -blockquote, -blockquote p { - line-height: 1.6; - color: #6f6f6f; } - -/* Microformats */ -.vcard { - display: inline-block; - margin: 0 0 1.25em 0; - border: 1px solid #dddddd; - padding: 0.625em 0.75em; } - .vcard li { - margin: 0; - display: block; } - .vcard .fn { - font-weight: bold; - font-size: 0.9375em; } - -.vevent .summary { - font-weight: bold; } -.vevent abbr { - cursor: default; - text-decoration: none; - font-weight: bold; - border: none; - padding: 0 0.0625em; } - -@media only screen and (min-width: 768px) { - h1, h2, h3, h4, h5, h6 { - line-height: 1.4; } - - h1 { - font-size: 2.75em; } - - h2 { - font-size: 2.3125em; } - - h3 { - font-size: 1.6875em; } - - h4 { - font-size: 1.4375em; } } -/* - * Print styles. - * - * Inlined to avoid required HTTP connection: www.phpied.com/delay-loading-your-print-css/ - * Credit to Paul Irish and HTML5 Boilerplate (html5boilerplate.com) -*/ -.print-only { - display: none !important; } - -@media print { - * { - background: transparent !important; - color: black !important; - /* Black prints faster: h5bp.com/s */ - box-shadow: none !important; - text-shadow: none !important; } - - a, - a:visited { - text-decoration: underline; } - - a[href]:after { - content: " (" attr(href) ")"; } - - abbr[title]:after { - content: " (" attr(title) ")"; } - - .ir a:after, - a[href^="javascript:"]:after, - a[href^="#"]:after { - content: ""; } - - pre, - blockquote { - border: 1px solid #999999; - page-break-inside: avoid; } - - thead { - display: table-header-group; - /* h5bp.com/t */ } - - tr, - img { - page-break-inside: avoid; } - - img { - max-width: 100% !important; } - - @page { - margin: 0.5cm; } - - p, - h2, - h3 { - orphans: 3; - widows: 3; } - - h2, - h3 { - page-break-after: avoid; } - - .hide-on-print { - display: none !important; } - - .print-only { - display: block !important; } - - .hide-for-print { - display: none !important; } - - .show-for-print { - display: inherit !important; } } - -/* Tables */ -table { - background: white; - margin-bottom: 1.25em; - border: solid 1px #dddddd; } - table thead, - table tfoot { - background: whitesmoke; - font-weight: bold; } - table thead tr th, - table thead tr td, - table tfoot tr th, - table tfoot tr td { - padding: 0.5em 0.625em 0.625em; - font-size: 0.875em; - color: #222222; - text-align: left; } - table tr th, - table tr td { - padding: 0.5625em 0.625em; - font-size: 0.875em; - color: #222222; } - table tr.even, table tr.alt, table tr:nth-of-type(even) { - background: #f9f9f9; } - table thead tr th, - table tfoot tr th, - table tbody tr td, - table tr td, - table tfoot tr td { - display: table-cell; - line-height: 1.125em; } diff --git a/_assets/stylesheets/lumberjaph.css.scss b/_assets/stylesheets/lumberjaph.css.scss deleted file mode 100644 index ad28a66..0000000 --- a/_assets/stylesheets/lumberjaph.css.scss +++ /dev/null @@ -1,4 +0,0 @@ -//= require normalize -//= require foundation -//= require pygment -//= require text diff --git a/_assets/stylesheets/normalize.css b/_assets/stylesheets/normalize.css deleted file mode 100644 index 6d24a38..0000000 --- a/_assets/stylesheets/normalize.css +++ /dev/null @@ -1,402 +0,0 @@ -/*! normalize.css v2.1.1 | MIT License | git.io/normalize */ - -/* ========================================================================== - HTML5 display definitions - ========================================================================== */ - -/** - * Correct `block` display not defined in IE 8/9. 
- */ - -article, -aside, -details, -figcaption, -figure, -footer, -header, -hgroup, -main, -nav, -section, -summary { - display: block; -} - -/** - * Correct `inline-block` display not defined in IE 8/9. - */ - -audio, -canvas, -video { - display: inline-block; -} - -/** - * Prevent modern browsers from displaying `audio` without controls. - * Remove excess height in iOS 5 devices. - */ - -audio:not([controls]) { - display: none; - height: 0; -} - -/** - * Address styling not present in IE 8/9. - */ - -[hidden] { - display: none; -} - -/* ========================================================================== - Base - ========================================================================== */ - -/** - * 1. Prevent system color scheme's background color being used in Firefox, IE, - * and Opera. - * 2. Prevent system color scheme's text color being used in Firefox, IE, and - * Opera. - * 3. Set default font family to sans-serif. - * 4. Prevent iOS text size adjust after orientation change, without disabling - * user zoom. - */ - -html { - background: #fff; /* 1 */ - color: #000; /* 2 */ - font-family: sans-serif; /* 3 */ - -ms-text-size-adjust: 100%; /* 4 */ - -webkit-text-size-adjust: 100%; /* 4 */ -} - -/** - * Remove default margin. - */ - -body { - margin: 0; -} - -/* ========================================================================== - Links - ========================================================================== */ - -/** - * Address `outline` inconsistency between Chrome and other browsers. - */ - -a:focus { - outline: thin dotted; -} - -/** - * Improve readability when focused and also mouse hovered in all browsers. - */ - -a:active, -a:hover { - outline: 0; -} - -/* ========================================================================== - Typography - ========================================================================== */ - -/** - * Address variable `h1` font-size and margin within `section` and `article` - * contexts in Firefox 4+, Safari 5, and Chrome. - */ - -h1 { - font-size: 2em; - margin: 0.67em 0; -} - -/** - * Address styling not present in IE 8/9, Safari 5, and Chrome. - */ - -abbr[title] { - border-bottom: 1px dotted; -} - -/** - * Address style set to `bolder` in Firefox 4+, Safari 5, and Chrome. - */ - -b, -strong { - font-weight: bold; -} - -/** - * Address styling not present in Safari 5 and Chrome. - */ - -dfn { - font-style: italic; -} - -/** - * Address differences between Firefox and other browsers. - */ - -hr { - -moz-box-sizing: content-box; - box-sizing: content-box; - height: 0; -} - -/** - * Address styling not present in IE 8/9. - */ - -mark { - background: #ff0; - color: #000; -} - -/** - * Correct font family set oddly in Safari 5 and Chrome. - */ - -code, -kbd, -pre, -samp { - font-family: monospace, serif; - font-size: 1em; -} - -/** - * Improve readability of pre-formatted text in all browsers. - */ - -pre { - white-space: pre-wrap; -} - -/** - * Set consistent quote types. - */ - -q { - quotes: "\201C" "\201D" "\2018" "\2019"; -} - -/** - * Address inconsistent and variable font size in all browsers. - */ - -small { - font-size: 80%; -} - -/** - * Prevent `sub` and `sup` affecting `line-height` in all browsers. 
- */ - -sub, -sup { - font-size: 75%; - line-height: 0; - position: relative; - vertical-align: baseline; -} - -sup { - top: -0.5em; -} - -sub { - bottom: -0.25em; -} - -/* ========================================================================== - Embedded content - ========================================================================== */ - -/** - * Remove border when inside `a` element in IE 8/9. - */ - -img { - border: 0; -} - -/** - * Correct overflow displayed oddly in IE 9. - */ - -svg:not(:root) { - overflow: hidden; -} - -/* ========================================================================== - Figures - ========================================================================== */ - -/** - * Address margin not present in IE 8/9 and Safari 5. - */ - -figure { - margin: 0; -} - -/* ========================================================================== - Forms - ========================================================================== */ - -/** - * Define consistent border, margin, and padding. - */ - -fieldset { - border: 1px solid #c0c0c0; - margin: 0 2px; - padding: 0.35em 0.625em 0.75em; -} - -/** - * 1. Correct `color` not being inherited in IE 8/9. - * 2. Remove padding so people aren't caught out if they zero out fieldsets. - */ - -legend { - border: 0; /* 1 */ - padding: 0; /* 2 */ -} - -/** - * 1. Correct font family not being inherited in all browsers. - * 2. Correct font size not being inherited in all browsers. - * 3. Address margins set differently in Firefox 4+, Safari 5, and Chrome. - */ - -button, -input, -select, -textarea { - font-family: inherit; /* 1 */ - font-size: 100%; /* 2 */ - margin: 0; /* 3 */ -} - -/** - * Address Firefox 4+ setting `line-height` on `input` using `!important` in - * the UA stylesheet. - */ - -button, -input { - line-height: normal; -} - -/** - * Address inconsistent `text-transform` inheritance for `button` and `select`. - * All other form control elements do not inherit `text-transform` values. - * Correct `button` style inheritance in Chrome, Safari 5+, and IE 8+. - * Correct `select` style inheritance in Firefox 4+ and Opera. - */ - -button, -select { - text-transform: none; -} - -/** - * 1. Avoid the WebKit bug in Android 4.0.* where (2) destroys native `audio` - * and `video` controls. - * 2. Correct inability to style clickable `input` types in iOS. - * 3. Improve usability and consistency of cursor style between image-type - * `input` and others. - */ - -button, -html input[type="button"], /* 1 */ -input[type="reset"], -input[type="submit"] { - -webkit-appearance: button; /* 2 */ - cursor: pointer; /* 3 */ -} - -/** - * Re-set default cursor for disabled elements. - */ - -button[disabled], -html input[disabled] { - cursor: default; -} - -/** - * 1. Address box sizing set to `content-box` in IE 8/9. - * 2. Remove excess padding in IE 8/9. - */ - -input[type="checkbox"], -input[type="radio"] { - box-sizing: border-box; /* 1 */ - padding: 0; /* 2 */ -} - -/** - * 1. Address `appearance` set to `searchfield` in Safari 5 and Chrome. - * 2. Address `box-sizing` set to `border-box` in Safari 5 and Chrome - * (include `-moz` to future-proof). - */ - -input[type="search"] { - -webkit-appearance: textfield; /* 1 */ - -moz-box-sizing: content-box; - -webkit-box-sizing: content-box; /* 2 */ - box-sizing: content-box; -} - -/** - * Remove inner padding and search cancel button in Safari 5 and Chrome - * on OS X. 
- */ - -input[type="search"]::-webkit-search-cancel-button, -input[type="search"]::-webkit-search-decoration { - -webkit-appearance: none; -} - -/** - * Remove inner padding and border in Firefox 4+. - */ - -button::-moz-focus-inner, -input::-moz-focus-inner { - border: 0; - padding: 0; -} - -/** - * 1. Remove default vertical scrollbar in IE 8/9. - * 2. Improve readability and alignment in all browsers. - */ - -textarea { - overflow: auto; /* 1 */ - vertical-align: top; /* 2 */ -} - -/* ========================================================================== - Tables - ========================================================================== */ - -/** - * Remove most spacing between table cells. - */ - -table { - border-collapse: collapse; - border-spacing: 0; -} diff --git a/_assets/stylesheets/pygment.css b/_assets/stylesheets/pygment.css deleted file mode 100644 index e3ed165..0000000 --- a/_assets/stylesheets/pygment.css +++ /dev/null @@ -1,3 +0,0 @@ -pre { - padding-left: 5px; -}pre .c{color:#998;font-style:italic;}pre .err{color:#a61717;background-color:#e3d2d2;}pre .k{font-weight:bold;}pre .o{font-weight:bold;}pre .cm{color:#998;font-style:italic;}pre .cp{color:#999;font-weight:bold;}pre .c1{color:#998;font-style:italic;}pre .cs{color:#999;font-weight:bold;font-style:italic;}pre .gd{color:#000;background-color:#fdd;}pre .gd .x{color:#000;background-color:#faa;}pre .ge{font-style:italic;}pre .gr{color:#a00;}pre .gh{color:#999;}pre .gi{color:#000;background-color:#dfd;}pre .gi .x{color:#000;background-color:#afa;}pre .go{color:#888;}pre .gp{color:#555;}pre .gs{font-weight:bold;}pre .gu{color:#aaa;}pre .gt{color:#a00;}pre .kc{font-weight:bold;}pre .kd{font-weight:bold;}pre .kp{font-weight:bold;}pre .kr{font-weight:bold;}pre .kt{color:#458;font-weight:bold;}pre .m{color:#099;}pre .s{color:#d14;}pre .na{color:#008080;}pre .nb{color:#0086B3;}pre .nc{color:#458;font-weight:bold;}pre .no{color:#008080;}pre .ni{color:#800080;}pre .ne{color:#900;font-weight:bold;}pre .nf{color:#900;font-weight:bold;}pre .nn{color:#555;}pre .nt{color:#000080;}pre .nv{color:#008080;}pre .ow{font-weight:bold;}pre .w{color:#bbb;}pre .mf{color:#099;}pre .mh{color:#099;}pre .mi{color:#099;}pre .mo{color:#099;}pre .sb{color:#d14;}pre .sc{color:#d14;}pre .sd{color:#d14;}pre .s2{color:#d14;}pre .se{color:#d14;}pre .sh{color:#d14;}pre .si{color:#d14;}pre .sx{color:#d14;}pre .sr{color:#009926;}pre .s1{color:#d14;}pre .ss{color:#990073;}pre .bp{color:#999;}pre .vc{color:#008080;}pre .vg{color:#008080;}pre .vi{color:#008080;}pre .il{color:#099;} diff --git a/_assets/stylesheets/text.css b/_assets/stylesheets/text.css deleted file mode 100644 index 5b71cb2..0000000 --- a/_assets/stylesheets/text.css +++ /dev/null @@ -1,69 +0,0 @@ -#header h1 { - font-size: 120%; - display: inline-block; -} - -code, pre { - color: #000; - background-color: #fff; - font-weight: normal; - font-family: "Menlo", "Consolas", "Inconsolata", "Anonymous", "Monaco", monospace; - font-size: 0.9em; - line-height: 1.2em; -} - -p code, li>code { - border: 1px solid #DDDDDD; - padding: 2px; - font-size: 0.8em; -} - -#header h1 a, h2.post-title a, h2.static-title a { - color: #000; -} - -ul, ol { - margin-left: 2em; -} - -div.highlight, pre { - margin-bottom: 1em; -} - -code.has-jax { - border: none; - background-color: #fff; -} - -.date { - font-style: italic; - color: #4e4e4e; -} - -body { - font: 300 1.1em/1.6em "Droid Serif", "Open Sans", Cambria, Georgia, "DejaVu Serif", serif; -} - -blockquote { - padding-top: 0; -} - -.panel { - padding-left: 
2em; - padding-right: 2em; - font-size: 120%; -} - -.panel p { - font-weight: bold; - font-style: italic; - line-height: 1.1em; -} - -footer { - text-align: center; -} - -footer span { - font-style: italic; -} diff --git a/_config.yml b/_config.yml index 8469a5c..5826510 100644 --- a/_config.yml +++ b/_config.yml @@ -3,9 +3,4 @@ pygments: true markdown: redcarpet name: lumberjaph.net redcarpet: - extensions: ["no_intra_emphasis", "fenced_code_blocks", "autolink", "strikethrough", "superscript"] -assets: - compress: - css: sass - js: uglifier - cache: false
\ No newline at end of file + extensions: ["no_intra_emphasis", "fenced_code_blocks", "autolink", "strikethrough", "superscript"]
\ No newline at end of file diff --git a/_includes/footer.html b/_includes/footer.html new file mode 100644 index 0000000..0753098 --- /dev/null +++ b/_includes/footer.html @@ -0,0 +1,7 @@ +<footer> + <p id='copyright'>© 2008 – 2013 Franck Cuny. + [ <a href='/list.html'>All posts</a> | + <a href='/projects.html'>Projects</a> | + <a href='/about.html'>Colophon</a> | + <a href='/resume.html'>Résumé</a> ] +</footer> diff --git a/_layouts/default.html b/_layouts/default.html index d0b393f..b883c95 100644 --- a/_layouts/default.html +++ b/_layouts/default.html @@ -15,27 +15,13 @@ <meta name="twitter:description" content="{{ page.summary }}"> <title>{{ page.title }} | lumberjaph.net</title> - {% stylesheet lumberjaph %} + <link rel="stylesheet" href="/static/css/style.css" type="text/css"/> + <link rel="stylesheet" href="/static/css/emacs.css" type="text/css"/> + </head> <body> - <div class="row"> - <div class="large-12 large-offset-1 columns" id="header"> - <h1><a href="/">Franck Cuny</a></h1> - { - <a href="/">writing</a>, - <a href="/projects.html">projects</a>, - <a href="/about.html">about</a>, - <a href="/contact.html">contact</a> - } - </div> - </div> - - <div class="row"> - <div class="large-10 large-centered columns"> - {{ content }} - </div> - </div> + <div id="container"> {{ content }} </div> </body> </html> diff --git a/_layouts/post.html b/_layouts/post.html index 3422dbe..47e12a4 100644 --- a/_layouts/post.html +++ b/_layouts/post.html @@ -1,28 +1,30 @@ --- layout: default --- -<h2 class="post-title"><a href="{{ page.url }}">{{ page.title }}</a></h2> +<h1><a href="{{ page.url }}">{{ page.title }}</a></h1> -<p class="date">{{ page.date | date_to_string }}</p> +{% if page.previous.url %} + <p id="bigleft"><a href="{{page.previous.url}}">«</a></p><br /> +{% endif %} +{% if page.next.url %} + <p id="bigright"><a href="{{page.next.url}}">»</a></p> +{% endif %} -{{ content }} +<div id="entry"> + {{ content }} -<hr /> + <p class="timestamp"> + {% if page.previous.url %} + <a href="{{page.previous.url}}">« {{page.previous.title}}</a> | + {% endif %} -<div id="call-to-action"> - <h3>Read More</h3> - <ul> - {% for post in site.related_posts %}{% if post.url != page.url %} - <li> - <a href="{{ post.url }}">{{ post.title }}</a> - ({{ post.date | date_to_string }}) - </li> - {% endif %}{% endfor %} - </ul> -</div> + {{ page.date | date_to_string }} + + {% if page.next.url %} + | <a href="{{page.next.url}}">{{page.next.title}} »</a> + {% endif %} + </p> -<hr/> +</div> -<footer> - <span>I sleep all night and I work all day.</span><br/> -</footer> +{% include footer.html %} diff --git a/_layouts/static.html b/_layouts/static.html index 3f58dbb..2e6ba00 100644 --- a/_layouts/static.html +++ b/_layouts/static.html @@ -1,7 +1,8 @@ --- layout: default --- -<h2 class="static-title"><a href="{{ page.url }}">{{ page.title }}</a></h2> +<h1 class="static-title"><a href="{{ page.url }}">{{ page.title }}</a></h1> {{ content }} +{% include footer.html %} diff --git a/_plugins/ext.rb b/_plugins/ext.rb deleted file mode 100644 index b9c6c96..0000000 --- a/_plugins/ext.rb +++ /dev/null @@ -1 +0,0 @@ -require "jekyll-assets" diff --git a/_posts/2008-06-14-how-to-use-vim-as-a-personal-wiki.md b/_posts/2008-06-14-how-to-use-vim-as-a-personal-wiki.md index e162e5b..c5efedb 100644 --- a/_posts/2008-06-14-how-to-use-vim-as-a-personal-wiki.md +++ b/_posts/2008-06-14-how-to-use-vim-as-a-personal-wiki.md @@ -32,9 +32,9 @@ set shiftwidth=2 " Unify set softtabstop=2 " Unify ``` -I organize my files in directory. 
I've got a *work*, *lists*, *recipes*, *misc*, ... and I put my files in this directory. +I organize my files in directory. I've got a **work**, **lists**, **recipes**, **misc**, ... and I put my files in this directory. -I've got an index page, with links to main section. I don't have wikiword in camelcase or things like that, so if i want to put a link to a page, I just wrote the link this way **dir_name/page_name**, then, i juste have to hit **gf** on this link to open the page. I also use this place as a todo list manager. I've got one paragrah per day, like this : +I've got an index page, with links to main section. I don't have wikiword in camelcase or things like that, so if i want to put a link to a page, I just wrote the link this way **dir_name/page_name**, then, i juste have to hit `gf` on this link to open the page. I also use this place as a todo list manager. I've got one paragrah per day, like this : ``` 2008-06-14 @@ -43,7 +43,7 @@ I've got an index page, with links to main section. I don't have wikiword in cam ... ``` -and a bunch of vim mapping for marking complete (**,c**), work in progress (**,w**) or canceled (**,x**). +and a bunch of vim mapping for marking complete (`,c`), work in progress (`,w`) or canceled (`,x`). If i don't have a deadline for a particular task, I use a 'someday' file, where the task is put with a context. diff --git a/_posts/2008-06-17-vim-function-for-creating-new-task.md b/_posts/2008-06-17-vim-function-for-creating-new-task.md index 4452371..18e20c6 100644 --- a/_posts/2008-06-17-vim-function-for-creating-new-task.md +++ b/_posts/2008-06-17-vim-function-for-creating-new-task.md @@ -6,7 +6,7 @@ summary: In which I add a few functions for my vim wiki. I've added a new function to my .vimrc for creating quickly a new task: -```vim +{% highlight vim %} function! CreateTask() let context = input("Enter context: ") exe ":set noautoindent" @@ -15,8 +15,8 @@ function! CreateTask() exe ":set autoindent" exe ":startinsert" endfunction -``` +{% endhighlight %} and then this mapping: `map ct <esc>:call CreateTask()<cr>` -Now, I've just to hit **,n**, type my context, a new line will be inserted and I just have to create my task. +Now, I've just to hit `,n` and type my context. A new line will be inserted and I just have to create my task. diff --git a/_posts/2008-06-18-keep-your-zshrc-simple.md b/_posts/2008-06-18-keep-your-zshrc-simple.md index bbfe4d2..65332bd 100644 --- a/_posts/2008-06-18-keep-your-zshrc-simple.md +++ b/_posts/2008-06-18-keep-your-zshrc-simple.md @@ -1,12 +1,12 @@ --- layout: post title: keep your zshrc simple -summary: In which I explain how I maintain my zsh configuration +summary: In which I explain how I maintain my zsh configuration. --- Keep your .zshrc simple. Mine looks like this : -```sh +{% highlight vim %} autoload -U compinit zrecompile zsh_cache=${HOME}/.zsh_cache mkdir -p $zsh_cache @@ -19,11 +19,11 @@ for zshrc_snipplet in ~/.zsh.d/S[0-9][0-9]*[^~] ; do source $zshrc_snipplet done function history-all { history -E 1 } -``` +{% endhighlight %} and then, in my **.zsh.d** directory, I've got: -``` +{% highlight sh %} S10_zshopts S20_environment S30_binds @@ -32,6 +32,6 @@ S50_aliases S60_prompt S71_ssh S72_git -``` +{% endhighlight %} All my aliases are in the same file, it's much easier to search/find/add. 
diff --git a/_posts/2008-06-20-mirror-cpan.textile b/_posts/2008-06-20-mirror-cpan.md index 461aeaa..dc026e1 100644 --- a/_posts/2008-06-20-mirror-cpan.textile +++ b/_posts/2008-06-20-mirror-cpan.md @@ -1,38 +1,30 @@ --- layout: post -category: perl title: Mirror cpan +summary: In which I setup a mirror of CPAN using minicpan. --- For the last 10 months, I've been living with no internet connection at home (not on purpose, but this is another story), so I've tried to be as much as possible independent from the web. I've started to use git for being able to work off-line, I use Vim as a wiki on my computer, my blog engine for writing post off-line, ... As as perl developer, I use a lot the CPAN. So, I've start to mirror the CPAN on my computer. Here is how: -First, you will need the minicpan: +First, you will need the minicpan: `cpan CPAN::Mini`. -{% highlight bash %} -cpan CPAN::Mini -{% endhighlight %} - -Then, edit a .minicpanrc file and add the following: +Then, edit a **.minicpanrc** file and add the following: -bc. local: /path/to/my/mirror/cpan +{% highlight sh %} +local: /path/to/my/mirror/cpan remote: ftp://ftp.demon.co.uk/pub/CPAN/ +{% endhighlight %} And to finish, add this in your crontab: -bc. 5 14 * * * /usr/local/bin/minicpan > /dev/null 2>&1 - -Everyday, at 14h05, your cpan will be updated. - -Now use the CPAN cli: - -{% highlight bash %} -sudo cpan +{% highlight sh %} +5 14 * * * /usr/local/bin/minicpan > /dev/null 2>&1 {% endhighlight %} -and do the following +Everyday, at 14h05, your cpan will be updated. -bc. cpan[1]> o conf urllist unshift file:///path/to/my/mirror/cpan +Now use the CPAN cli: `sudo cpan` and execute the following command `cpan[1]> o conf urllist unshift file:///path/to/my/mirror/cpan` And voilà, I've got my own minicpan on my computer, so I can install everything when I need it, being off-line or not. diff --git a/_posts/2008-06-21-debug-your-dbix-class-queries.md b/_posts/2008-06-21-debug-your-dbix-class-queries.md index dc58280..7cb66c2 100644 --- a/_posts/2008-06-21-debug-your-dbix-class-queries.md +++ b/_posts/2008-06-21-debug-your-dbix-class-queries.md @@ -6,16 +6,16 @@ summary: In which I explain how to see SQL queries generated for DBIx::Class. If you use DBIx::Class and want to see what the SQL generated looks like, you can set the environment variable **DBIC_TRACE**. -```sh +{% highlight sh %} % DBIC_TRACE=1 my_programme.pl -``` +{% endhighlight %} And all the SQL will be printed on **STDERR**. If you give a filename to the variable, like this -```sh +{% highlight sh %} % DBIC_TRACE="1=/tmp/sql.debug" -``` +{% endhighlight %} all the statements will be printed in this file. diff --git a/_posts/2008-06-24-ack.md b/_posts/2008-06-24-ack.md index 96e8388..c791e83 100644 --- a/_posts/2008-06-24-ack.md +++ b/_posts/2008-06-24-ack.md @@ -4,7 +4,7 @@ layout: post summary: In which I share my settings for ack.. --- -*"Ack is designed as a replacement for 99% of the uses of grep."* +> Ack is designed as a replacement for 99% of the uses of grep. [Ack](https://metacpan.org/module/App::Ack) is a really nice tool for searching your source code. It's faster than grep because he already knows what you want : searching your sources files :) @@ -12,12 +12,12 @@ By default it will not search in SCM files (**.svn**, **.cvs**, ...), backups fi And you can set some defaults configuration in a .ackrc file ! 
Mine looks like this: -``` +{% highlight sh %} --sort-files --color --context=1 --follow -``` +{% endhighlight %} Check also: [vim with ack integration](http://use.perl.org/~Ovid/journal/36430?from=rss). diff --git a/_posts/2008-06-26-git-branch-everywhere.textile b/_posts/2008-06-26-git-branch-everywhere.md index 1c32000..bde1ffa 100644 --- a/_posts/2008-06-26-git-branch-everywhere.textile +++ b/_posts/2008-06-26-git-branch-everywhere.md @@ -1,14 +1,14 @@ --- layout: post -category: app title: Git branch everywhere +summary: In which I share a snippet of code to display a git branch in vim. --- The current trend is to have the name of the current git branch everywhere. Personnaly I display it in my vim's status bar, and in my zsh prompt. Here is my vimrc configuration for this (I'm not the author of this function, and can't remember where I saw it first): -{% highlight bash %} +{% highlight vim %} set statusline=%<[%n]%m%r%h%w%{'['.(&fenc!=''?&fenc:&enc).':'.&ff}%{g:gitCurrentBranch}%{']'}%y\ %F%=%l,%c%V%8P autocmd BufEnter * :call CurrentGitBranch() @@ -29,10 +29,10 @@ endfunction and my zshrc: -{% highlight bash %} +{% highlight vim %} local git_b git_b='$(get_git_prompt_info '%b')' PROMPT="%(?..%U%?%u:) $git_b %40<...<%/%(#.%U>%u.%B>%b) " {% endhighlight %} -with the following script "S55_git":http://www.jukie.net/~bart/conf/zsh.d/S55_git +with the following script [S55_git](http://www.jukie.net/~bart/conf/zsh.d/S55_git). diff --git a/_posts/2008-06-27-dotfiles-and-scm.textile b/_posts/2008-06-27-dotfiles-and-scm.md index 1868df9..5e1888b 100644 --- a/_posts/2008-06-27-dotfiles-and-scm.textile +++ b/_posts/2008-06-27-dotfiles-and-scm.md @@ -1,18 +1,19 @@ --- layout: post -category: app +summary: In which I share how I manage my dotfiles title: Dotfiles and SCM --- All my dotfiles are stored in a SCM. Most of the time I'm on my main computer, but I can be working on a server or a different workstation. In this case, I like to have all my configurations for zsh, vim, screen, etc. -So, instead of copying my files over different computers, I put everything in a private repostiroy, and when I'm on a new computer, I just have to checkout it. If I do a modification on a machine, I just need to commit it, and I can have the modification everywhere else. +So, instead of copying my files over different computers, I put everything in a private repostiroy, and when I'm on a new computer, I just have to checkout it. If I do a modification on a machine, I just need to commit it, and I can have the modification everywhere else. I've got a $HOME/dotfiles directory, which is versionned (with git in my case). All my configurations file are stored here. In this directory, as I'm avery lazy person, I've created a Makefile. Each time I create a new file, I add it to the makefile at the same time. The content of the Makefile is the following: -bc. DOTFILES := $(shell pwd) +{% highlight sh %} +DOTFILES := $(shell pwd) all: shell code perl plagger web shell: ln -fs $(DOTFILES)/zshrc ${HOME}/.zshrc @@ -39,17 +40,6 @@ ln -fns $(DOTFILES)/vimperator ${HOME}/.vimperator ln -fs $(DOTFILES)/vimperatorrc ${HOME}/.vimperatorrc ln -fs $(DOTFILES)/flickrrc ${HOME}/.flickrrc ln -fs $(DOTFILES)/rtorrent.rc ${HOME}/.rtorrent.rc - -So next time I want to deploy my dotfiles on a new computer, I can do - -{% highlight vim %} -make all -{% endhighlight %} - -or - -{% highlight vim %} -make perl code vim {% endhighlight %} -and I can start coding some perl with vim. 
+So next time I want to deploy my dotfiles on a new computer, I can run `make all` or `make perl code vim` and I can start coding some perl with vim. diff --git a/_posts/2008-06-30-upgrading-to-perl-5.10.md b/_posts/2008-06-30-upgrading-to-perl-5.10.md new file mode 100644 index 0000000..9746ac4 --- /dev/null +++ b/_posts/2008-06-30-upgrading-to-perl-5.10.md @@ -0,0 +1,25 @@ +--- +layout: post +summary: In which we upgrade to Perl 5.10. +title: Upgrading to perl 5.10 +--- + +Get the list of your installed 5.8 modules: + +{% highlight sh %} +% perl -MExtUtils::Installed -e'print join("\n", new ExtUtils::Installed->modules)' > module.list +{% endhighlight %} + +then install Perl 5.10: + +{% highlight sh %} +% wget http://www.cpan.org/src/perl-5.10.0.tar.gz +% tar xzf perl-5.10.0.tar.gz +% cd perl-5.10.0 +% sh Configure -de -Dprefix=/opt/perl -Duserelocatableinc +% make && make test +% sudo make install +% /opt/perl/bin/perl -e 'use feature qw(say); say "hi"' +{% endhighlight %} + +and then re-install your modules with `cpan \`cat module.list\``. diff --git a/_posts/2008-06-30-upgrading-to-perl-5.10.textile b/_posts/2008-06-30-upgrading-to-perl-5.10.textile deleted file mode 100644 index 8c122a2..0000000 --- a/_posts/2008-06-30-upgrading-to-perl-5.10.textile +++ /dev/null @@ -1,29 +0,0 @@ ---- -layout: post -category: perl -title: Upgrading to perl 5.10 ---- - -Get the list of your installed 5.8 modules: - -{% highlight bash %} -perl -MExtUtils::Installed -e'print join("\n", new ExtUtils::Installed->modules)' > module.list -{% endhighlight %} - -then install Perl 5.10: - -{% highlight bash %} -wget http://www.cpan.org/src/perl-5.10.0.tar.gz -tar xzf perl-5.10.0.tar.gz -cd perl-5.10.0 -sh Configure -de -Dprefix=/opt/perl -Duserelocatableinc -make && make test -sudo make install -/opt/perl/bin/perl -e 'use feature qw(say); say "hi"' -{% endhighlight %} - -and then re-install your modules - -{% highlight bash %} -cpan `cat module.list` -{% endhighlight %} diff --git a/_posts/2008-08-08-customize-your-mysql-prompt.md b/_posts/2008-08-08-customize-your-mysql-prompt.md new file mode 100644 index 0000000..f023186 --- /dev/null +++ b/_posts/2008-08-08-customize-your-mysql-prompt.md @@ -0,0 +1,14 @@ +--- +layout: post +summary: In which we customize our MySQL prompt +title: Customize your MySQL prompt +--- + +To customize your MySQL prompt, create a .my.cnf file in your $HOME then add the following: + +{% highlight sh %} +[mysql] +prompt="\\u [\\d] >" +{% endhighlight %} + +Your prompt will now looks like this: `username [dabatases_name] >` diff --git a/_posts/2008-08-08-customize-your-mysql-prompt.textile b/_posts/2008-08-08-customize-your-mysql-prompt.textile deleted file mode 100644 index bf21d9e..0000000 --- a/_posts/2008-08-08-customize-your-mysql-prompt.textile +++ /dev/null @@ -1,14 +0,0 @@ ---- -layout: post -category: app -title: Customize your mysql prompt ---- - -To customize your mysql prompt, create a .my.cnf file in your $HOME then add the following: - -bc. [mysql] -prompt="\\u [\\d] >" - -It will look like this: - -bc. 
username [dabatases_name] > diff --git a/_posts/2008-08-16-my-lifestream-page.textile b/_posts/2008-08-16-my-lifestream-page.md index 51895bd..46a33a8 100644 --- a/_posts/2008-08-16-my-lifestream-page.textile +++ b/_posts/2008-08-16-my-lifestream-page.md @@ -1,7 +1,7 @@ --- layout: post -category: perl title: my lifestream page +summary: In which I write a simple lifestream page in javascript --- For a while I've wanted to mash-up the different services I use on a single page. My first attempt was with <a href="http://plagger.org/trac">plagger</a>, and publishing all the information on my google calendar. That wasn't really interesting, as I don't need to archived all this stuff. After this one, I've tried to make a static page, using <a href="http://developer.yahoo.com/yui/">the yahoo! library interface</a> stuff. A page with tabs, each tabs for a service. The content was generated with Plagger and Template Toolkit. It was working fine, but I had to do some HTML, check why some some stuff were not working, etc. And as I'm not a designer, and as I'm incapable to think a good user interface, I was quickly fed up with this solution. @@ -16,7 +16,6 @@ I use <a href="http://pipes.yahoo.com/pipes/">yahoo! pipes</a> for mashing some For displaying the result of the JSON feed, I've found some simple code, that I've modified a bit to feet my purpose. - {% highlight javascript %} $(function(){ var CssToAdd = new Object(); diff --git a/_posts/2008-08-19-offlineimap-on-osx.textile b/_posts/2008-08-19-offlineimap-on-osx.md index 1ec6ba7..c5f6d8c 100644 --- a/_posts/2008-08-19-offlineimap-on-osx.textile +++ b/_posts/2008-08-19-offlineimap-on-osx.md @@ -1,12 +1,12 @@ --- layout: post -category: app +summary: In which I have to patch offline imap to make it work on OS X. title: offlineimap on osx --- -If you are using offlineimap on leopard, on an imap connection with ssl (like gmail) and it keep crashing because of the following error: +If you are using offlineimap on leopard, on an imap connection with ssl (like GMail) and it keep crashing because of the following error: -{% highlight bash %} +{% highlight sh %} File "/Library/Python/2.5/site-packages/offlineimap/imaplibutil.py", line 70, in _read return self.sslsock.read(n) MemoryError @@ -14,7 +14,7 @@ MemoryError you can fix it with this fix: -{% highlight bash %} +{% highlight sh %} sudo vim /Library/Python/2.5/site-packages/offlineimap/imaplibutil.py +70 {% endhighlight %} diff --git a/_posts/2008-08-21-le-goulet.textile b/_posts/2008-08-21-le-goulet.md index 084072f..bb371b5 100644 --- a/_posts/2008-08-21-le-goulet.textile +++ b/_posts/2008-08-21-le-goulet.md @@ -1,6 +1,6 @@ --- layout: post -category: misc +summary: In which I share a picture of the bay. title: le goulet --- diff --git a/_posts/2008-08-30-intention-cloud.textile b/_posts/2008-08-30-intention-cloud.md index 3a96e6f..f985310 100644 --- a/_posts/2008-08-30-intention-cloud.textile +++ b/_posts/2008-08-30-intention-cloud.md @@ -1,6 +1,6 @@ --- layout: post -category: perl +summary: In which I update the intention-cloud. title: intention cloud --- diff --git a/_posts/2008-12-05-vim-and-git.textile b/_posts/2008-12-05-vim-and-git.md index 1a137b6..00f8ff1 100644 --- a/_posts/2008-12-05-vim-and-git.textile +++ b/_posts/2008-12-05-vim-and-git.md @@ -1,6 +1,6 @@ --- layout: post -category: app +summary: In which I share another snippet of code for vim. title: vim and git --- @@ -31,14 +31,16 @@ function! 
SourceDiff() endfunction {% endhighlight %} -the output will look like +the output looks like this: -bc. Choose a revision: +{% highlight sh %} +Choose a revision: 1: ea0bb4d - (3 days ago) franck cuny - fix new_freq 2: a896ac7 - (5 weeks ago) franck cuny - fix typo 3: c9bc5fd - (5 weeks ago) franck cuny - update test 4: e9de4be - (5 weeks ago) franck cuny - change the way we rewrite and check an existing url 5: 3df1fd6 - (7 weeks ago) franck cuny - put id category +{% endhighlight %} You choose the revision you want to check the diff against, and you got a (colorless) diff in your vim buffer. diff --git a/_posts/2009-01-18-rtgi-recrute-encore.textile b/_posts/2009-01-18-rtgi-recrute-encore.md index 7d0545c..00b7863 100644 --- a/_posts/2009-01-18-rtgi-recrute-encore.textile +++ b/_posts/2009-01-18-rtgi-recrute-encore.md @@ -1,6 +1,6 @@ --- layout: post -category: linkfluence +summary: In which RTGI is still hiring. title: RTGI recrute (encore) --- diff --git a/_posts/2009-02-17-tidify-a-json-in-vim.textile b/_posts/2009-02-17-tidify-a-json-in-vim.md index 016e9fd..8771e59 100644 --- a/_posts/2009-02-17-tidify-a-json-in-vim.textile +++ b/_posts/2009-02-17-tidify-a-json-in-vim.md @@ -1,21 +1,15 @@ --- layout: post -category: perl +summary: In which we tidify a JSON in vim. title: tidify a json in vim --- If you have to edit json files from vim, you may want to make them more readable, here is how you can do this: -start by installing the JSON::XS perl module from the CPAN - -{% highlight perl %} -sudo cpan JSON::XS -{% endhighlight %} - -then, edit your .vimrc and add the following +start by installing the JSON::XS perl module from the CPAN by running `sudo cpan JSON::XS`, then, edit your .vimrc and add the following {% highlight vim %} map <leader>jt <Esc>:%!json_xs -f json -t json-pretty<CR> {% endhighlight %} -now while editing a json file, you can hit *,jt* (or whatever your leader is set to) and tidify a json. +now while editing a json file, you can hit `,jt` (or whatever your leader is set to) and tidify a json. diff --git a/_posts/2009-03-08-belgian-perl-workshop-09.md b/_posts/2009-03-08-belgian-perl-workshop-09.md new file mode 100644 index 0000000..19ef01f --- /dev/null +++ b/_posts/2009-03-08-belgian-perl-workshop-09.md @@ -0,0 +1,21 @@ +--- +layout: post +summary: In which I went to the Belgian Perl Workshop. +title: belgian perl workshop 09 +--- + +last weekend my co-workers and I went to the [Belgian Perl Workshop 09](http://conferences.mongueurs.net/bpw2009/). I attended the following presentations: + + * [KiokuDB](http://conferences.mongueurs.net/bpw2009/talk/1720), by nothingmuch. Slides are available [here](http://www.iinteractive.com/kiokudb/talks/bpw2009.xul). We were able to talk with him during the afternoon, we might we use it at [$work](http://rtgi.fr). + + * [Painless XSLT with Perl](http://conferences.mongueurs.net/bpw2009/talk/1740), by andrew shitov. Was interesting, even if I don't do any XSLT anymore. Again, some ideas might be used for work. + + * [What are you pretending to be ?](http://conferences.mongueurs.net/bpw2009/talk/1792), by liz. That's a hell of a hack. The module is available on the [CPAN](http://search.cpan.org/~elizabeth/persona/). + + * [Regular Expressions and Unicode Guru](http://conferences.mongueurs.net/bpw2009/event/473), by abigail. Feel better to know that I'm not the only one suffering with unicode in Perl ;). Learn some stuff like how to create a custom character classe, etc. 
+ + * [Catalyst](http://conferences.mongueurs.net/bpw2009/event/474), by matt trout. Ok, we're using Catalyst at work for our webservices. So we allready know about catalyst, but we were curious. And as i was hoping, we learn some nice tweaks. We discovered [Catalyst::Model::Adaptor](http://search.cpan.org/perldoc?Catalyst::Model::Adaptor), so we don't have to do some horrible stuff in our Controller any more, and some other interesting stuff were put in this talk. And matt is a really good speaker, manage to keep an audiance amused and interested. + + * [Catalyst & AWS](http://conferences.mongueurs.net/bpw2009/event/476), by matt trout. Once again, a really good talk by matt. Some good advices, and a lot of fun. + +We didn't stay long for the social event in the evening; we had booked a hotel in bruxelles. But i'm glad that we were able to get to this perl workshop, it was well-organised, good talks, meet nice people, and learn some stuff. All in all, a good day :) diff --git a/_posts/2009-03-08-belgian-perl-workshop-09.textile b/_posts/2009-03-08-belgian-perl-workshop-09.textile deleted file mode 100644 index 9e72d8c..0000000 --- a/_posts/2009-03-08-belgian-perl-workshop-09.textile +++ /dev/null @@ -1,23 +0,0 @@ ---- -layout: post -category: conference -title: belgian perl workshop 09 ---- - -last weekend my co-workers and I went to the "Belgian Perl Workshop 09":http://conferences.mongueurs.net/bpw2009/. I attended the following presentations: - - * "KiokuDB":http://conferences.mongueurs.net/bpw2009/talk/1720, by nothingmuch. Slides are available "here":http://www.iinteractive.com/kiokudb/talks/bpw2009.xul. We were able to talk with him during the afternoon, we might we use it at "work":http://rtgi.fr - - * "Painless XSLT with Perl":http://conferences.mongueurs.net/bpw2009/talk/1740, by andrew shitov. Was interesting, even if I don't do any XSLT anymore. Again, some ideas might be used for work. - - * "What are you pretending to be ?":http://conferences.mongueurs.net/bpw2009/talk/1792, by liz. That's a hell of a hack. The module is available on the "cpan":http://search.cpan.org/~elizabeth/persona/ - - * "Regular Expressions and Unicode Guru":http://conferences.mongueurs.net/bpw2009/event/473, by abigail. Feel better to know that I'm not the only one suffering with unicode in Perl ;). Learn some stuff like how to create a custom character classe, etc. - - * "Catalyst":http://conferences.mongueurs.net/bpw2009/event/474, by matt trout. Ok, we're using Catalyst at work for our webservices. So we allready know about catalyst, but we were curious. And as i was hoping, we learn some nice tweaks. We discovered "Catalyst::Model::Adaptor":http://search.cpan.org/perldoc?Catalyst::Model::Adaptor, so we don't have to do some horrible stuff in our Controller any more, and some other interesting stuff were put in this talk. And matt is a really good speaker, manage to keep an audiance amused and interested. - - * "Catalyst & AWS":http://conferences.mongueurs.net/bpw2009/event/476, by matt trout. Once again, a really good talk by matt. Some good advices, and a lot of fun. - -We didn't stay long for the social event in the evening; we had booked a hotel in bruxelles. But i'm glad that we were able to get to this perl workshop, it was well-organised, good talks, meet nice people, and learn some stuff. 
All in all, a good day :) - -some photos are available on "my flickr account":http://www.flickr.com/photos/franck_/sets/72157614745345532/ diff --git a/_posts/2009-04-05-the-intentioncloud-strike-back.md b/_posts/2009-04-05-the-intentioncloud-strike-back.md new file mode 100644 index 0000000..15d0ad3 --- /dev/null +++ b/_posts/2009-04-05-the-intentioncloud-strike-back.md @@ -0,0 +1,41 @@ +--- +layout: post +title: the intentioncloud strike back +summary: In which once again I bring the intention cloud back. +--- + +I've decided to rewrite the intention cloud. Still with [Catalyst](http://dev.catalystframework.org/wiki/), but I've replaced prototype with [jquery](http://jquery.com) this time. I've end up with less code than the previous version. For the moment, only google is available, but I will add overture, and may be more engines. + +There is still some bug to fix, some tests to add, and I will be able to restore the [intentioncloud.net](http://intentioncloud.net) domain. + +It's really easy to plug a database to a catalyst application using [Catalyst::Model::DBIC::Schema](http://p3rl.org/Catalyst::Model::DBIC::Schema). Via the helper, you can tell the model to use [DBIx::Class::Schema::Loader](http://p3rl.org/DBIx:/Class::Schema::Loader), so the table informations will be loaded from the database at runtime. You end up with a code that looks like + +{% highlight perl %} +package intentioncloud::Model::DB; +use strict; +use base 'Catalyst::Model::DBIC::Schema'; +__PACKAGE__->config(schema_class => 'intentioncloud::Schema',); +1; +{% endhighlight %} + +and the schema: + +{% highlight perl %} +package intentioncloud::Schema; +use strict; +use base qw/DBIx::Class::Schema::Loader/; +__PACKAGE__->loader_options(relationships => 1); +1; +{% endhighlight %} + +Now, to do a query: + +{% highlight perl %} +my $rs = $c->model('DB::TableName')->find(1); +{% endhighlight %} + +and your done ! + +The code for the intentioncloud is avaible on [GitHub](http://github.com/franckcuny/intentioncloud/tree/master). + + diff --git a/_posts/2009-04-05-the-intentioncloud-strike-back.textile b/_posts/2009-04-05-the-intentioncloud-strike-back.textile deleted file mode 100644 index 83a114e..0000000 --- a/_posts/2009-04-05-the-intentioncloud-strike-back.textile +++ /dev/null @@ -1,41 +0,0 @@ ---- -layout: post -title: the intentioncloud strike back -category: perl ---- - -I've decided to rewrite the intention cloud. Still with "Catalyst":http://dev.catalystframework.org/wiki/, but I've replaced prototype with "jquery":http://jquery.com this time. I've end up with less code than the previous version. For the moment, only google is available, but I will add overture, and may be more engines. - -There is still some bug to fix, some tests to add, and I will be able to restore the "intentioncloud.net":http://intentioncloud.net domain. - -It's really easy to plug a database to a catalyst application using "Catalyst::Model::DBIC::Schema":http://p3rl.org/Catalyst::Model::DBIC::Schema. Via the helper, you can tell the model to use "DBIx::Class::Schema::Loader":http://p3rl.org/DBIx:/Class::Schema::Loader, so the table informations will be loaded from the database at runtime. 
You end up with a code that looks like - -{% highlight perl %} -package intentioncloud::Model::DB; -use strict; -use base 'Catalyst::Model::DBIC::Schema'; -__PACKAGE__->config(schema_class => 'intentioncloud::Schema',); -1; -{% endhighlight %} - -and the schema: - -{% highlight perl %} -package intentioncloud::Schema; -use strict; -use base qw/DBIx::Class::Schema::Loader/; -__PACKAGE__->loader_options(relationships => 1); -1; -{% endhighlight %} - -Now, to do a query: - -{% highlight perl %} -my $rs = $c->model('DB::TableName')->find(1); -{% endhighlight %} - -and your done ! - -The code for the intentioncloud is avaible on "github":http://github.com/franckcuny/intentioncloud/tree/master. - - diff --git a/_posts/2009-04-14-git-and-prove.textile b/_posts/2009-04-14-git-and-prove.md index fca6329..60c8740 100644 --- a/_posts/2009-04-14-git-and-prove.textile +++ b/_posts/2009-04-14-git-and-prove.md @@ -1,14 +1,14 @@ --- layout: post -category: app -title: git and prove +summary: In which I add a hook to git to run my tests. +title: Git and prove --- A little trick to force you to run your tests before a commit: -in a repositorie, create the following file *.git/hooks/pre-commit* with this content: +in a repositorie, create the following file **.git/hooks/pre-commit** with this content: -{% highlight bash %} +{% highlight sh %} #!/bin/sh if [ -d t ]; then res=`prove t` diff --git a/_posts/2009-04-25-controll-xmms2-from-vim.textile b/_posts/2009-04-25-controll-xmms2-from-vim.md index 6b713c3..848319d 100644 --- a/_posts/2009-04-25-controll-xmms2-from-vim.textile +++ b/_posts/2009-04-25-controll-xmms2-from-vim.md @@ -1,6 +1,6 @@ --- layout: post -category: app +summary: In which I control xmms2 from vim. title: controll xmms2 from vim --- @@ -14,5 +14,5 @@ map <leader>xp <Esc>:!xmms2 play<CR><CR> map <leader>xs <Esc>:!xmms2 stop<CR><CR> {% endhighlight %} -now, type *,xn* in vim, and xmms2 will start to play the next track from your playlist. +now, type `,xn` in vim, and xmms2 will start to play the next track from your playlist. diff --git a/_posts/2009-04-27-a-simple-feed-aggregator-with-modern-perl-part-1.md b/_posts/2009-04-27-a-simple-feed-aggregator-with-modern-perl-part-1.md new file mode 100644 index 0000000..4acb0e7 --- /dev/null +++ b/_posts/2009-04-27-a-simple-feed-aggregator-with-modern-perl-part-1.md @@ -0,0 +1,143 @@ +--- +title: A simple feed aggregator with modern Perl - part 1 +summary: In which I write a feed aggregator in Perl. +layout: post +--- + +Following [Matt's post](http://www.shadowcat.co.uk/blog/matt-s-trout/iron-man/) about people not blogging enough about Perl, I've decided to try to post once a week about Perl. So I will start by a series of articles about what we call **modern Perl**. For this, I will write a simple feed agregator (using [Moose](http://search.cpan.org/~drolsky/Moose-0.75/lib/Moose.pm), [DBIx::Class](http://search.cpan.org/perldoc?DBIx::Class), [KiokuDB](http://search.cpan.org/perldoc?KiokuDB), some tests, and a basic frontend (with [Catalyst](http://search.cpan.org/perldoc?Catalyst)). This article will be split in four parts: + + * the first one will explain how to create a schema using **DBIx::Class** + * the second will be about the aggregator. I will use **Moose*** and **KiokuDB** + * the third one will be about writing tests with **Test::Class** + * the last one will focus on **Catalyst** + +The code of these modules will be available on my github account at the same time each article is published. 
+ +> I'm not showing you how to write the perfect feed aggregator. The purpose of this series of articles is only to show you how to write a simple aggregator using modern Perl. + +### The database schema + +We will use a database to store a list of feeds and feed entries. As I don't like, no, wait, I *hate* SQL, I will use an ORM for accessing the database. For this, my choice is **DBIx::Class**, the best ORM available in Perl. + +> If you never have used an ORM before, ORM stands for Object Relational Mapping. It's a SQL to OO mapper that creates an abstract encapsulation of your databases operations. **DBIx::Class**' purpose is to represent "queries in your code as perl-ish as possible. + +For a basic aggregator we need: + + * a table for the list of feeds + * a table for the entries + +We will create these two tables using *DBIx::Class*. For this, we first create a Schema module. I use *Module::Setup*, but you can use **Module::Starter** or whatever you want. + +{% highlight bash %} +% module-setup MyModel +% cd MyModel +% vim lib/MyModel.pm +{% endhighlight %} + +{% highlight perl %} +package MyModel; +use base qw/DBIx::Class::Schema/; +__PACKAGE__->load_classes(); +1; +{% endhighlight %} + +So, we have just created a schema class. The **load_classes** method loads all the classes that reside under the **MyModel** namespace. We now create the result class **MyModel::Feed** in **lib/MyModel/Feed.pm**: + +{% highlight perl %} +package MyModel::Feed; +use base qw/DBIx::Class/; +__PACKAGE__->load_components(qw/Core/); +__PACKAGE__->table('feed'); +__PACKAGE__->add_columns(qw/ feedid url /); +__PACKAGE__->set_primary_key('feedid'); +__PACKAGE__->has_many(entries => 'MyModel::Entry', 'feedid'); +1; +{% endhighlight %} + +Pretty self explanatory: we declare a result class that uses the table feed, with two columns: **feedid** and **url**, **feedid** being the primary key. The **has_many** method declares a one-to-many relationship. + +Now the result class **MyModel::Entry** in **lib/MyModel/Entry.pm**: + +{% highlight perl %} +package MyModel::Entry; +use base qw/DBIx::Class/; +__PACKAGE__->load_components(qw/Core/); +__PACKAGE__->table('entry'); +__PACKAGE__->add_columns(qw/ entryid permalink feedid/); +__PACKAGE__->set_primary_key('entryid'); +__PACKAGE__->belongs_to(feed => 'MyModel::Feed', 'feedid'); +1; +{% endhighlight %} + +Here we declare **feed** as a foreign key, using the column name **feedid**. + +You can do a more complex declaration of your schema. Let's say you want to declare the type of your fields, you can do this: + +{% highlight perl %} +__PACKAGE__->add_columns( + 'permalink' => { + 'data_type' => 'TEXT', + 'is_auto_increment' => 0, + 'default_value' => undef, + 'is_foreign_key' => 0, + 'name' => 'url', + 'is_nullable' => 1, + 'size' => '65535' + }, +); +{% endhighlight %} + +**DBIx::Class** also provides hooks for the deploy command. If you are using MySQL, you may need a InnoDB table. In your class, you can add this: + +{% highlight perl %} +sub sqlt_deploy_hook { + my ($self, $sqlt_table) = @_; + $sqlt_table->extra( + mysql_table_type => 'InnoDB', + mysql_charset => 'utf8' + ); +} +{% endhighlight %} + +Next time you call deploy on this table, the hook will be sent to **SQL::Translator::Schema**, and force the type of your table to InnoDB, and the charset to utf8. + +Now that we have a **DBIx::Class** schema, we need to deploy it. 
For this, I always do the same thing: create a **bin/deploy_mymodel.pl** script with the following code: + +{% highlight perl %} +use strict; +use feature 'say'; +use Getopt::Long; +use lib('lib'); +use MyModel; + +GetOptions( + 'dsn=s' => \my $dsn, + 'user=s' => \my $user, + 'passwd=s' => \my $passwd +) or die usage(); + +my $schema = MyModel->connect($dsn, $user, $passwd); +say 'deploying schema ...'; +$schema->deploy; + +say 'done'; + +sub usage { + say + 'usage: deploy_mymodel.pl --dsn $dsn --user $user --passwd $passwd'; +} +{% endhighlight %} + +This script will deploy for you the schema (you need to create the database first if using with mysql). + +Executing the following command: + +{% highlight bash %} +% perl bin/deploy_mymodel.pl --dsn dbi:SQLite:model.db +{% endhighlight %} + +generate a **model.db** database so we can work and test it. Now that we got our (really) simple **MyModel** schema, we can start to hack on our aggregator. + +[link to the code](http://github.com/franckcuny/ironman-mymodel/tree/master) + +> while using **DBIx::Class**, you may want to take a look at the generated queries. For this, export `DBIC_TRACE=1` in your environment, and the queries will be printed on STDERR. diff --git a/_posts/2009-04-27-a-simple-feed-aggregator-with-modern-perl-part-1.textile b/_posts/2009-04-27-a-simple-feed-aggregator-with-modern-perl-part-1.textile deleted file mode 100644 index 01febfa..0000000 --- a/_posts/2009-04-27-a-simple-feed-aggregator-with-modern-perl-part-1.textile +++ /dev/null @@ -1,153 +0,0 @@ ---- -title: A simple feed aggregator with modern Perl - part 1 -category: perl -layout: post ---- - -Following "matt's post":http://www.shadowcat.co.uk/blog/matt-s-trout/iron-man/ about people not blogging enough about Perl, I've decided to try to post once a week about Perl. So I will start by a series of articles about what we call *modern Perl*. For this, I will write a simple feed agregator (using "Moose":http://search.cpan.org/~drolsky/Moose-0.75/lib/Moose.pm, "DBIx::Class":http://search.cpan.org/perldoc?DBIx::Class, "KiokuDB":http://search.cpan.org/perldoc?KiokuDB, some tests, and a basic frontend (with "Catalyst":http://search.cpan.org/perldoc?Catalyst). This article will be split in four parts: - - * the first one will explain how to create a schema using *DBIx::Class* - * the second will be about the aggregator. I will use *Moose** and **KiokuDB* - * the third one will be about writing tests with *Test::Class* - * the last one will focus on *Catalyst* - -The code of these modules will be available on my github account at the -same time each article is published. - -bc. disclaimer: -I'm not showing you how to write the perfect feed aggregator. The purpose of -this series of articles is only to show you how to write a simple aggregator -using modern Perl. - - -h3. The database schema - -We will use a database to store a list of feeds and feed entries. As I don't like, no, wait, I *hate* SQL, I will use an ORM for accessing the database. For this, my choice is *DBIx::Class*, the best ORM available in Perl. - -bc. If you never have used an ORM before, ORM stands for Object Relational -Mapping. It's a SQL to OO mapper that creates an abstract encapsulation of -your databases operations. *DBIx::Class*' purpose is to represent "queries in -your code as perl-ish as possible. - -For a basic aggregator we need: - - * a table for the list of feeds - * a table for the entries - -We will create these two tables using *DBIx::Class*. For this, we first create a Schema module. 
I use *Module::Setup*, but you can use *Module::Starter* or whatever you want. - -{% highlight bash %} -module-setup MyModel -cd MyModel -vim lib/MyModel.pm -{% endhighlight %} - -{% highlight perl %} -package MyModel; -use base qw/DBIx::Class::Schema/; -__PACKAGE__->load_classes(); -1; -{% endhighlight %} - -So, we have just created a schema class. The *load_classes* method loads all the classes that reside under the *MyModel* namespace. We now create the result class *MyModel::Feed* in *lib/MyModel/Feed.pm*: - -{% highlight perl %} -package MyModel::Feed; -use base qw/DBIx::Class/; -__PACKAGE__->load_components(qw/Core/); -__PACKAGE__->table('feed'); -__PACKAGE__->add_columns(qw/ feedid url /); -__PACKAGE__->set_primary_key('feedid'); -__PACKAGE__->has_many(entries => 'MyModel::Entry', 'feedid'); -1; -{% endhighlight %} - -Pretty self explanatory: we declare a result class that uses the table feed, with two columns: *feedid* and *url*, *feedid* being the primary key. The *has_many* method declares a one-to-many relationship. - -Now the result class *MyModel::Entry* in *lib/MyModel/Entry.pm*: - -{% highlight perl %} -package MyModel::Entry; -use base qw/DBIx::Class/; -__PACKAGE__->load_components(qw/Core/); -__PACKAGE__->table('entry'); -__PACKAGE__->add_columns(qw/ entryid permalink feedid/); -__PACKAGE__->set_primary_key('entryid'); -__PACKAGE__->belongs_to(feed => 'MyModel::Feed', 'feedid'); -1; -{% endhighlight %} - -Here we declare *feed* as a foreign key, using the column name *feedid*. - -You can do a more complex declaration of your schema. Let's say you want to declare the type of your fields, you can do this: - -{% highlight perl %} -__PACKAGE__->add_columns( - 'permalink' => { - 'data_type' => 'TEXT', - 'is_auto_increment' => 0, - 'default_value' => undef, - 'is_foreign_key' => 0, - 'name' => 'url', - 'is_nullable' => 1, - 'size' => '65535' - }, -); -{% endhighlight %} - -*DBIx::Class* also provides hooks for the deploy command. If you are using MySQL, you may need a InnoDB table. In your class, you can add this: - -{% highlight perl %} -sub sqlt_deploy_hook { - my ($self, $sqlt_table) = @_; - $sqlt_table->extra( - mysql_table_type => 'InnoDB', - mysql_charset => 'utf8' - ); -} -{% endhighlight %} - -next time you call deploy on this table, the hook will be sent to *SQL::Translator::Schema*, and force the type of your table to InnoDB, and the charset to utf8. - -Now that we have a *DBIx::Class* schema, we need to deploy it. For this, I always do the same thing: create a *bin/deploy_mymodel.pl* script with the following code: - -{% highlight perl %} -use strict; -use feature 'say'; -use Getopt::Long; -use lib('lib'); -use MyModel; - -GetOptions( - 'dsn=s' => \my $dsn, - 'user=s' => \my $user, - 'passwd=s' => \my $passwd -) or die usage(); - -my $schema = MyModel->connect($dsn, $user, $passwd); -say 'deploying schema ...'; -$schema->deploy; - -say 'done'; - -sub usage { - say - 'usage: deploy_mymodel.pl --dsn $dsn --user $user --passwd $passwd'; -} -{% endhighlight %} - -This script will deploy for you the schema (you need to create the database first if using with mysql). - -Executing the following command: - -{% highlight bash %} -perl bin/deploy_mymodel.pl --dsn dbi:SQLite:model.db -{% endhighlight %} - -generate a *model.db* database so we can work and test it. Now that we got our (really) simple *MyModel* schema, we can start to hack on our aggregator. - -"link to the code":http://github.com/franckcuny/ironman-mymodel/tree/master - -bc. 
while using *DBIx::Class*, you may want to take a look at the generated -queries. For this, export *DBIC_TRACE=1* in your environment, and -the queries will be printed on STDERR. diff --git a/_posts/2009-04-28-a-simple-feed-aggregator-with-modern-perl-part-2.textile b/_posts/2009-04-28-a-simple-feed-aggregator-with-modern-perl-part-2.md index 258b0d8..c90d31d 100644 --- a/_posts/2009-04-28-a-simple-feed-aggregator-with-modern-perl-part-2.textile +++ b/_posts/2009-04-28-a-simple-feed-aggregator-with-modern-perl-part-2.md @@ -1,33 +1,31 @@ --- layout: post title: A simple feed aggregator with modern Perl - part 2 -category: perl +summary: In which we continue to write a feed aggregator in Perl. --- -bc. I've choose to write about a feed aggregator because it's one of the -things I'm working on at "RTGI":http://rtgi.eu/ (with web crawler stuffs, -gluing datas with search engine, etc) +> I've choose to write about a feed aggregator because it's one of the things I'm working on at [RTGI](http://rtgi.eu/) (with web crawler stuffs, gluing datas with search engine, etc) -For the feed aggregator, I will use *Moose*, *KiokuDB* and our *DBIx::Class* schema. Before we get started, I'd would like to give a short introduction to Moose and KiokuDB. +For the feed aggregator, I will use **Moose**, **KiokuDB** and our **DBIx::Class** schema. Before we get started, I'd would like to give a short introduction to Moose and KiokuDB. -bq. *Moose*: Moose is a "A postmodern object system for Perl 5". Moose brings to OO Perl some really nice concepts like roles, a better syntax, "free" constructor and destructor, ... If you don't already know Moose, check "http://www.iinteractive.com/moose/":http://www.iinteractive.com/moose/ for more informations. +Moose is a "A postmodern object system for Perl 5". Moose brings to OO Perl some really nice concepts like roles, a better syntax, "free" constructor and destructor, ... If you don't already know Moose, check "http://www.iinteractive.com/moose/":http://www.iinteractive.com/moose/ for more informations. -bq. *KiokuDB*: KiokuDB is a Moose based frontend to various data stores [...] Its purpose is to provide persistence for "regular" objects with as little effort as possible, without sacrificing control over how persistence is actually done, especially for harder to serialize objects. [...] KiokuDB is meant to solve two related persistence problems: +KiokuDB is a Moose based frontend to various data stores [...] Its purpose is to provide persistence for "regular" objects with as little effort as possible, without sacrificing control over how persistence is actually done, especially for harder to serialize objects. [...] KiokuDB is meant to solve two related persistence problems: - * Store arbitrary objects without changing their class definitions or worrying about schema details, and without needing to conform to the limitations of a relational model. - * Persisting arbitrary objects in a way that is compatible with existing data/code (for example interoperating with another app using *CouchDB* with *JSPON* semantics). +* Store arbitrary objects without changing their class definitions or worrying about schema details, and without needing to conform to the limitations of a relational model. +* Persisting arbitrary objects in a way that is compatible with existing data/code (for example interoperating with another app using **CouchDB** with **JSPON** semantics). I will store each feed entry in KiokuDB. 
I could have chosen to store them as plain text in JSON files, in my DBIx::Class model, etc. But as I want to show you new and modern stuff, I will store them in Kioku using the DBD's backend. -h3. And now for something completely different, code! +### And now for something completely different, code! -First, we will create a base module named *MyAggregator*. +First, we will create a base module named **MyAggregator**. {% highlight bash %} -module-setup MyAggregator +% module-setup MyAggregator {% endhighlight %} -We will now edit *lib/MyAggregator.pm* and write the following code: +We will now edit **lib/MyAggregator.pm** and write the following code: {% highlight perl %} package MyAggregator; @@ -35,16 +33,13 @@ use Moose; 1; {% endhighlight %} -As you can see, there is no *use strict; use warnings* here: Moose automatically turns on these pragmas. We don't have to write the new method either, as it's provided by Moose. +As you can see, there is no `use strict; use warnings` here: Moose automatically turns on these pragmas. We don't have to write the new method either, as it's provided by Moose. -For parsing feeds, we will use *XML::Feed*, and we will use it in a Role. If you don't know what roles are: +For parsing feeds, we will use **XML::Feed**, and we will use it in a Role. If you don't know what roles are: -bq. Roles have two primary purposes: as interfaces, and as a means of code -reuse. Usually, a role encapsulates some piece of behavior or state that can -be shared between classes. It is important to understand that roles are not -classes. You cannot inherit from a role, and a role cannot be instantiated. +> Roles have two primary purposes: as interfaces, and as a means of code reuse. Usually, a role encapsulates some piece of behavior or state that can be shared between classes. It is important to understand that roles are not classes. You cannot inherit from a role, and a role cannot be instantiated. -So, we will write our first role, *lib/MyAggregator/Roles/Feed.pm*: +So, we will write our first role, **lib/MyAggregator/Roles/Feed.pm**: {% highlight perl %} package MyAggregator::Roles::Feed; @@ -66,7 +61,7 @@ sub feed_parser { This one is pretty simple. It will read a content, try to parse it, and return a XML::Feed object. If it can't parse the feed, the error will be shown, and the result will be set to undef. -Now, a second role will be used to fetch the feed, and do basic caching, *lib/MyAggregator/Roles/UserAgent.pm*: +Now, a second role will be used to fetch the feed, and do basic caching, **lib/MyAggregator/Roles/UserAgent.pm**: {% highlight perl %} package MyAggregator::Roles::UserAgent; @@ -111,15 +106,13 @@ sub fetch_feed { 1; {% endhighlight %} -This role has 2 attributes: *ua* and *cache*. The *ua* attribute is our UserAgent. 'lazy' means that it will not be constructed until I call +This role has 2 attributes: **ua** and **cache**. The **ua** attribute is our UserAgent. 'lazy' means that it will not be constructed until I call `$self->ua->request`. -bc. $self->ua->request +I use **Cache::FileCache** for doing basic caching so I don't fetch or parse the feed if it's unnecessary, and I use the Etag and Last-Modified header to check the validity of my cache. -I use *Cache::FileCache* for doing basic caching so I don't fetch or parse the feed if it's unnecessary, and I use the Etag and Last-Modified header to check the validity of my cache. +The only method of this role is **fetch_feed**. 
It will fetch an URL if it's not already in the cache, and return a **HTTP::Response** object. -The only method of this role is *fetch_feed*. It will fetch an URL if it's not already in the cache, and return a *HTTP::Response* object. - -Now, I create an Entry class in *lib/MyAggregator/Entry.pm*: +Now, I create an Entry class in **lib/MyAggregator/Entry.pm**: {% highlight perl %} package MyAggregator::Entry; @@ -142,9 +135,9 @@ has 'permalink' => ( 1; {% endhighlight %} -Here the *permalink* has a trigger attribute: each entry has a unique *ID*, constructed with a sha256 value from the *permalink*. So, when we fill the *permalink* accessor, the *ID* is automatically set. +Here the **permalink** has a trigger attribute: each entry has a unique **ID**, constructed with a sha256 value from the **permalink**. So, when we fill the **permalink** accessor, the **ID** is automatically set. -We can now change our *MyAggregator* module like this: +We can now change our **MyAggregator** module like this: {% highlight perl %} package MyAggregator; @@ -228,15 +221,14 @@ sub dedupe_feed { {% endhighlight %} - * the with function composes roles into a class. So my MyAggregator class has a fetch_feed and parse_feed methods, and all the attributes of our roles - * context is a HashRef that contains the configuration - * schema is our MyModel schema +* the with function composes roles into a class. So my MyAggregator class has a fetch\_feed and parse\_feed methods, and all the attributes of our roles +* context is a HashRef that contains the configuration +* schema is our MyModel schema * kioku is a connection to our kiokudb backend +Two methods in this object: `run` and `dedupe`. -Two methods in this object: *run* and *dedupe*. - -The *run* method gets the list of feeds (line 28, via the *search*). For each feed return by the search, we try to fetch it, and if it's successful, we dedupe the entries. To dedupe the entries, we check if the permalink is alread in the database (line 45, via the *find*). If we already have this entry, we skip this one, and do the next one. If it's a new entry, we create a *MyAggregator::Entry* object, with the content, date, title, ... we store this object in kiokudb (line 55, we create a transaction, and do our insertion in the transaction), and create a new entry in the MyModel database (line 61, we enter in transaction too, and insert the entry in the database). +The `run` method gets the list of feeds (line 28, via the `search`). For each feed return by the search, we try to fetch it, and if it's successful, we dedupe the entries. To dedupe the entries, we check if the permalink is alread in the database (line 45, via the `find`). If we already have this entry, we skip this one, and do the next one. If it's a new entry, we create a **MyAggregator::Entry** object, with the content, date, title, ... we store this object in kiokudb (line 55, we create a transaction, and do our insertion in the transaction), and create a new entry in the MyModel database (line 61, we enter in transaction too, and insert the entry in the database). And to run this, a little script: @@ -248,16 +240,12 @@ my $agg = MyAggregator->new(context => LoadFile shift); $agg->run; {% endhighlight %} -so we can run our aggregator like this: - -{% highlight bash %} -perl bin/aggregator.pl conf.yaml -{% endhighlight %} +so we can run our aggregator like this `perl bin/aggregator.pl conf.yaml` And it's done :) We got a really basic aggregator now. 
If you want to improve this one, you would like to improve the dedupe process, using the permalink, the date and/or the title, as this one is too much basic. In the next article we will write some tests for this aggregator using Test::Class. -big thanks to "tea":http://bunniesincyberspace.wordpress.com/ and "blob":http://code.google.com/p/tinyaml/ for reviewing and fixing my broken english in the first 2 parts. +big thanks to [tea](http://bunniesincyberspace.wordpress.com/) and [blob](http://code.google.com/p/tinyaml/) for reviewing and fixing my broken english in the first 2 parts. -"the code is available on github":http://github.com/franckcuny/ironman-myaggregator/tree/master +[The code is available on GitHub](http://github.com/franckcuny/ironman-myaggregator/tree/master) Part 3 and 4 next week. diff --git a/_posts/2009-05-04-rtgi-and-perl-conferences.textile b/_posts/2009-05-04-rtgi-and-perl-conferences.md index 4903838..0f3d754 100644 --- a/_posts/2009-05-04-rtgi-and-perl-conferences.textile +++ b/_posts/2009-05-04-rtgi-and-perl-conferences.md @@ -1,6 +1,6 @@ --- layout: post -category: conference +summary: In which I go to a few conferences. title: RTGI and Perl conferences --- diff --git a/_posts/2009-05-06-a-simple-feed-aggregator-with-modern-perl-part-3.textile b/_posts/2009-05-06-a-simple-feed-aggregator-with-modern-perl-part-3.md index 3da3485..5f66675 100644 --- a/_posts/2009-05-06-a-simple-feed-aggregator-with-modern-perl-part-3.textile +++ b/_posts/2009-05-06-a-simple-feed-aggregator-with-modern-perl-part-3.md @@ -1,17 +1,17 @@ --- layout: post title: A simple feed aggregator with modern Perl - part 3 -category: perl +summary: In which we continue to write our feed aggregator. --- -Now that we have our aggregator, we have to write our tests. For this I will use Test::Class. Ovid have wrote an "excellent":http://www.modernperlbooks.com/mt/2009/03/organizing-test-suites-with-testclass.html "serie":http://www.modernperlbooks.com/mt/2009/03/reusing-test-code-with-testclass.html "of":http://www.modernperlbooks.com/mt/2009/03/making-your-testing-life-easier.html "articles":http://www.modernperlbooks.com/mt/2009/03/using-test-control-methods-with-testclass.html "about Test::Class":http://www.modernperlbooks.com/mt/2009/03/working-with-testclass-test-suites.html. You should really read this, because I will not enter in details. +Now that we have our aggregator, we have to write our tests. For this I will use Test::Class. Ovid have wrote an [excellent](http://www.modernperlbooks.com/mt/2009/03/organizing-test-suites-with-testclass.html) [serie](http://www.modernperlbooks.com/mt/2009/03/reusing-test-code-with-testclass.html) [of](http://www.modernperlbooks.com/mt/2009/03/making-your-testing-life-easier.html) [articles](http://www.modernperlbooks.com/mt/2009/03/using-test-control-methods-with-testclass.html) [about Test::Class](http://www.modernperlbooks.com/mt/2009/03/working-with-testclass-test-suites.html). You should really read this, because I will not enter in details. We have two things to test: * roles * aggregator -h3. Roles +### Roles For this, we create the following files: @@ -20,7 +20,7 @@ For this, we create the following files: * t/tests/Test/MyAggregator.pm * t/run.t -We will write our *run.t*: +We will write our **run.t**: {% highlight perl %} use lib 't/test'; @@ -99,21 +99,19 @@ Now we have to tests our 2 methods from the roles. We will test the fetch_feed m First, we indicate the number of tests that will be executed (6 in our case). 
Then we can write the test in the method: - * create an object - * fetch an url, and test the HTTP code of the response - * check if the content look like something we want - * now the data should be in cache, and the a new fetch of the url should return a 304 HTTP code +* create an object +* fetch an url, and test the HTTP code of the response +* check if the content look like something we want +* now the data should be in cache, and the a new fetch of the url should return a 304 HTTP code The second method to test is feed_parser. This method will do 3 tests. - * create an object - * we manually fetch the content from a feed - * send this content to feed_parser - * the result should return a XML::Feed::Format::RSS object +* create an object +* we manually fetch the content from a feed +* send this content to feed_parser +* the result should return a XML::Feed::Format::RSS object -When you run the tests now - -bc. prove t/run.t +When you run the tests now `prove t/run.t` the following result is produced: @@ -143,16 +141,15 @@ Files=1, Tests=11, 3 wallclock secs ( 0.03 usr 0.01 sys + 0.66 cusr 0.09 csy Result: PAS {% endhighlight %} -h3. Aggregator +### Aggregator -As we have our tests for the roles, we can write the tests for the -aggregator now. First, we add a new line in *t/run.t* +As we have our tests for the roles, we can write the tests for the aggregator now. First, we add a new line in **t/run.t**. {% highlight perl %} use Test::MyAggregator {% endhighlight %} -We edit our *t/tests/Test/MyAggregator.pm*: +We edit our **t/tests/Test/MyAggregator.pm**: {% highlight perl %} package Test::MyAggregator; @@ -259,4 +256,4 @@ Files=1, Tests=9, 3 wallclock secs ( 0.01 usr 0.01 sys + 1.39 cusr 0.12 csys Result: PASS {% endhighlight %} -We have our tests, so next step is the Catalyst frontend. As for the precedents parts, "the code is available on github":http://github.com/franckcuny/ironman-myaggregator/tree/master +We have our tests, so next step is the Catalyst frontend. As for the precedents parts, [the code is available on GitHub](http://github.com/franckcuny/ironman-myaggregator/tree/master) diff --git a/_posts/2009-05-13-a-simple-feed-aggregator-with-modern-perl-part-4.textile b/_posts/2009-05-13-a-simple-feed-aggregator-with-modern-perl-part-4.md index ca7849e..aace6ea 100644 --- a/_posts/2009-05-13-a-simple-feed-aggregator-with-modern-perl-part-4.textile +++ b/_posts/2009-05-13-a-simple-feed-aggregator-with-modern-perl-part-4.md @@ -1,33 +1,33 @@ --- layout: post title: A simple feed aggregator with modern Perl - part 4 -category: perl +summary: In which we reach the conclusion on how to write a feed aggregator. --- -We have the model, the aggregator (and some tests), now we can do a basic frontend to read our feed. For this I will create a webapp using "catalyst":http://www.catalystframework.org. +We have the model, the aggregator (and some tests), now we can do a basic frontend to read our feed. For this I will create a webapp using [Catalyst](http://www.catalystframework.org). 
-"Catalyst::Devel":http://search.cpan.org/perldoc?Catalyst::Devel is required for developping catalyst application, so we will install it first: +[Catalyst::Devel](http://search.cpan.org/perldoc?Catalyst::Devel) is required for developping catalyst application, so we will install it first: {% highlight perl %} -cpan Catalyst::Devel +% cpan Catalyst::Devel {% endhighlight %} Now we can create our catalyst application using the helper: {% highlight perl %} -catalyst.pl MyFeedReader +% catalyst.pl MyFeedReader {% endhighlight %} -This command initialise the framework for our application *MyFeedReader*. A number of files are created, like the structure of the MVC directory, some tests, helpers, ... +This command initialise the framework for our application **MyFeedReader**. A number of files are created, like the structure of the MVC directory, some tests, helpers, ... -We start by creating a view, using "TTSite":http://search.cpan.org/perldoc?Catalyst::View::TT. TTSite generate some templates for us, and the configuration for this template. We will also have a basic CSS, a header, footer, etc. +We start by creating a view, using [TTSite](http://search.cpan.org/perldoc?Catalyst::View::TT). TTSite generate some templates for us, and the configuration for this template. We will also have a basic CSS, a header, footer, etc. {% highlight bash %} cd MyFeedReader perl script/myfeedreader_create.pl view TT TTSite {% endhighlight %} -TTSite files are under *root/src* and *root/lib*. A *MyAggregator/View/TT.pm* file is also created. We edit it to make it look like this: +TTSite files are under **root/src** and **root/lib**. A **MyAggregator/View/TT.pm** file is also created. We edit it to make it look like this: {% highlight perl %} __PACKAGE__->config({ @@ -43,15 +43,17 @@ __PACKAGE__->config({ }); {% endhighlight %} -Now we create our first template, in *root/src/index.tt2* +Now we create our first template, in **root/src/index.tt2** -bq. to <a href="/feed/">your feeds</a> +{% highlight html %} +to <a href="/feed/">your feeds</a> +{% endhighlight %} -If you start the application (using _perl script/myfeedreader_server.pl_) and point your browser on http://localhost:3000/, this template will be rendered. +If you start the application (using `perl script/myfeedreader_server.pl`) and point your browser on http://localhost:3000/, this template will be rendered. We need two models, one for KiokuDB and another one for MyModel: -*lib/MyFeedReader/Model/KiokuDB.pm* +**lib/MyFeedReader/Model/KiokuDB.pm** {% highlight perl %} package MyFeedReader::Model::KiokuDB; @@ -60,15 +62,15 @@ BEGIN { extends qw(Catalyst::Model::KiokuDB) } 1; {% endhighlight %} -we edit the configuration file (*myfeedreader.conf*), and set the dsn for our kiokudb backend +we edit the configuration file (**myfeedreader.conf**), and set the dsn for our kiokudb backend -{% highlight perl %} +{% highlight xml %} <Model KiokuDB> dsn dbi:SQLite:../MyAggregator/foo.db </Model> {% endhighlight %} -*lib/MyFeedReader/Model/MyModel.pm* +**lib/MyFeedReader/Model/MyModel.pm** {% highlight perl %} package MyFeedReader::Model::MyModel; @@ -78,7 +80,7 @@ use base qw/Catalyst::Model::DBIC::Schema/; and the configuration: -{% highlight perl %} +{% highlight xml %} <Model MyModel> connect_info dbi:SQLite:../MyModel/model.db schema_class MyModel @@ -87,7 +89,7 @@ and the configuration: We got our view and our model, we can do the code for the controller. We need 2 controller, one for the feed, and one for the entries. 
The Feed controller will list them and display entries titles for a given feed. The Entry controller will just display them. -*lib/MyFeedReader/Controller/Feed.pm* +**lib/MyFeedReader/Controller/Feed.pm** {% highlight perl %} package MyFeedReader::Controller::Feed; @@ -112,13 +114,13 @@ sub view : Chained('/') : PathPart('feed/view') : Args(1) { 1; {% endhighlight %} -The function *index* list the feeds, while the function *view* list the entries for a give feed. We use the chained action mechanism to dispatch this url, so we can have urls like this _/feed/*_ +The function `index` list the feeds, while the function `view` list the entries for a give feed. We use the chained action mechanism to dispatch this url, so we can have urls like this **/feed/\*** We create our 2 templates (for index and view): -*root/src/feed/index.tt2* +**root/src/feed/index.tt2** -{% highlight perl %} +{% highlight html %} <ul> [% FOREACH feed IN feeds %] <li><a href="/feed/view/[% feed.id %]">[% feed.url %]</a></li> @@ -126,9 +128,9 @@ We create our 2 templates (for index and view): </ul> {% endhighlight %} -*root/src/feed/vew.tt2* +**root/src/feed/vew.tt2** -{% highlight perl %} +{% highlight html %} <h1>[% feed.url %]</h1> <h3>entries</h3> @@ -166,11 +168,11 @@ sub view : Chained('/') : PathPart('entry') : Args(1) { 1; {% endhighlight %} -The function *view* fetch an entry from the kiokudb backend, and store it in the stash, so we can use it in our template. +The function **view** fetch an entry from the kiokudb backend, and store it in the stash, so we can use it in our template. -*root/src/entry/view.tt2* +**root/src/entry/view.tt2** -{% highlight perl %} +{% highlight html %} <h1><a href="[% entry.permalink %]">[% entry.title %]</a></h1> <span>Posted [% entry.date %] by [% entry.author %]</span> <div id="content"> @@ -178,10 +180,10 @@ The function *view* fetch an entry from the kiokudb backend, and store it in the </div> {% endhighlight %} -If you point your browser to an entry (something like *http://localhost:3000/entry/somesha256value*), you will see an entry: +If you point your browser to an entry (something like **http://localhost:3000/entry/somesha256value**), you will see an entry: !/static/imgs/show_entry.png(show entry)! Et voila, we are done with a really basic feed reader. You can add methods to add or delete feed, mark an entry as read, ... -"The code is available on github":http://github.com/franckcuny/ironman-myfeedreader/tree/master +[The code is available on GitHub](http://github.com/franckcuny/ironman-myfeedreader/tree/master) diff --git a/_posts/2009-05-18-a-simple-feed-aggregator-with-modern-perl-part-4.1.textile b/_posts/2009-05-18-a-simple-feed-aggregator-with-modern-perl-part-4.1.md index 2577b4f..8f0e033 100644 --- a/_posts/2009-05-18-a-simple-feed-aggregator-with-modern-perl-part-4.1.textile +++ b/_posts/2009-05-18-a-simple-feed-aggregator-with-modern-perl-part-4.1.md @@ -1,12 +1,12 @@ --- layout: post title: A simple feed aggregator with modern Perl - part 4.1 -category: perl +summary: In which we had one more article on how to write a feed aggregator. --- -You can thanks "bobtfish":http://github.com/bobtfish for being such a pedantic guy, 'cause now you will have a better chained examples. He forked my repository from github and fix some code that I'll explain here. +You can thanks [bobtfish](http://github.com/bobtfish) for being such a pedantic guy, 'cause now you will have a better chained examples. He forked my repository from GitHub and fix some code that I'll explain here. 
-h3. lib/MyFeedReader.pm +### lib/MyFeedReader.pm {% highlight perl %} package MyFeedReader; @@ -23,14 +23,14 @@ h3. lib/MyFeedReader.pm +extends 'Catalyst'; {% endhighlight %} -You can see that he use "Moose":http://search.cpan.org/perldoc?Moose, so we can remove +You can see that he use [Moose](http://search.cpan.org/perldoc?Moose), so we can remove {% highlight perl %} use strict; use warnings; {% endhighlight %} -and have a more elegant way to inherit from "Catalyst":http://search.cpan.org/perldoc?Catalyst with +and have a more elegant way to inherit from [Catalyst](http://search.cpan.org/perldoc?Catalyst) with {% highlight perl %} extends 'Catalyst'; @@ -42,9 +42,9 @@ instead of use parent qw/Catalyst/; {% endhighlight %} -He also have updated the *Catalyst::Runtime* version, and added *namespace::autoclean*. The purpose of this module is to keep imported methods out of you namespace. Take a look at the "documentation":http://search.cpan.org/perldoc?namespace::autoclean, it's easy to understand how and why it's usefull. +He also have updated the **Catalyst::Runtime** version, and added **namespace::autoclean**. The purpose of this module is to keep imported methods out of you namespace. Take a look at the "documentation":http://search.cpan.org/perldoc?namespace::autoclean, it's easy to understand how and why it's usefull. -h3. lib/MyFeedReader/Controller/Root.pm +### lib/MyFeedReader/Controller/Root.pm {% highlight perl %} -use strict; @@ -71,9 +71,9 @@ h3. lib/MyFeedReader/Controller/Root.pm $c->response->status(404); {% endhighlight %} -A new method, *root*, that will be the root path for our application. All our methods will be chained from this action. If start you catalyst server and go to *http://localhost:3000/* you will be served with the Catalyst's welcome message as before. +A new method, `root`, that will be the root path for our application. All our methods will be chained from this action. If start you catalyst server and go to **http://localhost:3000/** you will be served with the Catalyst's welcome message as before. -h3. lib/MyFeedReader/Controller/Entry.pm +### lib/MyFeedReader/Controller/Entry.pm {% highlight perl %} -use warnings; @@ -97,9 +97,9 @@ h3. lib/MyFeedReader/Controller/Entry.pm +__PACKAGE__->meta->make_immutable; {% endhighlight %} -We extends the _Catalyst::Controller_ in a Moose way, and the _make_immutable_ instruction is a Moose recommanded best practice (you can alsa add _no Moose_ after the make_immutable). +We extends the **Catalyst::Controller** in a Moose way, and the `make_immutable` instruction is a Moose recommanded best practice (you can alsa add `no Moose` after the make_immutable). -h3. lib/MyFeedreader/Controller/Feed.pm +### lib/MyFeedreader/Controller/Feed.pm {% highlight perl %} +use Moose; @@ -133,25 +133,24 @@ h3. lib/MyFeedreader/Controller/Feed.pm +__PACKAGE__->meta->make_immutable; {% endhighlight %} -We got _feed_ which is chained to root. _index_ is chained to feed, and take no arguments. This method display the list of our feeds. And we got the _view_ method, chained to feed too, but with one argument, that display the content of an entry. +We got `feed` which is chained to root. `index` is chained to feed, and take no arguments. This method display the list of our feeds. And we got the `view` method, chained to feed too, but with one argument, that display the content of an entry. 
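
Pieced together, the chained actions described here could look roughly like the sketch below. This is not necessarily bobtfish's exact code: the `PathPart` values, the model name (**MyModel**) and the stash keys are assumptions carried over from the earlier parts of this series.

{% highlight perl %}
package MyFeedReader::Controller::Feed;
use Moose;
use namespace::autoclean;

BEGIN { extends 'Catalyst::Controller' }

# start of the Feed chain, attached to the root action declared in the
# Root controller; it captures no path segment of its own
sub feed : Chained('/root') : PathPart('feed') : CaptureArgs(0) { }

# end point with no argument: list all the feeds
sub index : Chained('feed') : PathPart('') : Args(0) {
    my ($self, $c) = @_;
    $c->stash->{feeds} = [ $c->model('MyModel::Feed')->all ];
}

# end point with one argument: show the entries of a single feed
sub view : Chained('feed') : PathPart('view') : Args(1) {
    my ($self, $c, $id) = @_;
    $c->stash->{feed} = $c->model('MyModel::Feed')->find($id);
}

__PACKAGE__->meta->make_immutable;

1;
{% endhighlight %}

The middle link (`feed`) uses `CaptureArgs(0)`, so it only anchors the chain; the two end points use `Args` to terminate it.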
If you start the application, you will see the following routes: -{% highlight perl %} - .-------------------------------------+--------------------------------------. - | Path Spec | Private | - +-------------------------------------+--------------------------------------+ - | /root/entry/* | /root (0) | - | | => /entry/view | - | /root/feed | /root (0) | - | | -> /feed/feed (0) | - | | => /feed/index | - | /root/feed/view/* | /root (0) | - | | -> /feed/feed (0) | - | | => /feed/view | - | /root | /root (0) | - | | => /index | - '-------------------------------------+--------------------------------------' -{% endhighlight %} + +> .-------------------------------------+--------------------------------------. +> | Path Spec | Private | +> +-------------------------------------+--------------------------------------+ +> | /root/entry/* | /root (0) | +> | | => /entry/view | +> | /root/feed | /root (0) | +> | | -> /feed/feed (0) | +> | | => /feed/index | +> | /root/feed/view/* | /root (0) | +> | | -> /feed/feed (0) | +> | | => /feed/view | +> | /root | /root (0) | +> | | => /index | +> '-------------------------------------+--------------------------------------' I hope you got a better idea about chained action in catalyst now. And again, thanks to bobtfish for the code. diff --git a/_posts/2009-05-22-modules-i-like---module-setup.textile b/_posts/2009-05-22-modules-i-like---module-setup.md index 75428b3..7ecee8e 100644 --- a/_posts/2009-05-22-modules-i-like---module-setup.textile +++ b/_posts/2009-05-22-modules-i-like---module-setup.md @@ -1,12 +1,12 @@ --- layout: post -category: perl +summary: In which I share my enthusiasm for Module::Setup. title: modules I like Module::Setup --- -"Module::Setup":http://search.cpan.org/perldoc?Module::Setup by "Yappo":http://blog.yappo.jp/ is a really nice module. I don't like "Module::Starter":http://search.cpan.org/perldoc?Module::Starter, it's not easy to create template to make it do what you need. With Module::Setup you can create flavors for any type of modules you want. Most of the modules I create for work use Moose, and I like to use Test::Class too. I've created a Moose flavor for creating this kind of modules. +[Module::Setup](http://search.cpan.org/perldoc?Module::Setup) by [Yappo](http://blog.yappo.jp/) is a really nice module. I don't like [Module::Starter](http://search.cpan.org/perldoc?Module::Starter), it's not easy to create template to make it do what you need. With Module::Setup you can create flavors for any type of modules you want. Most of the modules I create for work use Moose, and I like to use Test::Class too. I've created a Moose flavor for creating this kind of modules. -h3. Creating a Moose flavor for Module::Setup +### Creating a Moose flavor for Module::Setup First, you tell it to init a new flavor: @@ -24,7 +24,7 @@ Start by editing *$HOME/.module-setup/flavors/moose/template/lib/____var-module_ + use Moose; {% endhighlight %} -Add *requires 'Moose'* in *Makefile.PL*. Create a *t/tests/Test/____var-module_path-var____.pm* file with the following content: +Add **requires 'Moose'** in **Makefile.PL**. 
Create a **t/tests/Test/____var-module_path-var____.pm** file with the following content: {% highlight perl %} package Test :: [%module %]; diff --git a/_posts/2009-05-30-catalystx-dispatcher-asgraph.md b/_posts/2009-05-30-catalystx-dispatcher-asgraph.md new file mode 100644 index 0000000..735cb52 --- /dev/null +++ b/_posts/2009-05-30-catalystx-dispatcher-asgraph.md @@ -0,0 +1,27 @@ +--- +layout: post +title: CatalystX::Dispatcher::AsGraph +summary: In which I wrote a module to visualize routes in Catalyst. +--- + +This morning I saw [this post](http://marcus.nordaaker.com/2009/05/awesome-route-graph-with-mojoxroutesasgraph/) from Marcus Ramberg about [MojoX::Routes::AsGraph](http://search.cpan.org/perldoc?MojoX::Routes::AsGraph). I liked the idea. But as I Catalyst instead of Mojo, I thought I could give a try and do the same thing for Catalyst dispatcher, and I've coded CatalystX::Dispatcher::AsGraph. For the moment only private actions are graphed. + +!/static/imgs/routes-300x249.png(routes)! + +You use it like this: `perl bin/catalyst_graph_dispatcher.pl --appname Arkham --output routes.png` + +You can create a simple script to output as text if you prefer: + +{% highlight perl %} +#!/usr/bin/perl -w +use strict; +use CatalystX::Dispatcher::AsGraph; + +my $graph = CatalystX::Dispatcher::AsGraph->new_with_options(); +$graph->run; +print $graph->graph->as_txt; +{% endhighlight %} + +The code is on [GitHub](http://github.com/franckcuny/CatalystX--Dispatcher--AsGraph/tree/master) for the moment. + +For thoses who are interested by visualization, I'll publish soon some (at least I think) really nice visualisations about CPAN, Perl, and his community, that we have created at [$work](http://rtgi.fr). diff --git a/_posts/2009-05-30-catalystx-dispatcher-asgraph.textile b/_posts/2009-05-30-catalystx-dispatcher-asgraph.textile deleted file mode 100644 index af46c6b..0000000 --- a/_posts/2009-05-30-catalystx-dispatcher-asgraph.textile +++ /dev/null @@ -1,29 +0,0 @@ ---- -layout: post -title: CatalystX::Dispatcher::AsGraph -category: perl ---- - -This morning I saw "this post":http://marcus.nordaaker.com/2009/05/awesome-route-graph-with-mojoxroutesasgraph/ from marcus ramberg about "MojoX::Routes::AsGraph":http://search.cpan.org/perldoc?MojoX::Routes::AsGraph. I liked the idea. But as I Catalyst instead of Mojo, I thought I could give a try and do the same thing for Catalyst dispatcher, and I've coded CatalystX::Dispatcher::AsGraph. For the moment only private actions are graphed. - -!/static/imgs/routes-300x249.png(routes)! - -You use it like this: - -bc. perl bin/catalyst_graph_dispatcher.pl --appname Arkham --output routes.png - -You can create a simple script to output as text if you prefer: - -{% highlight perl %} -#!/usr/bin/perl -w -use strict; -use CatalystX::Dispatcher::AsGraph; - -my $graph = CatalystX::Dispatcher::AsGraph->new_with_options(); -$graph->run; -print $graph->graph->as_txt; -{% endhighlight %} - -The code is on "github":http://github.com/franckcuny/CatalystX--Dispatcher--AsGraph/tree/master for the moment. - -For thoses who are interested by visualization, I'll publish soon some (at least I think) really nice visualisations about CPAN, Perl, and his community, that we have created at "$work":http://rtgi.fr. 
diff --git a/_posts/2009-06-06-modules-i-like-web-scraper.textile b/_posts/2009-06-06-modules-i-like-web-scraper.md index ab78fae..e2808cc 100644 --- a/_posts/2009-06-06-modules-i-like-web-scraper.textile +++ b/_posts/2009-06-06-modules-i-like-web-scraper.md @@ -1,12 +1,12 @@ --- layout: post -category: perl +summary: In which I talk about Web::Scraper title: modules I like Web::Scraper --- -For "$work":http://rtgi.fr I need to write scrapers. It used to be boring and painful. But thanks to "miyagawa":http://search.cpan.org/~miyagawa/, this is not true anymore. "Web::Scraper":http://search.cpan.org/perldoc?Web::Scraper offer a nice API: you can write your rules using XPath, you can chaine rules, a nice and simple syntax, etc. +For [$work](http://rtgi.fr) I need to write scrapers. It used to be boring and painful. But thanks to [miyagawa](http://search.cpan.org/~miyagawa/), this is not true anymore. [Web::Scraper](http://search.cpan.org/perldoc?Web::Scraper) offer a nice API: you can write your rules using XPath, you can chaine rules, a nice and simple syntax, etc. -I wanted to export my data from my last.fm account but there is no API for this, so I would need to scrap them. All the data are available "as a web page":http://www.last.fm/user/franckcuny/tracks that list your music. So the scraper need to find how many pages, and find the content on each page to extract a list of your listening. +I wanted to export my data from my last.fm account but there is no API for this, so I would need to scrap them. All the data are available [as a web page](http://www.last.fm/user/franckcuny/tracks) that list your music. So the scraper need to find how many pages, and find the content on each page to extract a list of your listening. For the total of pages, it's easy. Let's take a look at the HTML code and search for something like this: @@ -14,11 +14,11 @@ For the total of pages, it's easy. Let's take a look at the HTML code and search <a class="lastpage" href="/user/franckcuny/tracks?page=272">272</a> {% endhighlight %} -the information is in a class *lastpage*. +the information is in a class **lastpage**. Now we need to find our data: I need the artist name, the song name and the date I played this song. -All this data are in a *table*, and each new entry is in a *td*. +All this data are in a **table**, and each new entry is in a **td**. {% highlight html %} <tr id="r9_1580_1920248170" class="odd"> @@ -33,7 +33,7 @@ All this data are in a *table*, and each new entry is in a *td*. </td> {% endhighlight %} -It's simple: information about a song are stored in *subjectcell*, and the artist and song title are each in a tag *a*. The date is in a *dateCell*, and we need the *title* from the *abbr* tag. +It's simple: information about a song are stored in **subjectcell**, and the artist and song title are each in a tag **a**. The date is in a **dateCell**, and we need the **title** from the **abbr** tag. The scraper we need to write is @@ -49,7 +49,7 @@ my $scrap = scraper { }; {% endhighlight %} -The first rule extract the total of page. The second iter on each *tr* and store the content in an array named *songs*. This *tr* need to be scraped. So we look the the *abbr* tag, and store in *date* the property *title*. Then we look for the song and artitst information. We look for the *td* with a class named *subjectCell*, a extract all links. +The first rule extract the total of page. The second iter on each **tr** and store the content in an array named **songs**. This **tr** need to be scraped. 
So we look the the **abbr** tag, and store in **date** the property **title**. Then we look for the song and artitst information. We look for the **td** with a class named **subjectCell**, a extract all links. Our final script will look like this: @@ -96,5 +96,5 @@ sub scrap_lastfm { You can use this script like this: {% highlight bash %} -perl lastfmscraper.pl franckcuny store_data.txt +% perl lastfmscraper.pl franckcuny store_data.txt {% endhighlight %} diff --git a/_posts/2009-06-12-shape-of-cpan.textile b/_posts/2009-06-12-shape-of-cpan.md index 0b051ab..0e4ef86 100644 --- a/_posts/2009-06-12-shape-of-cpan.textile +++ b/_posts/2009-06-12-shape-of-cpan.md @@ -1,27 +1,27 @@ --- layout: post -category: graph -title: shape of CPAN +summary: In which I talk about the shape of the CPAN +title: The shape of the CPAN --- -My talk at the "FPW":http://conferences.mongueurs.net/fpw2009/ this year is about the shape of the Perl and CPAN community. This talk was prepared by some "$coworkers":http://labs.rtgi.eu/ and me. +My talk at the [FPW](http://conferences.mongueurs.net/fpw2009/) this year is about the shape of the Perl and CPAN community. This talk was prepared by some [$coworkers](http://labs.rtgi.eu/) and me. !/static/imgs/draft_cpan_prelimsmall.png(map of the Perl community on the web)! We generated two maps (authors and modules) using the CPANTS' data. For the websites, we crawled a seed generated from the CPAN pages of the previous authors. -Each of this graphs are generated using a "force base algorithm":http://en.wikipedia.org/wiki/Force-based_algorithms, with the help of "Gephi":http://gephi.org/. +Each of this graphs are generated using a [force base algorithm](http://en.wikipedia.org/wiki/Force-based_algorithms), with the help of [Gephi](http://gephi.org/). All the map are available in PDF files, in creative common licence. The slides are in french, but I will explain the three maps here. - * "slides (french)":http://labs.rtgi.eu/fpw09/resources/slides/ - * "authors map":http://labs.rtgi.eu/fpw09/resources/pdf/cpan_authors_core_march2009.pdf - * "modules map":http://labs.rtgi.eu/fpw09/resources/pdf/cpan_packages_core_march2009.pdf - * "community maps":http://labs.rtgi.eu/fpw09/resources/pdf/cpan-web-may2009-poster.pdf - * "community map (flash version)":http://labs.rtgi.eu/fpw09/map/ - * "cpan-explorer.org":http://cpan-explorer.org/ + * [slides (french)](http://labs.rtgi.eu/fpw09/resources/slides/) + * [authors map](http://labs.rtgi.eu/fpw09/resources/pdf/cpan_authors_core_march2009.pdf) + * [modules map](http://labs.rtgi.eu/fpw09/resources/pdf/cpan_packages_core_march2009.pdf) + * [community maps](http://labs.rtgi.eu/fpw09/resources/pdf/cpan-web-may2009-poster.pdf) + * [community map (flash version)](http://labs.rtgi.eu/fpw09/map/) + * [cpan-explorer.org](http://cpan-explorer.org/) -h3. CPAN's modules +### CPAN's modules The first map is about the modules available on the CPAN. We selected a list of modules which are listed as dependancies by at least 10 others modules, and the modules who used them. This graph is composed of 7193 nodes (or modules) and 17510 edges. Some clusters are interesting: @@ -30,14 +30,14 @@ The first map is about the modules available on the CPAN. We selected a list of * TK is isolated from the CPAN * Moose, DBIx::Class and Catalyst are forming a group. This data are from march, we will try to do a newer version of this map this summer. This one will be really interesting as Catalyst have switched to Moose -h3. 
The CPAN's authors +### The CPAN's authors This map is about the authors on the CPAN. There is about 700 authors and their connections. Each time an author use a module of another author, a link is created. * Modern Perl, constitued by Moose, Catalyst, DBIx::Class. Important authors are Steven, Sartak, perigin, jrockway, mstrout, nothingmuch, marcus ramberg * Slaven Rezi? and others TK developpers are on the border -h3. Web map +### Web map We crawled the web using the seed generated using the CPAN's authors pages. diff --git a/_posts/2009-06-17-xmobar-on-debian-sid.textile b/_posts/2009-06-17-xmobar-on-debian-sid.md index 7382b1a..46dfc40 100644 --- a/_posts/2009-06-17-xmobar-on-debian-sid.textile +++ b/_posts/2009-06-17-xmobar-on-debian-sid.md @@ -1,14 +1,14 @@ --- layout: post -category: app +summary: In which I path xmobar for Debian SID title: xmobar on debian SID --- -If you are using "xmonad":http://www.xmonad.org/ and "xmobar":http://code.haskell.org/~arossato/xmobar/ on "debian SID":http://www.debian.org/ on a laptop, and don't see any battery information, you may have test this "solution":http://5e6n1.wordpress.com/2009/03/30/xmobar-battery-plugin-using-sysfs-not-procfs/. +If you are using [xmonad](http://www.xmonad.org/) and [xmobar](http://code.haskell.org/~arossato/xmobar/) on [Debian SID](http://www.debian.org/) on a laptop, and don't see any battery information, you may have test this [solution](http://5e6n1.wordpress.com/2009/03/30/xmobar-battery-plugin-using-sysfs-not-procfs/). If this didn't solve your problem, try this patch on SysfsBatt.hs : -{% highlight haskell %} +{% highlight diff %} 52c52 < let path = sysfsPath ++ p ++ "/charge_full" --- @@ -26,9 +26,9 @@ If this didn't solve your problem, try this patch on SysfsBatt.hs : Then as before: {% highlight bash %} -runhaskell Setup.lhs configure --user -runhaskell Setup.lhs build -runhaskell Setup.lhs install --user +% runhaskell Setup.lhs configure --user +% runhaskell Setup.lhs build +% runhaskell Setup.lhs install --user {% endhighlight %} Battery information should be visible. diff --git a/_posts/2009-06-22-modules-i-like-getopt-long-and-moosex-getopt.textile b/_posts/2009-06-22-modules-i-like-getopt-long-and-moosex-getopt.md index 4f9470d..9decc0f 100644 --- a/_posts/2009-06-22-modules-i-like-getopt-long-and-moosex-getopt.textile +++ b/_posts/2009-06-22-modules-i-like-getopt-long-and-moosex-getopt.md @@ -1,12 +1,12 @@ --- layout: post -category: perl +summary: In which I talk about GetOpt::Long and Moosex::Getopt title: Modules I like Getopt::Long and MooseX::Getopt --- -h3. Getopt::Long +## Getopt::Long -"Getopt::long":http://search.cpan.org/perldoc?Getopt::Long is a useful module to parse command line arguements. +[Getopt::long](http://search.cpan.org/perldoc?Getopt::Long) is a useful module to parse command line arguements. A basic usage is something like this: @@ -21,19 +21,19 @@ GetOptions('config=s' => \my $cfg_file,); my $config = LoadFile $cfg_file {% endhighlight %} -In *GetOptions*, we require a value for config with *config=s*. If we wante an integer, we replace 's' with 'i', and for a floating point, with 'f'. +In **GetOptions**, we require a value for config with **config=s**. If we wante an integer, we replace 's' with 'i', and for a floating point, with 'f'. Call your script : {% highlight bash %} - script.pl --config=file.yml #this one works - script.pl --config file.yml #this one too! 
- script.pl -c file.yml #and this one too +% script.pl --config=file.yml #this one works +% script.pl --config file.yml #this one too! +% script.pl -c file.yml #and this one too {% endhighlight %} The three syntaxes are understood. -A good practices is to combine this module with "Pod::Usage":http://search.cpan.org/perldoc?Pod::Usage. Let's do some modifications on the example: +A good practices is to combine this module with [Pod::Usage](http://search.cpan.org/perldoc?Pod::Usage). Let's do some modifications on the example: {% highlight perl %} #!/usr/bin/perl -w @@ -72,7 +72,7 @@ Path to the config file then {% highlight bash %} -$ perl uberscript +% perl uberscript Usage: uberscript [options] @@ -83,9 +83,9 @@ Usage: From now if we call our script without argument, the POD will be printed on STDIN. -h3. MooseX::Getopt +## MooseX::Getopt -"MooseX::Getopt":http://search.cpan.org/perldoc?MooseX::Getopt) is a Role that add a <code>new_with_options</code> to your object. We create a basic Object : +[MooseX::Getopt](http://search.cpan.org/perldoc?MooseX::Getopt) is a Role that add a `new_with_options` to your object. We create a basic Object : {% highlight perl %} package OurShinyObject; @@ -116,18 +116,14 @@ my $obj = OurShinyObject->new_from_options(); {% endhighlight %} -bc. script.pl --config file.yml +{% highlight sh%} +% script.pl --config file.yml +{% endhighlight %} The role will set our attribute **context** using the value from the argument set on the command line. -The - -{% highlight perl %} -traits => ['NoGetopt'] -{% endhighlight %} - -indicate that this attributes will be not be read from the command line. An alternate way to do this is to prefix the attributes with *_*. +The `traits => ['NoGetopt']` indicate that this attributes will be not be read from the command line. An alternate way to do this is to prefix the attributes with **_**. -h3. conclusion (?) +## conclusion (?) -When you write a script, even if you're sure you will never need to have more than one argument, or that you never will have to update the code, *please* consider to use of *Getopt::Long* instead of a *shift @ARGV*, because we all know that you will at a certain point update this script and you will more than one argument :). +When you write a script, even if you're sure you will never need to have more than one argument, or that you never will have to update the code, *please* consider to use of **Getopt::Long** instead of a `shift @ARGV`, because we all know that you will at a certain point update this script and you will more than one argument :). diff --git a/_posts/2009-06-25-how-to-prevent-some-components-to-be-loaded-by-catalyst.md b/_posts/2009-06-25-how-to-prevent-some-components-to-be-loaded-by-catalyst.md new file mode 100644 index 0000000..9466bbc --- /dev/null +++ b/_posts/2009-06-25-how-to-prevent-some-components-to-be-loaded-by-catalyst.md @@ -0,0 +1,26 @@ +--- +layout: post +summary: In which I show how to disable some components in Catalyst. +title: How to prevent some components to be loaded by Catalyst +--- + +Let's say you have a large [Catalyst](http://search.cpan.org/perldoc?Catalyst) application, with a lot of compoments. When you deploy your application, or when you want to test it while your developping, you may not want to have some of thoses components loaded (you don't have all the dependencies, they are incompatible, etc...). Catalyst use [Module::Pluggable](http://search.cpan.org/perldoc?Module::Pluggable) to load the components, so you can easily configure this. 
In your application's configuration, add: + +{% highlight yaml %} +setup_components: + except: + - MyApp::Model::AAAA + - MyAPP::Model::BBBB::REST + ... +{% endhighlight %} + +Module::Pluggable have some other interesting features. You may have a second Catalyst application, and want to use one or more components from this one. You can easily do this: + +{% highlight yaml %} +setup_components: + search_path: + - MyApp + - MyOtherApp::Model +{% endhighlight %} + + diff --git a/_posts/2009-06-25-how-to-prevent-some-components-to-be-loaded-by-catalyst.textile b/_posts/2009-06-25-how-to-prevent-some-components-to-be-loaded-by-catalyst.textile deleted file mode 100644 index fefbede..0000000 --- a/_posts/2009-06-25-how-to-prevent-some-components-to-be-loaded-by-catalyst.textile +++ /dev/null @@ -1,26 +0,0 @@ ---- -layout: post -category: perl -title: How to prevent some components to be loaded by Catalyst ---- - -Let's say you have a "large" [Catalyst](http://search.cpan.org/perldoc?Catalyst) application, with a lot of compoments. When you deploy your application, or when you want to test it while your developping, you may not want to have some of thoses components loaded (you don't have all the dependencies, they are incompatible, etc...). Catalyst use [Module::Pluggable](http://search.cpan.org/perldoc?Module::Pluggable) to load the components, so you can easily configure this. In your application's configuration, add: - -{% highlight yaml %} -setup_components: - except: - - MyApp::Model::AAAA - - MyAPP::Model::BBBB::REST - ... -{% endhighlight %} - -Module::Pluggable have some other interesting features. You may have a second Catalyst application, and want to use one or more components from this one. You can easily do this: - -{% highlight yaml %} -setup_components: - search_path: - - MyApp - - MyOtherApp::Model -{% endhighlight %} - - diff --git a/_posts/2009-06-30-private-and-protected-methods-with-moose.textile b/_posts/2009-06-30-private-and-protected-methods-with-moose.md index 3734fe3..fe098f7 100644 --- a/_posts/2009-06-30-private-and-protected-methods-with-moose.textile +++ b/_posts/2009-06-30-private-and-protected-methods-with-moose.md @@ -1,6 +1,6 @@ --- layout: post -category: perl +summary: In which I show how to write dummy private methods for Moose title: Private and protected methods with Moose --- Yesterday, one of our interns asked me a question about private method in <a href="http://www.iinteractive.com/moose/">Moose</a>. I told him that for Moose as for Perl, there is no such things as private method. By convention, methods prefixed with '_' are considered private. @@ -44,5 +44,5 @@ done was this: with_caller => [qw( private protected )],); {% endhighlight %} -and write the 'private' and 'protected' sub. I'm sure there is some stuff I can do to improve this, but for a first test, I'm happy with the result and still amazed how easy it was to add this two keywords. +and write the `private` and `protected` sub. I'm sure there is some stuff I can do to improve this, but for a first test, I'm happy with the result and still amazed how easy it was to add this two keywords. 
diff --git a/_posts/2009-07-07-cpan-and-auto-install.md b/_posts/2009-07-07-cpan-and-auto-install.md new file mode 100644 index 0000000..9315702 --- /dev/null +++ b/_posts/2009-07-07-cpan-and-auto-install.md @@ -0,0 +1,9 @@ +--- +layout: post +summary: In which I show how to auto answer questions asked by cpan +title: CPAN and auto-install +--- + +When you install a module from the [CPAN](http://search.cpan.org), and this module requires other modules, the cpan shell will ask you if you want to install them. When you are installing [Catalyst](http://www.catalystframework.org/), it may take a while, and you may not want to spend your afternoon in front of the prompt answering "yes" every 5 seconds. + +The solution is to set the environment variable **PERL_MM_USE_DEFAULT**. Next time you want to install a big app: `PERL_MM_USE_DEFAULT=1 cpan Catalyst KiokuDB` and your done. diff --git a/_posts/2009-07-07-cpan-and-auto-install.textile b/_posts/2009-07-07-cpan-and-auto-install.textile deleted file mode 100644 index d79f6c0..0000000 --- a/_posts/2009-07-07-cpan-and-auto-install.textile +++ /dev/null @@ -1,13 +0,0 @@ ---- -layout: post -category: perl -title: CPAN and auto-install ---- - -When you install a module from the "CPAN":http://search.cpan.org, and this module requires other modules, the cpan shell will ask you if you want to install them. When you are installing "Catalyst":http://www.catalystframework.org/, it may take a while, and you may not want to spend your afternoon in front of the prompt answering "yes" every 5 seconds. - -The solution is to set the environment variable *PERL_MM_USE_DEFAULT*. Next time you want to install a big app: - -bc. PERL_MM_USE_DEFAULT=1 cpan Catalyst KiokuDB - -and your done. diff --git a/_posts/2009-07-16-cpanhq-and-dependencies-graph.textile b/_posts/2009-07-16-cpanhq-and-dependencies-graph.md index 0d9854b..c21f9cb 100644 --- a/_posts/2009-07-16-cpanhq-and-dependencies-graph.textile +++ b/_posts/2009-07-16-cpanhq-and-dependencies-graph.md @@ -1,17 +1,17 @@ --- layout: post -category: perl +summary: In which I graph dependencies for CPAN modules title: CPANHQ and dependencies graph --- -CPANHQ is a new project that "aims to be a community-driven, meta-data-enhanced alternative to such sites as "search.cpan.org":http://search.cpan.org/ and "kobesearch.cpan.org":http://kobesearch.cpan.org/. +CPANHQ is a new project that "aims to be a community-driven, meta-data-enhanced alternative to such sites as [search.cpan.org](http://search.cpan.org) and [kobesearch.cpan.org](http://kobesearch.cpan.org/). I believe that a good vizualisation can help to have a better understanding of datas. One of the missing thing on the actual search.cpan.org is the lack of informations about a distribution's dependencies. So my first contribution to the CPANHQ project was to add such informations. !/static/imgs/cpanhq-dep.png(cpanhq deps)! -For each distributions, a graph is generated for the this distribution. For this, I use "Graph::Easy":http://search.cpan.org/perldoc?Graph::Easy and data available from the CPANHQ database. I alsa include a simple list of the dependencies after the graph. +For each distributions, a graph is generated for the this distribution. For this, I use [Graph::Easy](http://search.cpan.org/perldoc?Graph::Easy) and data available from the CPANHQ database. I alsa include a simple list of the dependencies after the graph. Only the first level dependencies are displayed, as the distribution's metadata are analysed when the request is made. 
I could follow all the dependencies when the request is made, but for some distribution it could take a really long time, and it's not suitable for this kind of services. -*edit*: you can found CPANHQ here : "CPANHQ on github":http://github.com/bricas/cpanhq/tree/master. +**edit**: you can find [CPANHQ on GitHub](http://github.com/bricas/cpanhq/tree/master). diff --git a/_posts/2009-07-26-apply-a-role-to-a-moose-object.textile b/_posts/2009-07-26-apply-a-role-to-a-moose-object.md index 40ace15..3e96ac0 100644 --- a/_posts/2009-07-26-apply-a-role-to-a-moose-object.textile +++ b/_posts/2009-07-26-apply-a-role-to-a-moose-object.md @@ -1,11 +1,9 @@ --- layout: post title: Apply a role to a Moose object -category: perl +summary: In which I show how to apply a role to a Moose's object --- -h2. Apply a role to a Moose object - You can apply a role to a Moose object. You can do something like {% highlight perl %} @@ -34,5 +32,7 @@ $test->baz; with the following output: -bc. i can't haz baz +``` +i can't haz baz i can haz baz +``` diff --git a/_posts/2009-07-26-cpan-explorer.md b/_posts/2009-07-26-cpan-explorer.md new file mode 100644 index 0000000..10e6f7e --- /dev/null +++ b/_posts/2009-07-26-cpan-explorer.md @@ -0,0 +1,9 @@ +--- +layout: post +summary: In which I share an update to CPAN Explorer. +title: cpan-explorer +--- + +We ([RTGI]"http://rtgi.fr") have been working to update the [cpan-explorer](http://cpan-explorer.org). A new version will be available this week, before YAPC::EU. Three new maps have been created, using different informations than the previous one, and you will be able to search and pinpoint the browsable maps. + +!/static/imgs/authorsmap.png(authors map)! diff --git a/_posts/2009-07-26-cpan-explorer.textile b/_posts/2009-07-26-cpan-explorer.textile deleted file mode 100644 index bb532cf..0000000 --- a/_posts/2009-07-26-cpan-explorer.textile +++ /dev/null @@ -1,9 +0,0 @@ ---- -layout: post -category: graph -title: cpan-explorer ---- - -We ("RTGI":http://rtgi.fr) have been working to update the "cpan-explorer":http://cpan-explorer.org. A new version will be available this week, before YAPC::EU. Three new maps have been created, using different informations than the previous one, and you will be able to search and pinpoint the browsable maps. - -!/static/imgs/authorsmap.png(authors map)! diff --git a/_posts/2009-07-28-cpan-explorer-update-and-three-new-maps.textile b/_posts/2009-07-28-cpan-explorer-update-and-three-new-maps.md index e9ea414..c535efc 100644 --- a/_posts/2009-07-28-cpan-explorer-update-and-three-new-maps.textile +++ b/_posts/2009-07-28-cpan-explorer-update-and-three-new-maps.md @@ -1,28 +1,28 @@ --- layout: post -category: graph +summary: In which title: cpan-explorer update and three new maps --- -The site "cpan-explorer":http://cpan-explorer.org/ have been update with three new maps for the "YAPC::EU":http://yapceurope2009.org/ye2009/. This three maps are different from the previous one. This time, instead of having a big photography of the distributions and authors on the CPAN, Task::Kensho have been used to obtain a representation of what we call the *modern Perl*. +The site [cpan-explorer](http://cpan-explorer.org/) have been update with three new maps for the [YAPC::EU](http://yapceurope2009.org/ye2009/). This three maps are different from the previous one. This time, instead of having a big photography of the distributions and authors on the CPAN, Task::Kensho have been used to obtain a representation of what we call the **modern Perl**. -h3. 
distributions map +## distributions map !/static/imgs/moosedist.png(moose)! Task::Kensho acted as the seed for this map. Task::Kensho contains a list of modules recommended to do modern Perl development. So we extracted the modules that have a dependancie toward one of these modules, and create the graph with this data. -h3. authors map +## authors map The authors listed on this map are the one from the previous map. There is a far less authors thant the previous authors map, but it's more readable. A lot of informations are on the map : label size, node size, edge size, color of the node. -h3. web communities map +## web communities map This map look a lot like the previous one, as we used nearly the same data. The seed have been extended with a few websites only. -h3. cpan-explorer +## cpan-explorer -cpan-explorer is now hosted on a wordpress, so you can leave comments or suggestions for new maps you would like to see (a focus on web development modules, tests::* module, etc ...). All the new maps are also searchable, and give you a permalink for you search ("I'm here":http://cpan-explorer.org/2009/07/28/new-web-communities-map-for-yapceu/#dist%3Dlumberjaph.net and "here":http://cpan-explorer.org/2009/07/28/version-of-the-authors-graph-for-yapceu/#author%3Dfranck) +cpan-explorer is now hosted on a wordpress, so you can leave comments or suggestions for new maps you would like to see (a focus on web development modules, tests::* module, etc ...). All the new maps are also searchable, and give you a permalink for you search ([I'm here](http://cpan-explorer.org/2009/07/28/new-web-communities-map-for-yapceu/#dist%3Dlumberjaph.net) and [here](http://cpan-explorer.org/2009/07/28/version-of-the-authors-graph-for-yapceu/#author%3Dfranck)) I will give a talk at the "YAPC::EU":http://yapceurope2009.org/ye2009/talk/2061 about this work. Also, each map have been printed, and will be given for the auction. diff --git a/_posts/2009-08-23-perl-5.10.1-released.textile b/_posts/2009-08-23-perl-5.10.1-released.md index da1590f..0806675 100644 --- a/_posts/2009-08-23-perl-5.10.1-released.textile +++ b/_posts/2009-08-23-perl-5.10.1-released.md @@ -1,10 +1,10 @@ --- layout: post -category: perl +summary: In which I install Perl 5.10.1 title: Perl 5.10.1 released --- -"Perl 5.10.1":http://www.cpan.org/modules/by-authors/id/D/DA/DAPM/perl-5.10.1.tar.bz2 "have been release":http://www.nntp.perl.org/group/perl.perl5.porters/2009/08/msg150172.html. You can download it from the CPAN, or if you can't wait, "here":http://www.iabyn.com/tmp/perl-5.10.1.tar.bz2. +[Perl 5.10.1](http://www.cpan.org/modules/by-authors/id/D/DA/DAPM/perl-5.10.1.tar.bz2) [has been released](http://www.nntp.perl.org/group/perl.perl5.porters/2009/08/msg150172.html). You can download it from the CPAN, or if you can't wait, [here](http://www.iabyn.com/tmp/perl-5.10.1.tar.bz2). Next, you need to build it: diff --git a/_posts/2009-08-23-perl5lib-and-zsh.textile b/_posts/2009-08-23-perl5lib-and-zsh.md index bfa733f..24dcc37 100644 --- a/_posts/2009-08-23-perl5lib-and-zsh.textile +++ b/_posts/2009-08-23-perl5lib-and-zsh.md @@ -1,6 +1,6 @@ --- layout: post -category: perl +summary: In which I show how I manage my $PERL5LIB. 
title: $PERL5LIB and zsh --- @@ -14,4 +14,5 @@ for perl_lib in $(ls $BASE_PATH); do fi done export PERL5LIB + {% endhighlight %} diff --git a/_posts/2009-08-31-osdc.fr.textile b/_posts/2009-08-31-osdc.fr.md index de605c9..54578dc 100644 --- a/_posts/2009-08-31-osdc.fr.textile +++ b/_posts/2009-08-31-osdc.fr.md @@ -1,6 +1,6 @@ --- layout: post -category: conference +summary: In which I promote the OSDC.fr conference title: OSDC.fr --- diff --git a/_posts/2009-10-03-teh-batmoose-at-osdc.fr.md b/_posts/2009-10-03-teh-batmoose-at-osdc.fr.md new file mode 100644 index 0000000..0ee6434 --- /dev/null +++ b/_posts/2009-10-03-teh-batmoose-at-osdc.fr.md @@ -0,0 +1,11 @@ +--- +layout: post +title: teh batmoose at osdc.fr +summary: In which I share the batmoose +--- + +Today I presented a talk about Moose at [OSDC.fr](http://osdc.fr). The slides are available [here](http://franck.lumberjaph.net/blog/slides/Introduction_a_Moose.pdf) + +And big thanks to my friend [Morgan](http://www.bwoup.com) for his illustration of the batmoose :) + + diff --git a/_posts/2009-10-03-teh-batmoose-at-osdc.fr.textile b/_posts/2009-10-03-teh-batmoose-at-osdc.fr.textile deleted file mode 100644 index 98b13bd..0000000 --- a/_posts/2009-10-03-teh-batmoose-at-osdc.fr.textile +++ /dev/null @@ -1,11 +0,0 @@ ---- -layout: post -category: conference -title: teh batmoose at osdc.fr ---- - -Today I presented a talk about Moose at <a href="http://osdc.fr/">OSDC.fr</a>. The slides are available "here":http://franck.lumberjaph.net/blog/slides/Introduction_a_Moose.pdf. - -And big thanks to my friend <a href="http://www.bwoup.com/">Morgan</a> for his illustration of the batmoose :) - -!/static/imgs/batmoose_1024cut-300x225.png(batmoose)! diff --git a/_posts/2009-11-09-modules-i-like-devel-declare.textile b/_posts/2009-11-09-modules-i-like-devel-declare.md index 79f7bf5..bd435e3 100644 --- a/_posts/2009-11-09-modules-i-like-devel-declare.textile +++ b/_posts/2009-11-09-modules-i-like-devel-declare.md @@ -1,10 +1,10 @@ --- layout: post -category: perl +summary: In which I share my enthusiasm for Devel::Declare. title: Modules I like Devel::Declare --- -For "$work":http://linkfluence.net/, I've been working on a job queue system, using Moose, Catalyst (for a REST API) and DBIx::Class to store the jobs and some meta (yeah I know, there is not enough job queue system already, the world really needs a new one ...). +For [$work](http://linkfluence.net/), I've been working on a job queue system, using Moose, Catalyst (for a REST API) and DBIx::Class to store the jobs and some meta (yeah I know, there is not enough job queue system already, the world really needs a new one ...). Basicaly, I've got a XXX::Worker class that all the workers extends. This class provide methods for fetching job, add a new job, mark a job as fail, retry, ... @@ -39,7 +39,7 @@ sub foo { But as I'm using Moose, I want to add more sugar to the syntax, so writing a new worker would be really more easy. -Here comes "Devel::Declare":http://search.cpan.org/perldoc?Devel::Declare. +Here comes [Devel::Declare](http://search.cpan.org/perldoc?Devel::Declare). The syntax I want for my worker is this one: @@ -62,9 +62,9 @@ fail bar { }; {% endhighlight %} -Where with '*work*' I write the code the writer will execute on a task, '*success*', a specific code that will be executed after a job is marked as successfull, and '*fail*' for when the job fail. 
+Where with `work` I write the code the writer will execute on a task, `success`, a specific code that will be executed after a job is marked as successfull, and `fail` for when the job fail. -I will show how to add the '*work*' keyword. I start by writing a new package: +I will show how to add the `work` keyword. I start by writing a new package: {% highlight perl %} package XXX::Meta; @@ -97,9 +97,9 @@ sub init_meta { 1; {% endhighlight %} -The *init_meta* method is provided by Moose: (from the POD) +The `init_meta` method is provided by Moose: (from the POD) -bq. The *init_meta* method sets up the metaclass object for the class specified by *for_class*. This method injects a a meta accessor into the class so you can get at this object. It also sets the class's superclass to base_class, with Moose::Object as the default. +> The `init_meta` method sets up the metaclass object for the class specified by `for_class`. This method injects a a meta accessor into the class so you can get at this object. It also sets the class's superclass to base_class, with Moose::Object as the default. So I inject into the class that will use XXX::Meta a new metaclass, XXX::Meta::Class. @@ -156,7 +156,7 @@ sub add_local_method { 1; {% endhighlight %} -Here I add to the *->meta* provided by Moose '*local_work*', which is an array that contains all my '*work*' methods. So each time I do something like +Here I add to the `->meta` provided by Moose `local_work`, which is an array that contains all my `work` methods. So each time I do something like {% highlight perl %} work foo {}; @@ -224,7 +224,7 @@ sub parser { 1; {% endhighlight %} -The *install_methodhandler* add the *work* keyword, with a block of code. This code is sent to the parser, that will add more sugar. With the inject_if_block, I inject the following line +The `install_methodhandler` add the `work` keyword, with a block of code. This code is sent to the parser, that will add more sugar. With the inject_if_block, I inject the following line {% highlight perl %} my ($self, $context, $job) = @_; diff --git a/_posts/2009-11-17-sd-the-peer-to-peer-bug-tracking-system.textile b/_posts/2009-11-17-sd-the-peer-to-peer-bug-tracking-system.md index 3b4f69f..582ce10 100644 --- a/_posts/2009-11-17-sd-the-peer-to-peer-bug-tracking-system.textile +++ b/_posts/2009-11-17-sd-the-peer-to-peer-bug-tracking-system.md @@ -1,6 +1,6 @@ --- layout: post -category: app +summary: In which I write about SD. title: sd the peer to peer bug tracking system --- @@ -8,20 +8,20 @@ title: sd the peer to peer bug tracking system Why should you use SD ? Well, at <a href="http://linkfluence.net/">$work</a> we are using <a href="http://www.redmine.org/">redmine</a> as our ticket tracker. I spend a good part of my time in a terminal, and checking the ticket system, adding a ticket, etc, using the browser, is annoying. I prefer something which I can use in my terminal and edit with my <a href="http://www.vim.org/">$EDITOR</a>. So if you recognize yourself in this description, you might want to take a look at SD. -bq. In the contrib directory of the SD distribution, you will find a SD ticket syntax file for vim. +> In the contrib directory of the SD distribution, you will find a SD ticket syntax file for vim. -h3. how to do some basic stuff with sd +## how to do some basic stuff with sd We will start by initializing a database. By default {% highlight bash %} -sd init +% sd init {% endhighlight %} will create a *.sd* directory in your $HOME. 
If you want to create in a specific path, you will need to set the SD_REPO in your env. {% highlight bash %} -SD_REPO=~/code/myproject/sd sd init +% SD_REPO=~/code/myproject/sd sd init {% endhighlight %} The init command creates an sqlite database and a config file. The config file is in the same format as the one used by git. @@ -29,18 +29,18 @@ The init command creates an sqlite database and a config file. The config file i Now we can create a ticket: {% highlight bash %} -SD_REPO=~/code/myproject/sd ticket create +% SD_REPO=~/code/myproject/sd ticket create {% endhighlight %} This will open your $EDITOR, the part you need to edit are specified. After editing this file, you will get something like this: -bc. Created ticket 11 (437b823c-8f69-46ff-864f-a5f74964a73f) -Created comment 12 (f7f9ee13-76df-49fe-b8b2-9b94f8c37989) +> Created ticket 11 (437b823c-8f69-46ff-864f-a5f74964a73f) +> Created comment 12 (f7f9ee13-76df-49fe-b8b2-9b94f8c37989) You can view the created ticket: {% highlight bash %} -SD_REPO=~/code/myproject/sd ticket show 11 +% SD_REPO=~/code/myproject/sd ticket show 11 {% endhighlight %} and the content of your ticket will be displayed. @@ -48,8 +48,8 @@ and the content of your ticket will be displayed. You can list and filter your tickets: {% highlight bash %} -SD_REPO=~/code/myproject/sd ticket list -SD_REPO=~/code/myproject/sd search --regex foo +% SD_REPO=~/code/myproject/sd ticket list +% SD_REPO=~/code/myproject/sd search --regex foo {% endhighlight %} You can edit the SD configuration using the config tool or editing directly the file. SD will look for three files : /etc/sdrc, $HOME/.sdrc or the config file in your replica (in our exemple, ~/code/myproject/sd/config). @@ -57,26 +57,26 @@ You can edit the SD configuration using the config tool or editing directly the For changing my email address, I can do it this way: {% highlight bash %} -SD_REPO=~/code/myproject/sd config user.email-address franck@lumberjaph.net +% SD_REPO=~/code/myproject/sd config user.email-address franck@lumberjaph.net {% endhighlight %} or directly {% highlight bash %} -SD_REPO=~/code/myproject/sd config edit +% SD_REPO=~/code/myproject/sd config edit {% endhighlight %} and update the user section. -h3. sd with git +## sd with git SD provides a script for git: *git-sd*. Let's start by creating a git repository: {% highlight bash %} -mkdir ~/code/git/myuberproject -cd ~/code/git/myuberproject +% mkdir ~/code/git/myuberproject +% cd ~/code/git/myuberproject git init {% endhighlight %} @@ -85,7 +85,7 @@ SD comes with a git hook named "git-post-commit-close-ticket" (in the contrib di now we can initialize our sd database {% highlight bash %} -git-sd init +% git-sd init {% endhighlight %} git-sd will try to find which email you have choosen for this project using git config, and use the same address for it's configuration. @@ -102,20 +102,20 @@ print "hello, world\n"; then {% highlight bash %} -git add hello.pl -git commit -m "first commit" hello.pl +% git add hello.pl +% git commit -m "first commit" hello.pl {% endhighlight %} now we can create a new entry {% highlight bash %} -git-sd ticket create # create a ticket to replace print with say +% git-sd ticket create # create a ticket to replace print with say {% endhighlight %} We note the UUID for the ticket: in my exemple, the following output is produced: -bc. 
Created ticket 11 (92878841-d764-4ac9-8aae-cd49e84c1ffe) -Created comment 12 (ddb1e56e-87cb-4054-a035-253be4bc5855) +> Created ticket 11 (92878841-d764-4ac9-8aae-cd49e84c1ffe) +> Created comment 12 (ddb1e56e-87cb-4054-a035-253be4bc5855) so my UUID is <strong>92878841-d764-4ac9-8aae-cd49e84c1ffe</strong>. @@ -132,28 +132,28 @@ say "hello, world"; and commit it {% highlight bash %} -git commit -m "Closes 92878841-d764-4ac9-8aae-cd49e84c1ffe" hello.pl +% git commit -m "Closes 92878841-d764-4ac9-8aae-cd49e84c1ffe" hello.pl {% endhighlight %} If I do a {% highlight bash %} -git ticket show 92878841-d764-4ac9-8aae-cd49e84c1ffe +% git ticket show 92878841-d764-4ac9-8aae-cd49e84c1ffe {% endhighlight %} The ticket will be marked as closed. -h3. sd with github +## sd with github Let's say you want to track issues from a project (I will use <a href="http://plackperl.org/">Plack</a> for this exemple) that is hosted on github. {% highlight bash %} -git clone git://github.com/miyagawa/Plack.git -git-sd clone --from "github:http://github.com/miyagawa/Plack" +% git clone git://github.com/miyagawa/Plack.git +% git-sd clone --from "github:http://github.com/miyagawa/Plack" # it's the same as -git-sd clone --from "github:miyagawa/Plack" +% git-sd clone --from "github:miyagawa/Plack" # or if you don't want to be prompted for username and password each time -git-sd clone --from github:http://githubusername:apitoken@github.com/miyagawa/Plack.git +% git-sd clone --from github:http://githubusername:apitoken@github.com/miyagawa/Plack.git {% endhighlight %} It will ask for you github username and your API token, and clone the database. @@ -161,7 +161,7 @@ It will ask for you github username and your API token, and clone the database. Later, you can publish your sd database like this: {% highlight bash %} -git-sd push --to "github:http://github.com/$user/$project" +% git-sd push --to "github:http://github.com/$user/$project" {% endhighlight %} Now you can code offline with git, and open/close tickets using SD :) diff --git a/_posts/2009-12-13-riak-perl-and-kiokudb.textile b/_posts/2009-12-13-riak-perl-and-kiokudb.md index c0a51de..5065042 100644 --- a/_posts/2009-12-13-riak-perl-and-kiokudb.textile +++ b/_posts/2009-12-13-riak-perl-and-kiokudb.md @@ -1,6 +1,6 @@ --- layout: post -category: perl +summary: In which I write about Riak Perl and KiokuDB. title: Riak, Perl and KiokuDB --- @@ -10,34 +10,34 @@ So Riak is a document based database, it's key value, no sql, REST, and in Erlan One of the nice things with Riak it's that it let you defined the N, R and W value for each operation. This values are: - * N: the number of replicas of each value to store - * R: the number of replicas required to perform a read operation - * W: the number of replicas needed for a write operation +* N: the number of replicas of each value to store +* R: the number of replicas required to perform a read operation +* W: the number of replicas needed for a write operation Riak comes with library for python ruby PHP and even javascript, but not for Perl. As all these libraries are just communicating with Riak via the REST interface, I've <a href="http://github.com/franckcuny/anyevent-riak">started to write one</a> using AnyEvent::HTTP, and <a href="http://github.com/franckcuny/kiokudb-backend-riak">also a backend for KiokuDB</a>. -h3. Installing and using Riak +## Installing and using Riak If you interested in Riak, you can install it easily. First, you will need the Erlang VM. 
On debian, a simple {% highlight bash %} -sudo aptitude install erlang +% sudo aptitude install erlang {% endhighlight %} install everything you need. Next step is to install Riak: {% highlight bash %} -wget http://hg.basho.com/riak/get/riak-0.6.2.tar.gz -tar xzf riak-0.6.2.tar.gz -cd riak -make -export RIAK=`pwd` +% wget http://hg.basho.com/riak/get/riak-0.6.2.tar.gz +% tar xzf riak-0.6.2.tar.gz +% cd riak +% make +% export RIAK=`pwd` {% endhighlight %} Now, you can start to use it with {% highlight bash %} -./start-fresh config/riak-demo.erlenv +% ./start-fresh config/riak-demo.erlenv {% endhighlight %} or if you want to test it in cluster mode, you can write a configuration like this: @@ -59,10 +59,10 @@ or if you want to test it in cluster mode, you can write a configuration like th {riak_web_logdir, "/tmp/riak_log"}. {% endhighlight %} -Copy this config on a second server, edit it to replace the riak_hostname and riak_nodename. On the first server, start it like show previously, then on the second, with +Copy this config on a second server, edit it to replace the riak\_hostname and riak\_nodename. On the first server, start it like show previously, then on the second, with {% highlight bash %} -./start-join.sh config/riak-demo.erlenv 192.168.0.11 +% ./start-join.sh config/riak-demo.erlenv 192.168.0.11 {% endhighlight %} where the IP address it the address of the first node in your cluster. @@ -70,11 +70,11 @@ where the IP address it the address of the first node in your cluster. Let's check if everything works: {% highlight bash %} -curl -X PUT -H "Content-type: application/json" \ +% curl -X PUT -H "Content-type: application/json" \ http://192.168.0.11:8098/jiak/blog/lumberjaph/ \ -d "{\"bucket\":\"blog\",\"key\":\"lumberjaph\",\"object\":{\"title\":\"I'm a lumberjaph, and I'm ok\"},\"links\":[]}" -curl -i http://192.168.0.11:8098/jiak/blog/lumberjaph/ +% curl -i http://192.168.0.11:8098/jiak/blog/lumberjaph/ {% endhighlight %} will output (with the HTTP blabla) @@ -83,7 +83,7 @@ will output (with the HTTP blabla) {"object":{"title":"I'm a lumberjaph, and I'm ok"},"vclock":"a85hYGBgzGDKBVIsbGubKzKYEhnzWBlCTs08wpcFAA==","lastmod":"Sun, 13 Dec 2009 20:28:04 GMT","vtag":"5YSzQ7sEdI3lABkEUFcgXy","bucket":"blog","key":"lumberjaph","links":[]} {% endhighlight %} -h3. Using Riak with Perl and KiokuDB +## Using Riak with Perl and KiokuDB I need to store various things in Riak: html pages, json data, and objects using KiokuDB. I've started to write a client for Riak with AnyEvent, so I can do simple operations at the moment, (listing information about a bucket, defining a new bucket with a specific schema, storing, retriving and deleting documents). To create a client, you need to @@ -108,8 +108,8 @@ my $client = AnyEvent::Riak->new( where: - * the W and DW values define that the request returns as soon as at least W nodes have received the request, and at least DW nodes have stored it in their storage backend. - * with the R value, the request returns as soon as R nodes have responded with a value or an error. You can also set this values when calling fetch, store and delete. By default, the value is set to 2. +* the W and DW values define that the request returns as soon as at least W nodes have received the request, and at least DW nodes have stored it in their storage backend. +* with the R value, the request returns as soon as R nodes have responded with a value or an error. You can also set this values when calling fetch, store and delete. By default, the value is set to 2. 
So, if you wan to store a value, retrieve it, then delete it, you can do: @@ -121,8 +121,7 @@ my $fetch = $client->fetch('foo', 'bar')->recv; my $delete = $client->delete('foo', 'bar')->recv; {% endhighlight %} -If there is an error, the croak method from AnyEvent is used, so you may -prefer to do this: +If there is an error, the croak method from AnyEvent is used, so you may prefer to do this: {% highlight perl %} use Try::Tiny; diff --git a/_posts/2009-12-20-moosex-net-api.md b/_posts/2009-12-20-moosex-net-api.md new file mode 100644 index 0000000..6589ec7 --- /dev/null +++ b/_posts/2009-12-20-moosex-net-api.md @@ -0,0 +1,110 @@ +--- +layout: post +summary: In which I introduce MooseX::Net::API. +title: MooseX::Net::API +--- + +## Net::Twitter + +I've been asked for [$work](http://linkfluence.net) to write an API client for [backtype](http://www.backtype.com/), as we plan to integrate it in one of our services. A couple of days before I was reading the [Net::Twitter](http://search.cpan.org/perldoc?Net::Twitter) source code, and I've found interesting how [semifor](http://blog.questright.com/) wrote it. + +Basically, what Net::Twitter does is this: for each API method, there is a `twitter_api_method` method, where the only code for this method is an API specification of the method. Let's look at the public timeline method: + +{% highlight perl %} +twitter_api_method home_timeline => ( + description => <<'', +Returns the 20 most recent statuses, including retweets, posted by the +authenticating user and that user's friends. This is the equivalent of +/timeline/home on the Web. + + path => 'statuses/home_timeline', + method => 'GET', + params => [qw/since_id max_id count page/], + required => [], + returns => 'ArrayRef[Status]', +); +{% endhighlight %} + +The `twitter_api_method` method is exported with Moose::Exporter. It generates a sub called `home_timeline` that is added to the class. + +## MooseX::Net::API + +As I've found this approch nice and simple, I thought about writing a [little framework](http://github.com/franckcuny/moosex-net-api) to easily write API client this way. I will show how I've write a [client for the Backtype API](http://github.com/franckcuny/net-backtype) using this (I've wrote some other client for private API at works too). + +## Backtype API + +First we defined our class: + +{% highlight perl %} +package Net::Backtweet; +use Moose; +use MooseX::Net::API; +{% endhighlight %} + +MooseX::Net::API export two methods: `net_api_declare` and `net_api_method`. The first method is for all the paramters that are common for each method. For Backtype, I'll get this: + +{% highlight perl %} +net_api_declare backtweet => ( + base_url => 'http://backtweets.com', + format => 'json', + format_mode => 'append', +); +{% endhighlight %} + +This set + +* the base URL for the API +* the format is JSON +* some API use an extension at the name of the method to determine the format. "append" do this. + +Right now three formats are supported: xml json and yaml. Two modes are supported: `append` and `content-type`. + +Now the `net_api_method` method. 
+ +{% highlight perl %} +net_api_method backtweet_search => ( + path => '/search', + method => 'GET', + params => [qw/q since key/], + required => [qw/q key/], + expected => [qw/200/], +); +{% endhighlight %} + +* path: path for the method (required) +* method: how to acces this resource (GET POST PUT and DELETE are supported) (required) +* params: list of parameters to access this resource (required) +* required: which keys are required +* expected: list of HTTP code accepted + +To use it: + +{% highlight perl %} +my $backtype = Net::Bactype->new(); +my $res = + $backtype->backtweet_search(q => "http://lumberjaph.net", key => "foo"); +warn Dump $res->{tweets}; +{% endhighlight %} + +## MooseX::Net::API implementation + +Now, what is done by the framework. The `net_api_declare` method add various attributes to the class: + +* api\_base_url: base URL of the API +* api_format: format for the query +* api\_format_mode: how the format is used (append or content-type) +* api_authentication: if the API requires authentication +* api_username: the username for accessing the resource +* api_password: the password +* api_authentication: does the resource requires to be authenticated + +It will also apply two roles, for serialization and deserialization, unless you provides your own roles for this. You can provides your own method for useragent and authentication too (the module only do basic authentication). + +For the `net_api_method` method, you can overload the authentication (in case some resources requires authentication). You can also overload the default code generated. + +In case there is an error, an MooseX::Net::API::Error will be throw. + +## Conclusion + +Right now, this module is not finished. I'm looking for suggestions (what should be added, done better, how I can improve stuff, ...). I'm not aiming to handle all possibles API, but at least most of the REST API avaible. I've uploaded a first version of +[MooseX::Net::API](http://search.cpan.org/perldoc?MooseX::Net::API) and [Net::Backtype](http://search.cpan.org/perldoc?Net::Backtype) on CPAN, and [the code](http://github.com/franckcuny/net-backtype) is also [available on GitHub](http://github.com/franckcuny/moosex-net-api). diff --git a/_posts/2009-12-20-moosex-net-api.textile b/_posts/2009-12-20-moosex-net-api.textile deleted file mode 100644 index 939122f..0000000 --- a/_posts/2009-12-20-moosex-net-api.textile +++ /dev/null @@ -1,110 +0,0 @@ ---- -layout: post -category: perl -title: MooseX::Net::API ---- - -h3. Net::Twitter - -I've been asked for "$work":http://linkfluence.net to write an API client for "backtype":http://www.backtype.com/, as we plan to integrate it in one of our services. A couple of days before I was reading the "Net::Twitter":http://search.cpan.org/perldoc?Net::Twitter source code, and I've found interesting how "semifor":http://blog.questright.com/ wrote it. - -Basically, what Net::Twitter does is this: for each API method, there is a *twitter_api_method* method, where the only code for this method is an API specification of the method. Let's look at the public timeline method: - -{% highlight perl %} -twitter_api_method home_timeline => ( - description => <<'', -Returns the 20 most recent statuses, including retweets, posted by the -authenticating user and that user's friends. This is the equivalent of -/timeline/home on the Web. 
- - path => 'statuses/home_timeline', - method => 'GET', - params => [qw/since_id max_id count page/], - required => [], - returns => 'ArrayRef[Status]', -); -{% endhighlight %} - -The *twitter_api_method* method is exported with Moose::Exporter. It generates a sub called *home_timeline* that is added to the class. - -h3. MooseX::Net::API - -As I've found this approch nice and simple, I thought about writing a "little framework":http://github.com/franckcuny/moosex-net-api to easily write API client this way. I will show how I've write a "client for the Backtype API":http://github.com/franckcuny/net-backtype using this (I've wrote some other client for private API at works too). - -h3. Backtype API - -First we defined our class: - -{% highlight perl %} -package Net::Backtweet; -use Moose; -use MooseX::Net::API; -{% endhighlight %} - -MooseX::Net::API export two methods: *net_api_declare* and *net_api_method*. The first method is for all the paramters that are common for each method. For Backtype, I'll get this: - -{% highlight perl %} -net_api_declare backtweet => ( - base_url => 'http://backtweets.com', - format => 'json', - format_mode => 'append', -); -{% endhighlight %} - -This set - - * the base URL for the API - * the format is JSON - * some API use an extension at the name of the method to determine the format. "append" do this. - -Right now three formats are supported: xml json and yaml. Two modes are supported: _append_ and _content-type_. - -Now the *net_api_method* method. - -{% highlight perl %} -net_api_method backtweet_search => ( - path => '/search', - method => 'GET', - params => [qw/q since key/], - required => [qw/q key/], - expected => [qw/200/], -); -{% endhighlight %} - - * path: path for the method (required) - * method: how to acces this resource (GET POST PUT and DELETE are supported) (required) - * params: list of parameters to access this resource (required) - * required: which keys are required - * expected: list of HTTP code accepted - -To use it: - -{% highlight perl %} -my $backtype = Net::Bactype->new(); -my $res = - $backtype->backtweet_search(q => "http://lumberjaph.net", key => "foo"); -warn Dump $res->{tweets}; -{% endhighlight %} - -h3. MooseX::Net::API implementation - -Now, what is done by the framework. The *net_api_declare* method add various attributes to the class: - - * api_base_url: base URL of the API - * api_format: format for the query - * api_format_mode: how the format is used (append or content-type) - * api_authentication: if the API requires authentication - * api_username: the username for accessing the resource - * api_password: the password - * api_authentication: does the resource requires to be authenticated - -It will also apply two roles, for serialization and deserialization, unless you provides your own roles for this. You can provides your own method for useragent and authentication too (the module only do basic authentication). - -For the *net_api_method* method, you can overload the authentication (in case some resources requires authentication). You can also overload the default code generated. - -In case there is an error, an MooseX::Net::API::Error will be throw. - -h3. Conclusion - -Right now, this module is not finished. I'm looking for suggestions (what should be added, done better, how I can improve stuff, ...). I'm not aiming to handle all possibles API, but at least most of the REST API avaible. 
I've uploaded a first version of -"MooseX::Net::API":http://search.cpan.org/perldoc?MooseX::Net::API and "Net::Backtype":http://search.cpan.org/perldoc?Net::Backtype on CPAN, and "the code":http://github.com/franckcuny/net-backtype is also "available on github":http://github.com/franckcuny/moosex-net-api. diff --git a/_posts/2009-12-21-tatsumaki-or-how-to-write-a-nice-webapp-in-less-than-two-hours.textile b/_posts/2009-12-21-tatsumaki-or-how-to-write-a-nice-webapp-in-less-than-two-hours.md index bd042e4..ade7e55 100644 --- a/_posts/2009-12-21-tatsumaki-or-how-to-write-a-nice-webapp-in-less-than-two-hours.textile +++ b/_posts/2009-12-21-tatsumaki-or-how-to-write-a-nice-webapp-in-less-than-two-hours.md @@ -1,6 +1,6 @@ --- layout: post -category: perl +summary: In which I write about Tatsumaki. title: Tatsumaki, or how to write a nice webapp in less than two hours --- diff --git a/_posts/2010-01-31-dancer-1.130.textile b/_posts/2010-01-31-dancer-1.130.md index 241deca..3990926 100644 --- a/_posts/2010-01-31-dancer-1.130.textile +++ b/_posts/2010-01-31-dancer-1.130.md @@ -1,12 +1,12 @@ --- layout: post -category: perl +summary: In which I announce Dancer 1.130 title: Dancer 1.130 --- -"Alexis":http://www.sukria.net/ ("sukria":http://search.cpan.org/~sukria/) released "Dancer":http://search.cpan.org/perldoc?Dancer 1.130 this weekend. Dancer is a small and nice web framework based on ruby's "sinatra":http://www.sinatrarb.com/. +[Alexis](http://www.sukria.net/) ([sukria](http://search.cpan.org/~sukria/)) released [Dancer](http://search.cpan.org/perldoc?Dancer) 1.130 this weekend. Dancer is a small and nice web framework based on ruby's [sinatra](http://www.sinatrarb.com/). -Dancer have few dependancies (and it doesn't depends anymore on CGI.pm). The path dispatching is done using rules declared with HTTP methods (get/post/put/delete), and they are mapped to a sub-routine which is returned as the response to the request. Sessions are supported, and two template engines (one of them is Template Toolkit) comes with the Core. Dancer::Template::MicroTemplate is also available on CPAN if you need a light template engine. +Dancer have few dependancies (and it doesn't depends anymore on CGI.pm). The path dispatching is done using rules declared with HTTP methods (get/post/put/delete), and they are mapped to a sub-routine which is returned as the response to the request. Sessions are supported, and two template engines (one of them is Template Toolkit) comes with the Core. Dancer::Template::MicroTemplate is also available on CPAN if you need a light template engine. You can easily test it with a simple script @@ -27,23 +27,17 @@ dance; and execute this script, point your browser to http://127.0.0.1:3000, and voila. -Dancer provides also a small helper to write a new application: +Dancer provides also a small helper to write a new application: `dancer -a MyApplication` -bc. dancer -a MyApplication +If you create an application with this script, an **app.psgi** file will be created. You can now execute `plackup --port 8080` -If you create an application with this script, an **app.psgi** file will be created. You can now execute +(which comes with [Plack](http://search.cpan.org/perldoc?Plack) the [Perl Web Server](http://plackperl.org/)) and test if everything works fine: `curl http://localhost:8080`. -bc. plackup --port 8080 - -(which comes with "Plack":http://search.cpan.org/perldoc?Plack the "Perl Web Server":http://plackperl.org/) and test if everything works fine: - -bc. 
curl http://localhost:8080 - -This release remove some components from the core and they are now available as differents CPAN distributions. Two new keyword have also been added, *header* and *prefix*. +This release remove some components from the core and they are now available as differents CPAN distributions. Two new keyword have also been added, **header** and **prefix**. If you want to read more about Dancer: - * "Dancer's documentation":http://search.cpan.org/perldoc?Dancer - * "review by xsawyerx":http://blogs.perl.org/users/sawyer_x/2010/01/i-gotz-me-a-dancer.html - * "gugod's review":http://gugod.org/2009/12/dancer.html - * "sukria's blog":http://www.sukria.net/fr/archives/tag/dancer/ + * [Dancer's documentation](http://search.cpan.org/perldoc?Dancer) + * [review by xsawyerx](http://blogs.perl.org/users/sawyer_x/2010/01/i-gotz-me-a-dancer.html) + * [gugod's review](http://gugod.org/2009/12/dancer.html) + * [sukria's blog](http://www.sukria.net/fr/archives/tag/dancer/) diff --git a/_posts/2010-02-03-sd-and-github.textile b/_posts/2010-02-03-sd-and-github.md index c95c008..e090e38 100644 --- a/_posts/2010-02-03-sd-and-github.textile +++ b/_posts/2010-02-03-sd-and-github.md @@ -1,19 +1,19 @@ --- layout: post -category: app +summary: In which I share how to use SD with GitHub title: SD and github --- -If you are using the version of <a href="http://syncwith.us/">SD</a> hosted on <a href="http://github.com/bestpractical/sd">github</a>, you can now clone and pull issues very easily. First, +If you are using the version of <a href="http://syncwith.us/">SD</a> hosted on <a href="http://github.com/bestpractical/sd">GitHub</a>, you can now clone and pull issues very easily. First, -{% highlight bash %} +{% highlight sh %} $ git config --global github.user franckcuny $ git config --global github.token myapitoken {% endhighlight %} This will set in your <strong>.gitconfig</strong> your github username and api token. Now, when you clone or pull some issues using sd: -{% highlight bash %} +{% highlight sh %} $ git sd clone --from github:sukria/Dancer {% endhighlight %} diff --git a/_posts/2010-03-07-github-explorer-a-preview.textile b/_posts/2010-03-07-github-explorer-a-preview.md index ba6e88a..1cfc3e1 100644 --- a/_posts/2010-03-07-github-explorer-a-preview.textile +++ b/_posts/2010-03-07-github-explorer-a-preview.md @@ -1,12 +1,12 @@ --- layout: post -category: graph -title: github explorer - a preview +summary: In which I share a preview of GitHub Explorer +title: GitHub Explorer - a preview --- -bq. *You may want to see the final version here: "github explorer":httpp://lumberjaph.net/blog/index.php/2010/03/25/github-explorer/* +> You may want to see the final version here: [GitHub Explorer](/github-explorer/). -For the last weeks, I've been working on the successor of "CPAN Explorer":http://cpan-explorer.org/. This time, I've decided to create some visualizations (probably 8) of the various communities using "Github":http://github.com/. I'm happy with the result, and will soon start to publish the maps (statics and interactives) with some analyses. I'm publishing two previews: the Perl community and the european developers. These are not final results. The colors, fonts, and layout may change. But the structure of the graphs will be the same. All the data was collected using the "github API":http://develop.github.com/. +For the last weeks, I've been working on the successor of [CPAN Explorer](http://cpan-explorer.org/). 
This time, I've decided to create some visualizations (probably 8) of the various communities using [GitHub](http://github.com/). I'm happy with the result, and will soon start to publish the maps (statics and interactives) with some analyses. I'm publishing two previews: the Perl community and the european developers. These are not final results. The colors, fonts, and layout may change. But the structure of the graphs will be the same. All the data was collected using the [GitHub API](http://develop.github.com/). <a href="http://www.flickr.com/photos/franck_/4413528529/sizes/l/">!/static/imgs/github-perl-preview.png(the Perl community on github)!</a> @@ -16,6 +16,6 @@ Each node on the graph represents a developer. When a developer "follows" anothe The second graph is a little bit more complex. It represents the European developers on github. Here the colors represent the languages used by the developers. It appears that ruby is by far the most represented language on github, as it dominates the whole map. Perl is the blue cluster at the bottom of the map, and the green snake is... Python. -Thanks to "bl0b":http://code.google.com/p/tinyaml/ for his suggestions :) +Thanks to [bl0b](http://code.google.com/p/tinyaml/) for his suggestions :) diff --git a/_posts/2010-03-19-easily-create-rest-interface-with-the-dancer-1.170.textile b/_posts/2010-03-19-easily-create-rest-interface-with-the-dancer-1.170.md index 73f04aa..e5f04f7 100644 --- a/_posts/2010-03-19-easily-create-rest-interface-with-the-dancer-1.170.textile +++ b/_posts/2010-03-19-easily-create-rest-interface-with-the-dancer-1.170.md @@ -1,21 +1,21 @@ --- layout: post -category: perl +summary: In which we see that it's easy to create REST interface with Dancer. title: Easily create REST interface with the Dancer 1.170 --- -This week, with "Alexi":http://www.sukria.net/fr/'s help, "I've been working on":http://github.com/sukria/Dancer on adding auto-(de)serialization to Dancer's request. This features will be available in the next "Dancer":http://perldancer.org/ version, the 1.170 (which will be out before April). +This week, with [Alexi](http://www.sukria.net/fr/)'s help, [I've been working on](http://github.com/sukria/Dancer) to add auto-(de)serialization to Dancer's request. This features will be available in the next [Dancer](http://perldancer.org/) version, the 1.170 (which will be out before April). The basic idea was to provides to developer a simple way to access data that have been send in a serialized format, and to properly serialize the response. At the moment, the supported serializers are : - * Dancer::Serialize::JSON - * Dancer::Serialize::YAML - * Dancer::Serialize::XML - * Dancer::Serialize::Mutable +* Dancer::Serialize::JSON +* Dancer::Serialize::YAML +* Dancer::Serialize::XML +* Dancer::Serialize::Mutable -h3. Configuring an application to use the serializer +## Configuring an application to use the serializer To activate serialization in your application: @@ -29,14 +29,14 @@ or in your configuration file: serializer: "JSON" {% endhighlight %} -h3. 
A simple handler +## A simple handler -Let's create a new dancer application (you can fetch the source on "github":http://github.com/franckcuny/dancerREST : +Let's create a new dancer application (you can fetch the source on [GitHub](http://github.com/franckcuny/dancerREST) : {% highlight bash %} -dancer -a dancerREST -cd dancerREST -vim dancerREST.pm +% dancer -a dancerREST +% cd dancerREST +% vim dancerREST.pm {% endhighlight %} then @@ -67,8 +67,8 @@ true; We can test if everything works as expected: {% highlight bash %} -plackup app.psgi & -curl -H "Content-Type: application/json" -X POST http://localhost:5000/api/user/ -d '{"name":"foo","id":1}' +% plackup app.psgi & +% curl -H "Content-Type: application/json" -X POST http://localhost:5000/api/user/ -d '{"name":"foo","id":1}' # => {"name":"foo","id":"1"} {% endhighlight %} @@ -98,23 +98,27 @@ get '/api/user/' => sub { If we want to fetch the full list: -bc. curl -H "Content-Type: application/json" http://localhost:5000/api/user/ +{% highlight sh %} +curl -H "Content-Type: application/json" http://localhost:5000/api/user/ # => [{"name":"foo","id":"1"}] +{% endhighlight %} and a specific user: -bc. curl -H "Content-Type: application/json" http://localhost:5000/api/user/1 +{% highlight sh %} +curl -H "Content-Type: application/json" http://localhost:5000/api/user/1 # => {"name":"foo"} +{% endhighlight %} -h3. The mutable serializer +## The mutable serializer -The mutable serializer will try to load an appropriate serializer guessing from the *Content-Type* and *Accept-Type* header. You can also overload this by adding a *content_type=application/json* parameter to your request. +The mutable serializer will try to load an appropriate serializer guessing from the **Content-Type** and **Accept-Type** header. You can also overload this by adding a **content_type=application/json** parameter to your request. While setting your serializer to mutable, your let your user decide which format they prefer between YAML, JSON and XML. -h3. And the bonus +## And the bonus -Dancer provides now a new method to the request object : *is_ajax*. Now you can write something like +Dancer provides now a new method to the request object : `is_ajax`. Now you can write something like {% highlight perl %} get '/user/:id' => sub { @@ -143,15 +147,15 @@ sub _render_user { If we want to simulate an AJAX query: {% highlight bash %} -curl -H "X-Requested-With: XMLHttpRequest" http://localhost:5000/user/1 +% curl -H "X-Requested-With: XMLHttpRequest" http://localhost:5000/user/1 {% endhighlight %} and we will obtain our result in JSON. But we can also test without the X-Requested-With: {% highlight bash %} -curl http://localhost:5000/user/1 +% curl http://localhost:5000/user/1 {% endhighlight %} and the template will be rendered. -Hope you like this new features. I've also been working on something similar for "Tatsumaki":http://github.com/miyagawa/tatsumaki. +Hope you like this new features. I've also been working on something similar for [Tatsumaki](http://github.com/miyagawa/tatsumaki). diff --git a/_posts/2010-03-25-github-explorer.textile b/_posts/2010-03-25-github-explorer.md index 891e862..95a51d3 100644 --- a/_posts/2010-03-25-github-explorer.textile +++ b/_posts/2010-03-25-github-explorer.md @@ -1,22 +1,20 @@ --- layout: post -category: graph -title: Github explorer +summary: In which I write about GitHub Explorer. +title: GitHub explorer --- -bq. 
*More informations about the poster are available on "this post":http://lumberjaph.net/graph/2010/04/02/github-poster.html* +> *More informations about the poster are available on [this post](http://lumberjaph.net/graph/2010/04/02/github-poster.html)* -Last year, with help from my coworkers at "Linkfluence":http://linkfluence.net/, I created two sets of maps of the "Perl":http://perl.org and "CPAN":http://search.cpan.org/'s community. For this, I collected data from CPAN to create three maps: +Last year, with help from my coworkers at [Linkfluence](http://linkfluence.net/), I created two sets of maps of the [Perl](http://perl.org) and [CPAN](http://search.cpan.org/)'s community. For this, I collected data from CPAN to create three maps: - * "dependencies between distributions":http://cpan-explorer.org/2009/07/28/new-version-of-the-distributions-map-for-yapceu/ + * [dependencies between distributions](http://cpan-explorer.org/2009/07/28/new-version-of-the-distributions-map-for-yapceu/) + * [which authors wre important in term of reliability](http://cpan-explorer.org/2009/07/28/version-of-the-authors-graph-for-yapceu/) + * [and how the websites theses authors are structured](http://cpan-explorer.org/2009/07/28/new-web-communities-map-for-yapceu/) - * "which authors wre important in term of reliability":http://cpan-explorer.org/2009/07/28/version-of-the-authors-graph-for-yapceu/ +I wanted to do something similar again, but not with the same data. So I took a look at what could be a good subject. One of the things that we saw from the map of the websites is the importance [GitHub](http://github.com/) is gaining inside the Perl community. GitHub provides a [really good API](http://develop.github.com/), so I started to play with it. - * "and how the websites theses authors are structured":http://cpan-explorer.org/2009/07/28/new-web-communities-map-for-yapceu/ - -I wanted to do something similar again, but not with the same data. So I took a look at what could be a good subject. One of the things that we saw from the map of the websites is the importance "github":http://github.com/ is gaining inside the Perl community. Github provides a "really good API":http://develop.github.com/, so I started to play with it. - -bq. This graph will be printed on a poster, size will be "A2":http://en.wikipedia.org/wiki/A2_paper_size and "A1":http://en.wikipedia.org/wiki/A1_paper_size". Please, contact me *(franck.cuny [at] linkfluence.net)* if you will be interested by one. +> This graph will be printed on a poster, size will be [A2](http://en.wikipedia.org/wiki/A2_paper_size) and [A1](http://en.wikipedia.org/wiki/A1_paper_size). Please, contact me franck.cuny [at] linkfluence.net if you will be interested by one. <img class="img_center" src="/static/imgs/general.png" title="github explorer global" /> @@ -24,42 +22,42 @@ bq. This graph will be printed on a poster, size will be "A2":http://en.wikipedi This time, I didn't aim for the Perl community only, but the whole github communities. I've created several graphs: -bq. 
all the graph are available "on my flickr account":http://www.flickr.com/photos/franck_/sets/72157623447857405/ +> all the graph are available "on my flickr account":http://www.flickr.com/photos/franck_/sets/72157623447857405/ - * "a graph of all languages":http://www.flickr.com/photos/franck_/4460144638/ - * "a graph of the Perl community":http://www.flickr.com/photos/franck_/4456072255/in/set-72157623447857405/ - * "a graph of the Ruby community":http://www.flickr.com/photos/franck_/4456914448/ - * "a graph of the Python community":http://www.flickr.com/photos/franck_/4456118597/in/set-72157623447857405/ - * "a graph of the PHP community":http://www.flickr.com/photos/franck_/4456830956/in/set-72157623447857405/ - * "a graph of the European community":http://www.flickr.com/photos/franck_/4456862434/in/set-72157623447857405/ - * "a graph of the Japan community":http://www.flickr.com/photos/franck_/4456129655/in/set-72157623447857405/ +* [a graph of all languages](http://www.flickr.com/photos/franck_/4460144638/) +* [a graph of the Perl community](http://www.flickr.com/photos/franck_/4456072255/in/set-72157623447857405/) +* [a graph of the Ruby community](http://www.flickr.com/photos/franck_/4456914448/) +* [a graph of the Python community](http://www.flickr.com/photos/franck_/4456118597/in/set-72157623447857405/) +* [a graph of the PHP community](http://www.flickr.com/photos/franck_/4456830956/in/set-72157623447857405/) +* [a graph of the European community](http://www.flickr.com/photos/franck_/4456862434/in/set-72157623447857405/) +* [a graph of the Japan community](http://www.flickr.com/photos/franck_/4456129655/in/set-72157623447857405/) -I think a disclaimer is important at this point. I know that github doesn't represent the whole open source community. With these maps, I don't claim to represent what the open source world looks like right now. This is not a troll about which language is best, or used at large. It's *ONLY* about github. +I think a disclaimer is important at this point. I know that github doesn't represent the whole open source community. With these maps, I don't claim to represent what the open source world looks like right now. This is not a troll about which language is best, or used at large. It's **ONLY** about GitHub. -Also, I won't provide deep analysis for each of these graphs, as I lack insight about some of those communities. So feel free to "re-use the graphs":http://franck.lumberjaph.net/graphs.tgz and provide your own analyses. +Also, I won't provide deep analysis for each of these graphs, as I lack insight about some of those communities. So feel free to [re-use the graphs](http://franck.lumberjaph.net/graphs.tgz) and provide your own analyses. -h3. Methodology +## Methodology -I didn't collect all the profiles. We (with "Guilhem":http://twitter.com/gfouetil decided to limit to peoples who are followed by at least two other people. We did the same thing for repositories, limiting to repositories which are at least forked once. Using this technique, more than 17k profiles have been collected, and nearly as many repositories. +I didn't collect all the profiles. We (with [Guilhem](http://twitter.com/gfouetil) decided to limit to peoples who are followed by at least two other people. We did the same thing for repositories, limiting to repositories which are at least forked once. Using this technique, more than 17k profiles have been collected, and nearly as many repositories. 
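To make the cut-off explicit, here is a purely illustrative sketch; the records and field names below are made up for the example, not the actual data structures used by the collector:

{% highlight perl %}
use strict;
use warnings;

# Hypothetical records standing in for the collected GitHub data.
my @profiles     = ( { login => 'alice', followers => 5 }, { login => 'bob', followers => 1 } );
my @repositories = ( { name  => 'foo',   forks     => 3 }, { name  => 'bar', forks     => 0 } );

# The selection criteria described above: keep profiles followed by at
# least two people, and repositories forked at least once.
my @kept_profiles = grep { $_->{followers} >= 2 } @profiles;
my @kept_repos    = grep { $_->{forks}     >= 1 } @repositories;
{% endhighlight %}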
-For each profile, using the github API, I've tried to determine what the main language for this person is. And with the help of the "geonames":http://www.geonames.org, find the right country to attach the profile to. +For each profile, using the github API, I've tried to determine what the main language for this person is. And with the help of the [geonames](http://www.geonames.org), find the right country to attach the profile to. Each profile is represented by a node. For each node, the following attributes are set: - * name of the profile - * main language used by this profile, determined by github - * name of the country - * follower count - * following count - * repository count +* name of the profile +* main language used by this profile, determined by github +* name of the country +* follower count +* following count +* repository count An edge is a link between two profiles. Each time someone follows another profile, a link is created. By default, the weight of this link is 1. For each project this person forked from the target profile, the weight is incremented. -As always, I've used "Gephi":http://gephi.org/ (now in version 0.7) to create the graphs. Feel free to download the various graph files and use them with Gephi. +As always, I've used [Gephi](http://gephi.org/) (now in version 0.7) to create the graphs. Feel free to download the various graph files and use them with Gephi. -h3. Github +## Github -bq. properties of the graph: 16443 nodes / 130650 edges +> properties of the graph: 16443 nodes / 130650 edges <a href="http://www.flickr.com/photos/franck_/4460144638/" title="Github - All - by languages by franck.cuny, on Flickr"><img class="img_center" src="http://farm5.static.flickr.com/4027/4460144638_48e7d83e80.jpg" width="482" height="500" alt="Github - All - by languages" /></a> @@ -67,11 +65,11 @@ The first map is about all the languages available on github. This one was real You can't miss Ruby on this map. As github uses Ruby on Rails, it's not really surprising that the Ruby community has a particular interest on this website. The main languages on github are what we can expect, with PHP, Python, Perl, Javascript. -Some languages are not really well represented. We can assume that most Haskell projects might use darcs, and therefore are not on github. Some other languages may use other platforms, like launchpad, or sourceforge. +Some languages are not really well represented. We can assume that most Haskell projects might use darcs, and therefore are not on github. Some other languages may use other platforms, like launchpad, or sourceforge. -h3. Perl +## Perl -bq. properties of the graph: 365 nodes / 4440 edges +> properties of the graph: 365 nodes / 4440 edges <a href="http://www.flickr.com/photos/franck_/4456842344/" title="Perl community on Github by franck.cuny, on Flickr"><img src="http://farm5.static.flickr.com/4002/4456842344_06f39127a8.jpg" class="img_center" width="500" height="437" alt="Perl community on Github" /></a> @@ -83,17 +81,17 @@ One important project that is not (deliberately) represented on this graph is th To conclude about Perl, there are only 365 nodes on this graph, but no less than 4440 edges. That's nearly two times the number of edges compared to the Python community. Perl is a really well structured community, probably thanks to the CPAN, which already acted as hub for contributors. -h3. 
Python +## Python -<blockquote>properties of the graph: 532 nodes / 2566 edges</blockquote> +> properties of the graph: 532 nodes / 2566 edges <a href="http://www.flickr.com/photos/franck_/4456118597/" title="Python community, by country, on Github by franck.cuny, on Flickr"><img src="http://farm3.static.flickr.com/2676/4456118597_9d39f8d413.jpg" class="img_center" width="470" height="500" alt="Python community, by country, on Github" /></a> The Python community looks a lot like the Perl community, but only in the structure of the graph. If we look closely, <a href="http://www.djangoproject.com/">Django</a> is the main project that represent Python on Github, in contrast with Perl where there is no leader. Some small projects gather small community of developers. -h3. PHP +## PHP -<blockquote>properties of the graph: 301 nodes / 1071 edges</blockquote> +> properties of the graph: 301 nodes / 1071 edges <a href="http://www.flickr.com/photos/franck_/4456830956/" title="PHP community on Github by franck.cuny, on Flickr"><img src="http://farm5.static.flickr.com/4033/4456830956_ef0e8f3587.jpg" class="img_center" width="500" height="372" alt="PHP community on Github" /></a> @@ -101,9 +99,9 @@ PHP is the only community that is structured this way on Github. We can clearly <a href="http://cakephp.org/">CakePHP</a> and <a href="http://www.symfony-project.org/">Symphony</a> are the two main projects. Nearly all the projects gather an international community, at the exception of a few japanese-only projects -h3. Ruby +## Ruby -<blockquote>properties of the graph: 3742 nodes / 24571 edges</blockquote> +> properties of the graph: 3742 nodes / 24571 edges <a href="http://www.flickr.com/photos/franck_/4456914448/" title="Ruby community, by country, on Github by franck.cuny, on Flickr"><img src="http://farm5.static.flickr.com/4012/4456914448_8089c3acca.jpg" class="img_center" width="500" height="469" alt="Ruby community, by country, on Github" /></a> @@ -111,17 +109,17 @@ As for the Github graph, we can clearly see that some countries are isolated. On The main projects that gather most of the hackers are <a href="http://rubyonrails.org/">Rails</a> and <a href="http://sinatrarb.com/">Sinatra</a>, two famous web frameworks. -h3. Europe +## Europe -<blockquote>properties of the graph: 2711 nodes / 11259 edges</blockquote> +> properties of the graph: 2711 nodes / 11259 edges <a href="http://www.flickr.com/photos/franck_/4456862434/" title="Europe community on Github by franck.cuny, on Flickr"><img src="http://farm5.static.flickr.com/4062/4456862434_324e7b2c75.jpg" class="img_center" width="500" height="450" alt="Europe community on Github" /></a> This one shows interesting features. Some countries are really isolated. If we look at Spain, we can see a community of Ruby programmers, with an important connectivity between them, but no really strong connection with any foreign developers. We can clearly see the Perl community exists as only one community, and is not split by country. The same is true for Python. -h3. 
Japanese hackers community +## Japanese hackers community -<blockquote>properties of the graph: 559 nodes / 5276 edges</blockquote> +> properties of the graph: 559 nodes / 5276 edges <a href="http://www.flickr.com/photos/franck_/4456129655/" title="Japan community on github by franck.cuny, on Flickr"><img src="http://farm3.static.flickr.com/2800/4456129655_8c6f7f20a0.jpg" class="img_center" width="500" height="410" alt="Japan community on github" /></a> @@ -133,7 +131,7 @@ We have seen in the previous graph that the Japanese hackers are always isolated This is a really well-connected graph too. -h3. Conclusions and graphs +## Conclusions and graphs I may have not provided a deep analysis of all the graph. I don't have knowledge of most of the community outside of Perl. Feel free to <a href="http://franck.lumberjaph.net/blog/graphs.tgz">download the graph</a>, to load them in <a href="http://gephi.org/">Gephi</a>, experiment, and provides your own thoughts. diff --git a/_posts/2010-04-02-github-poster.textile b/_posts/2010-04-02-github-poster.md index b111771..b111771 100644 --- a/_posts/2010-04-02-github-poster.textile +++ b/_posts/2010-04-02-github-poster.md diff --git a/_posts/2010-04-03-more-fun-with-tatsumaki-and-plack.md b/_posts/2010-04-03-more-fun-with-tatsumaki-and-plack.md new file mode 100644 index 0000000..8781689 --- /dev/null +++ b/_posts/2010-04-03-more-fun-with-tatsumaki-and-plack.md @@ -0,0 +1,166 @@ +--- +layout: post +summary: In which I have fun with Tatsumaki. +title: More fun with Tatsumaki and Plack +--- + +Lately I've been toying a lot with [Plack](http://plackperl.org/) and two Perl web framework: [Tatsumaki](http://search.cpan.org/perldoc?Tatsumaki) and [Dancer](http://search.cpan.org/perldoc?Dancer). I use both of them for different purposes, as their features complete each other. + +## Plack + +If you don't already know what Plack is, you would want to take a look at the following Plack resources: + +* [Plack (redesigned) website](http://plackperl.org) +* [Plack documentation](http://search.cpan.org/perldoc?Plack) +* [Miyagawa's screencast](http://bulknews.typepad.com/blog/2009/11/plack-and-psgi-screencast-and-feedbacks.html) +* [Plack advent calendar](http://advent.plackperl.org/) + +> As [sukria](http://www.sukria.net/) is planning to talk about [Dancer](http://perldancer.org) during the [FPW 2010](http://journeesperl.fr/fpw2010/index.html), I will probably do a talk about Plack. + +After reading some code, I've started to write two middleware: the first one add ETag header to the HTTP response, and the second one provides a way to limit access to your application. + +### Plack::Middleware::ETag + +This middleware is really simple: for each request, an [ETag](http://en.wikipedia.org/wiki/HTTP_ETag) header is added to the response. The ETag value is a sha1 of the response's content. In case the content is a file, it works like apache, using various information from the file: inode, modified time and size. This middleware can be used with [Plack::Middleware::ConditionalGET](http://search.cpan.org/perldoc?Plack::Middleware::ConditionalGET), so the client will have the ETag information for the page, and when he will do a request next time, it will send an "if-modified" header. If the ETag is the same, a 304 response will be send, meaning the content have not been modified. This module is [available on CPAN](http://search.cpan.org/perldoc?Plack::Middleware::ETag). + +Let's see how it works. 
First, we create a really simple application (we call it app.psgi): + +{% highlight perl %} +#!/usr/bin/env perl +use strict; +use warnings; +use Plack::Builder; + +builder { + enable "Plack::Middleware::ConditionalGET"; + enable "Plack::Middleware::ETag"; + sub { + ['200', ['Content-Type' => 'text/html'], ['Hello world']]; + }; +}; +{% endhighlight %} + +Now we can test it: + +{% highlight bash %} +% plackup app.psgi& +% curl -D - http://localhost:5000 +HTTP/1.0 200 OK +Date: Sat, 03 Apr 2010 09:31:43 GMT +Server: HTTP::Server::PSGI +Content-Type: text/html +ETag: 7b502c3a1f48c8609ae212cdfb639dee39673f5e +Content-Length: 11 + +% curl -H "If-None-Match: 7b502c3a1f48c8609ae212cdfb639dee39673f5e" -D - http://localhost:5000 +HTTP/1.0 304 Not Modified +Date: Sat, 03 Apr 2010 09:31:45 GMT +Server: HTTP::Server::PSGI +ETag: 7b502c3a1f48c8609ae212cdfb639dee39673f5e +{% endhighlight %} + +### Plack::Middleware::Throttle + +[With this middleware](http://github.com/franckcuny/plack--middleware--throttle), you can control how many times you want to provide an access to your application. This module is not yet on CPAN, has I want to add some features, but you can get the code on github. There is four methods to control access: + +* Plack::Middleware::Throttle::Hourly: how many times in one hour someone can access the application +* P::M::T::Daily: the same, but for a day +* P::M::T::Interval: which interval the client must wait between two query +* by combining the three previous methods + +To store sessions informations, you can use any cache backend that provides `get`, `set` and `incr` methods. By default, if no backend is provided, it will store informations in a hash. You can easily modify the defaults throttling strategies by subclassing all the classes. + +Let's write another application to test it: + +{% highlight perl %} +#!/usr/bin/env perl +use strict; +use warnings; +use Plack::Builder; + +builder { + enable "Plack::Middleware::Throttle::Hourly", max => 2; + sub { + ['200', ['Content-Type' => 'text/html'], ['Hello world']]; + }; +}; +{% endhighlight %} + +then test + +{% highlight bash %} +% curl -D - http://localhost:5000/ +HTTP/1.0 200 OK +Date: Sat, 03 Apr 2010 09:57:40 GMT +Server: HTTP::Server::PSGI +Content-Type: text/html +X-RateLimit-Limit: 2 +X-RateLimit-Remaining: 1 +X-RateLimit-Reset: 140 +Content-Length: 11 + +Hello world + +% curl -D - http://localhost:5000/ +HTTP/1.0 200 OK +Date: Sat, 03 Apr 2010 09:57:40 GMT +Server: HTTP::Server::PSGI +Content-Type: text/html +X-RateLimit-Limit: 2 +X-RateLimit-Remaining: 0 +X-RateLimit-Reset: 140 +Content-Length: 11 + +Hello world + +% curl -D - http://localhost:5000/ +HTTP/1.0 503 Service Unavailable +Date: Sat, 03 Apr 2010 09:57:41 GMT +Server: HTTP::Server::PSGI +Content-Type: text/plain +X-RateLimit-Reset: 139 +Content-Length: 15 + +Over rate limit +{% endhighlight %} + +Some HTTP headers are added to the response : + +* **X-RateLimit-Limit**: how many request can be done +* **X-RateLimit-Remaining**: how many requests are available +* **X-RateLimit-Reset**: when will the counter be reseted (in seconds) + +This middleware could be a very good companion to the [Dancer REST stuff](http://www.sukria.net/fr/archives/2010/03/19/let-the-dancer-rest/) [added recently](/easily-create-rest-interface-with-the-dancer-1170/). + +## another Tatsumaki application with Plack middlewares + +To demonstrate the use of this two middleware, [I wrote a small application](http://github.com/franckcuny/feeddiscovery) with Tatsumaki. 
This application fetch a page, parse it to find all the feeds declared, and return a JSON with the result. + +{% highlight bash %} +% GET http://feeddiscover.tirnan0g.org/?url=http://lumberjaph.net/blog/ +{% endhighlight %} + +will return + +{% highlight javascript %} +% [{"href":"http://lumberjaph.net/blog/index.php/feed/","type":"application/rss+xml","title":"i'm a lumberjaph RSS Feed"}] +{% endhighlight %} + +This application is composed of one handler, that handle only **GET** request. The request will fetch the url given in the **url** parameter, scrap the content to find the links to feeds, and cache the result with Redis. The response is a JSON string with the informations. + +The interesting part is the app.psgi file: + +{% highlight perl %} +my $app = Tatsumaki::Application->new(['/' => 'FeedDiscovery::Handler'],); + +builder { + enable "Plack::Middleware::ConditionalGET"; + enable "Plack::Middleware::ETag"; + enable "Plack::Middleware::Throttle::Hourly", + backend => Redis->new(server => '127.0.0.1:6379',), + max => 100; + $app; +}; +{% endhighlight %} + +The application itself is really simple: for a given url, the Tatsumaki::HTTPClient fetch an url, I use [Web::Scraper](http://search.cpan.org/perldoc?Web::Scraper) to find the **link rel="alternate"** from the page, if something is found, it's stored in Redis, then a JSON string is returned to the client. diff --git a/_posts/2010-04-03-more-fun-with-tatsumaki-and-plack.textile b/_posts/2010-04-03-more-fun-with-tatsumaki-and-plack.textile deleted file mode 100644 index 7b47c0f..0000000 --- a/_posts/2010-04-03-more-fun-with-tatsumaki-and-plack.textile +++ /dev/null @@ -1,166 +0,0 @@ ---- -layout: post -category: perl -title: More fun with Tatsumaki and Plack ---- - -Lately I've been toying a lot with "Plack":http://plackperl.org/ and two Perl web framework: "Tatsumaki":http://search.cpan.org/perldoc?Tatsumaki and "Dancer":http://search.cpan.org/perldoc?Dancer. I use both of them for different purposes, as their features complete each other. - -h3. Plack - -If you don't already know what Plack is, you would want to take a look at the following Plack resources: - - * "Plack (redesigned) website":http://plackperl.org - * "Plack documentation":http://search.cpan.org/perldoc?Plack - * "miyagawa's screencast":http://bulknews.typepad.com/blog/2009/11/plack-and-psgi-screencast-and-feedbacks.html - * "Plack advent calendar":http://advent.plackperl.org/ - -.bq As "sukria":http://www.sukria.net/ is planning to talk about "Dancer":http://perldancer.org during the "FPW 2010":http://journeesperl.fr/fpw2010/index.html, I will probably do a talk about Plack. - -After reading some code, I've started to write two middleware: the first one add ETag header to the HTTP response, and the second one provides a way to limit access to your application. - -h4. Plack::Middleware::ETag - -This middleware is really simple: for each request, an "ETag":http://en.wikipedia.org/wiki/HTTP_ETag header is added to the response. The ETag value is a sha1 of the response's content. In case the content is a file, it works like apache, using various information from the file: inode, modified time and size. This middleware can be used with "Plack::Middleware::ConditionalGET":http://search.cpan.org/perldoc?Plack::Middleware::ConditionalGET, so the client will have the ETag information for the page, and when he will do a request next time, it will send an "if-modified" header. If the ETag is the same, a 304 response will be send, meaning the content have not been modified. 
This module is "available on CPAN":http://search.cpan.org/perldoc?Plack::Middleware::ETag. - -Let's see how it works. First, we create a really simple application (we call it app.psgi): - -{% highlight perl %} -#!/usr/bin/env perl -use strict; -use warnings; -use Plack::Builder; - -builder { - enable "Plack::Middleware::ConditionalGET"; - enable "Plack::Middleware::ETag"; - sub { - ['200', ['Content-Type' => 'text/html'], ['Hello world']]; - }; -}; -{% endhighlight %} - -Now we can test it: - -{% highlight bash %} -> plackup app.psgi& -> curl -D - http://localhost:5000 -HTTP/1.0 200 OK -Date: Sat, 03 Apr 2010 09:31:43 GMT -Server: HTTP::Server::PSGI -Content-Type: text/html -ETag: 7b502c3a1f48c8609ae212cdfb639dee39673f5e -Content-Length: 11 - -> curl -H "If-None-Match: 7b502c3a1f48c8609ae212cdfb639dee39673f5e" -D - http://localhost:5000 -HTTP/1.0 304 Not Modified -Date: Sat, 03 Apr 2010 09:31:45 GMT -Server: HTTP::Server::PSGI -ETag: 7b502c3a1f48c8609ae212cdfb639dee39673f5e -{% endhighlight %} - -h4. Plack::Middleware::Throttle - -"With this middleware":http://github.com/franckcuny/plack--middleware--throttle, you can control how many times you want to provide an access to your application. This module is not yet on CPAN, has I want to add some features, but you can get the code on github. There is four methods to control access: - - * Plack::Middleware::Throttle::Hourly: how many times in one hour someone can access the application - * P::M::T::Daily: the same, but for a day - * P::M::T::Interval: which interval the client must wait between two query - * by combining the three previous methods - -To store sessions informations, you can use any cache backend that provides *get*, *set* and *incr* methods. By default, if no backend is provided, it will store informations in a hash. You can easily modify the defaults throttling strategies by subclassing all the classes. - -Let's write another application to test it: - -{% highlight perl %} -#!/usr/bin/env perl -use strict; -use warnings; -use Plack::Builder; - -builder { - enable "Plack::Middleware::Throttle::Hourly", max => 2; - sub { - ['200', ['Content-Type' => 'text/html'], ['Hello world']]; - }; -}; -{% endhighlight %} - -then test - -{% highlight bash %} -$ curl -D - http://localhost:5000/ -HTTP/1.0 200 OK -Date: Sat, 03 Apr 2010 09:57:40 GMT -Server: HTTP::Server::PSGI -Content-Type: text/html -X-RateLimit-Limit: 2 -X-RateLimit-Remaining: 1 -X-RateLimit-Reset: 140 -Content-Length: 11 - -Hello world - -$ curl -D - http://localhost:5000/ -HTTP/1.0 200 OK -Date: Sat, 03 Apr 2010 09:57:40 GMT -Server: HTTP::Server::PSGI -Content-Type: text/html -X-RateLimit-Limit: 2 -X-RateLimit-Remaining: 0 -X-RateLimit-Reset: 140 -Content-Length: 11 - -Hello world - -$ curl -D - http://localhost:5000/ -HTTP/1.0 503 Service Unavailable -Date: Sat, 03 Apr 2010 09:57:41 GMT -Server: HTTP::Server::PSGI -Content-Type: text/plain -X-RateLimit-Reset: 139 -Content-Length: 15 - -Over rate limit -{% endhighlight %} - -Some HTTP headers are added to the response : - - * *X-RateLimit-Limit*: how many request can be done - * *X-RateLimit-Remaining*: how many requests are available - * *X-RateLimit-Reset*: when will the counter be reseted (in seconds) - -This middleware could be a very good companion to the "Dancer REST stuff":http://www.sukria.net/fr/archives/2010/03/19/let-the-dancer-rest/ "added recently":http://lumberjaph.net/blog/index.php/2010/03/19/easily-create-rest-interface-with-the-dancer-1170/. - -h3. 
another Tatsumaki application with Plack middlewares - -To demonstrate the use of this two middleware, "I've wrote a small application":http://github.com/franckcuny/feeddiscovery with Tatsumaki. This application fetch a page, parse it to find all the feeds declared, and return a JSON with the result. - -{% highlight bash %} - GET http://feeddiscover.tirnan0g.org/?url=http://lumberjaph.net/blog/ -{% endhighlight %} - -will return - -{% highlight javascript %} - [{"href":"http://lumberjaph.net/blog/index.php/feed/","type":"application/rss+xml","title":"i'm a lumberjaph RSS Feed"}] -{% endhighlight %} - -This application is composed of one handler, that handle only *GET* request. The request will fetch the url given in the *url* parameter, scrap the content to find the links to feeds, and cache the result with Redis. The response is a JSON string with the informations. - -The interesting part is the app.psgi file: - -{% highlight perl %} -my $app = Tatsumaki::Application->new(['/' => 'FeedDiscovery::Handler'],); - -builder { - enable "Plack::Middleware::ConditionalGET"; - enable "Plack::Middleware::ETag"; - enable "Plack::Middleware::Throttle::Hourly", - backend => Redis->new(server => '127.0.0.1:6379',), - max => 100; - $app; -}; -{% endhighlight %} - -The application itself is really simple: for a given url, the Tatsumaki::HTTPClient fetch an url, I use "Web::Scraper":http://search.cpan.org/perldoc?Web::Scraper to find the *link rel="alternate"* from the page, if something is found, it's stored in Redis, then a JSON string is returned to the client. diff --git a/_posts/2010-04-14-presque-a-redis-tatsumaki-based-message-queue.md b/_posts/2010-04-14-presque-a-redis-tatsumaki-based-message-queue.md new file mode 100644 index 0000000..cdb92c7 --- /dev/null +++ b/_posts/2010-04-14-presque-a-redis-tatsumaki-based-message-queue.md @@ -0,0 +1,87 @@ +--- +layout: post +summary: In which I introduce presque +title: presque, a Redis / Tatsumaki based message queue +--- + +[presque](http://github.com/franckcuny/presque/tree/) is a small message queue service build on top of [redis](http://code.google.com/p/redis/) and [Tatsumaki](http://search.cpan.org/perldoc?Tatsumaki). It's heavily inspired by [RestMQ](http://github.com/gleicon/restmq) and [resque](http://github.com/defunkt/resque). + +* Communications are done in JSON over HTTP +* Queues and messages are organized as REST resources +* A worker can be writen in any language that make a HTTP request and read JSON +* Thanks to redis, the queues are persistent + +## Overview + +resque need a configuration file, writen in YAML that contains the host and port for the Redis server. + +{% highlight yaml %} +redis: + host: 127.0.0.1 + port: 6379 +{% endhighlight %} + +Let's start the server: + +{% highlight bash %} +% plackup app.psgi --port 5000 +{% endhighlight %} + +The applications provides some HTTP routes: + +* **/**: a basic HTML page with some information about the queues +* **/q/**: REST API to get and post job to a queue +* **/j/**: REST API to get some information about a queue +* **/control/**: REST API to control a queue (start or stop consumers) +* **/stats/**: REST API to fetch some stats (displayed on the index page) + +Queues are created on the fly, when a job for an unknown queue is inserted. When a new job is created, the JSON send in the POST will be stored "as is". There is no restriction on the schema or the content of the JSON. 
+ +Creating a new job simply consist to : + +{% highlight bash %} +% curl -X POST "http://localhost:5000/q/foo" -d '{"foo":"bar", "foo2":"bar" }' +{% endhighlight %} + +and fetching the job: + +{% highlight bash %} +% curl "http://localhost:5000/q/foo" +{% endhighlight %} + +When a job is fetched, it's removed from the queue. + +## A basic worker + +I've also uploaded [presque::worker](http://github.com/franckcuny/presque-worker/tree/) to GitHub. It's based on [AnyEvent::HTTP](http://search.cpan.org/perldoc?AnyEvent::HTTP) and [Moose](http://search.cpan.org/perldoc?Moose). Let's write a basic worker using this class: + +{% highlight perl %} +use strict; +use warnings; +use 5.012; # w00t + +package simple::worker; +use Moose; +extends 'presque::worker'; + +sub work { + my ($self, $job) = @_; + say "job's done"; + ...; # yadda yadda! + return; +} + +package main; +use AnyEvent; + +my $worker = + simple::worker->new(base_uri => 'http://localhost:5000', queue => 'foo'); + +AnyEvent->condvar->recv; +{% endhighlight %} + +A worker have to extends the presque::worker class, and implement the method *work*. When the object is created, the class check if this method is avalaible. You can also provide a `fail` method, which will be called when an error occur. + +## The future + +I plan to add support for [websocket](http://en.wikipedia.org/wiki/WebSocket), and probably [XMPP](http://en.wikipedia.org/wiki/Xmpp). More functionalities to the worker too: logging, forking, handling many queues, ... I would like to add priorities to queue also, and maybe scheluding job for a given date (not sure if it's feasable with Redis). diff --git a/_posts/2010-04-14-presque-a-redis-tatsumaki-based-message-queue.textile b/_posts/2010-04-14-presque-a-redis-tatsumaki-based-message-queue.textile deleted file mode 100644 index c23d43d..0000000 --- a/_posts/2010-04-14-presque-a-redis-tatsumaki-based-message-queue.textile +++ /dev/null @@ -1,87 +0,0 @@ ---- -layout: post -category: perl -title: presque, a Redis / Tatsumaki based message queue ---- - -"presque":http://github.com/franckcuny/presque/tree/ is a small message queue service build on top of "redis":http://code.google.com/p/redis/ and "Tatsumaki":http://search.cpan.org/perldoc?Tatsumaki. It's heavily inspired by "RestMQ":http://github.com/gleicon/restmq and "resque":http://github.com/defunkt/resque. - - * Communications are done in JSON over HTTP - * Queues and messages are organized as REST resources - * A worker can be writen in any language that make a HTTP request and read JSON - * Thanks to redis, the queues are persistent - -h3. Overview - -resque need a configuration file, writen in YAML that contains the host and port for the Redis server. - -{% highlight yaml %} -redis: - host: 127.0.0.1 - port: 6379 -{% endhighlight %} - -Let's start the server: - -{% highlight bash %} -$ plackup app.psgi --port 5000 -{% endhighlight %} - -The applications provides some HTTP routes: - - * */*: a basic HTML page with some information about the queues - * */q/*: REST API to get and post job to a queue - * */j/*: REST API to get some information about a queue - * */control/*: REST API to control a queue (start or stop consumers) - * */stats/*: REST API to fetch some stats (displayed on the index page) - -Queues are created on the fly, when a job for an unknown queue is inserted. When a new job is created, the JSON send in the POST will be stored "as is". There is no restriction on the schema or the content of the JSON. 
- -Creating a new job simply consist to : - -{% highlight bash %} -curl -X POST "http://localhost:5000/q/foo" -d '{"foo":"bar", "foo2":"bar" }' -{% endhighlight %} - -and fetching the job: - -{% highlight bash %} -curl "http://localhost:5000/q/foo" -{% endhighlight %} - -When a job is fetched, it's removed from the queue. - -h3. A basic worker - -I've also uploaded "presque::worker":http://github.com/franckcuny/presque-worker/tree/ to github. It's based on "AnyEvent::HTTP":http://search.cpan.org/perldoc?AnyEvent::HTTP and "Moose":http://search.cpan.org/perldoc?Moose. Let's write a basic worker using this class: - -{% highlight perl %} -use strict; -use warnings; -use 5.012; # w00t - -package simple::worker; -use Moose; -extends 'presque::worker'; - -sub work { - my ($self, $job) = @_; - say "job's done"; - ...; # yadda yadda! - return; -} - -package main; -use AnyEvent; - -my $worker = - simple::worker->new(base_uri => 'http://localhost:5000', queue => 'foo'); - -AnyEvent->condvar->recv; -{% endhighlight %} - -A worker have to extends the presque::worker class, and implement the method *work*. When the object is created, the class check if this method is avalaible. You can also provide a **fail** method, which will be called when an error occur. - -h3. The future - -I plan to add support for "websocket":http://en.wikipedia.org/wiki/WebSocket, and probably "XMPP":http://en.wikipedia.org/wiki/Xmpp. More functionalities to the worker too: logging, forking, handling many queues, ... I would like to add priorities to queue also, and maybe scheluding job for a given date (not sure if it's feasable with Redis). diff --git a/_posts/2010-04-19-the-dancer-ecosystem.textile b/_posts/2010-04-19-the-dancer-ecosystem.md index 6f3f7d0..d10d394 100644 --- a/_posts/2010-04-19-the-dancer-ecosystem.textile +++ b/_posts/2010-04-19-the-dancer-ecosystem.md @@ -1,12 +1,12 @@ --- layout: post -category: perl +summary: In which we look at Dancer's ecosystem. title: The Dancer Ecosystem --- Even though it's still a young project, an active community is starting to emerge around <a href="http://search.cpan.org/perldoc?Dancer">Dancer</a>. Some modules start to appear on CPAN and github to add functionalities, or to extend existing ones. -h3. Templates +## Templates By default, Dancer comes with support for two templating systems: <a href="http://search.cpan.org/dist/Template-Toolkit/">Template Toolkit</a> and Dancer::Template::Simple, a small templating engine written by <a href="http://www.sukria.net/">sukria</a>. But support for other templating systems are available: @@ -18,7 +18,7 @@ By default, Dancer comes with support for two templating systems: <a href="http: * <a href="http://search.cpan.org/perldoc?Dancer::Template::Mason">Dancer::Template::Mason</a> by Yanick Champoux * <a href="http://search.cpan.org/perldoc?Dancer::Template::Haml">Dancer::Template::Haml</a> by David Moreno -h3. Logger +## Logger Out of the box, Dancer only has a simple logging system to write to file, but more logging sytems are available: @@ -30,7 +30,7 @@ Out of the box, Dancer only has a simple logging system to write to file, but mo The last one is for writing directly your log message via <ah href="http://search.cpan.org/perldoc?Plack">Plack</a>. You can use a middleware like <a href="http://search.cpan.org/~miyagawa/Plack-0.9932/lib/Plack/Middleware/LogDispatch.pm">P::M::LogDispatch</a> or <a href="http://search.cpan.org/~miyagawa/Plack-0.9932/lib/Plack/Middleware/Log4perl.pm">P::M::Log4perl</a> to handle logs for your application. 
Even better, if you use <a href="http://github.com/miyagawa/Plack-Middleware-ConsoleLogger">P::M::ConsoleLogger</a>, you can have logs from your Dancer application in your javascript console. -h3. Debug +## Debug To debug your application with Plack, you can use the awesome <a href="http://search.cpan.org/perldoc?Plack::Middleware::Debug">Plack::Middleware::Debug</a>. I've writen <a href="http://github.com/franckcuny/dancer-debug">Dancer::Debug</a> (which requires my fork of <a href="http://github.com/franckcuny/Plack-Middleware-Debug">P::M::Debug</a>), a middleware that add panels, with specific informations for Dancer applications. @@ -53,11 +53,11 @@ $handler = builder { }; {% endhighlight %} -h3. Plugins +## Plugins Dancer has support for plugins since a few version. There is not a lot of plugins at the moment, but this will soon improve. Plugins support is one of the top priorities for the 1.2 release. -h4. Dancer::Plugin::REST +### Dancer::Plugin::REST <a href="http://github.com/sukria/Dancer-Plugin-REST">This one is really nice</a>. This plugin, used with the serialization stuff, allow you to write easily REST application. @@ -79,7 +79,7 @@ And you got the following routes: * PUT /user/:id * PUT /user/:id.:format -h4. Dancer::Plugin::Database +### Dancer::Plugin::Database <a href="http://github.com/bigpresh/Dancer-Plugin-Database">This plugin</a>, by bigpresh, add the <strong>database</strong> keyword to your app. @@ -96,12 +96,12 @@ get '/widget/view/:id' => sub { }; {% endhighlight %} -h4. Dancer::Plugin::SiteMap +### Dancer::Plugin::SiteMap <a href="http://search.cpan.org/perldoc?Dancer::Plugin::SiteMap">With this plugin</a>, by James Ronan, a <a href="http://en.wikipedia.org/wiki/Sitemap">sitemap</a> of your application is created. <blockquote>Plugin module for the Dancer web framwork that automagically adds sitemap routes to the webapp. Currently adds /sitemap and /sitemap.xml where the former is a basic HTML list and the latter is an XML document of URLS.</blockquote> -h3. you can help! :) +## you can help! :) There is still a lot of stuff to do. Don't hesitate to come on #dancer@irc.perl.org to discuss ideas or new features that you would like. diff --git a/_posts/2010-06-10-moosex-net-api-update.textile b/_posts/2010-06-10-moosex-net-api-update.md index 155b691..452038e 100644 --- a/_posts/2010-06-10-moosex-net-api-update.textile +++ b/_posts/2010-06-10-moosex-net-api-update.md @@ -1,10 +1,10 @@ --- layout: post -category: perl +summary: In which I write an update about MooseX::Net::API title: Moosex::Net::API - update --- -"MooseX::Net::API":http://github.com/franckcuny/moosex-net-api is a module to help writing clients for RESTful (and even non-RESTful) WebServices: +[MooseX::Net::API](http://github.com/franckcuny/moosex-net-api) is a module to help writing clients for RESTful (and even non-RESTful) WebServices: {% highlight perl %} package my::api; @@ -50,21 +50,21 @@ expected: 200, 404 It's not yet complete, but a new version will be available soon on CPAN. Here is a list of some more features I plan to add quickly: - * better internal API - * better authorization support (OAuth!) - * add more methods to provide better introspection - * better unserialization - * more tests and better documentation - * generate POD via a PODWeaver plugin ? - * plugins ? - * renaming ? (not sure it really fits in the MooseX:: namespace) +* better internal API +* better authorization support (OAuth!) 
+* add more methods to provide better introspection +* better unserialization +* more tests and better documentation +* generate POD via a PODWeaver plugin ? +* plugins ? +* renaming ? (not sure it really fits in the MooseX:: namespace) -h3. http-console +## http-console -I've also started "*Net::HTTP::Console*":http://github.com/franckcuny/net-http-console. It's inspired by "http-console":http://github.com/cloudhead/http-console. It relies on MX::Net::API, and can use any libraries written with MX::Net::API, as well as any *raw* RESTful API. As an example, let's use it on twitter. +I've also started [Net::HTTP::Console](http://github.com/franckcuny/net-http-console). It's inspired by [http-console](http://github.com/cloudhead/http-console). It relies on MX::Net::API, and can use any libraries written with MX::Net::API, as well as any **raw** RESTful API. As an example, let's use it on twitter. {% highlight bash %} -http-console --url http://api.twitter.com --format json +% http-console --url http://api.twitter.com --format json http://127.0.0.1:5984> GET /1/statuses/public_timeline [ @@ -91,21 +91,21 @@ You can call any method from the twitter API (at the exception of the ones that You can also use it with any library that uses MX::Net::API: {% highlight bash %} -http-console --lib Net::Backtweet +% http-console --lib Net::Backtweet http://api.backtweet.com> help command available commands: -- tweets_by_url -- stats_by_url -- good_tweets_by_url +- tweets\_by_url +- stats\_by_url +- good\_tweets_by_url -http://api.backtype.com> help command tweets_by_url +http://api.backtype.com> help command tweets\_by_url name: tweets_by_url description: Retrieve tweets that link to a given URL, whether the links are shortened or unshortened. method: GET path: /tweets/search/links -http://api.backtype.com> stats_by_url {"q":"http://lumberjaph.net","key":s3kr3t"} +http://api.backtype.com> stats\_by_url {"q":"http://lumberjaph.net","key":s3kr3t"} { "tweetcount" : 388 } @@ -122,16 +122,12 @@ Arguments to the methods are serialized in JSON format. Not sure if it's the bes } {% endhighlight %} -It's far from being complete at the moment, but I will extend it quickly. Right now, you can define global headers, and get help for all methods in your MX::Net::API library. Authentication is on top of my priority list, as is alias creation, so instead of doing (on a non-moosex::net::api lib): +It's far from being complete at the moment, but I will extend it quickly. Right now, you can define global headers, and get help for all methods in your MX::Net::API library. Authentication is on top of my priority list, as is alias creation, so instead of doing (on a non-moosex::net::api lib): `GET /users/` you will do: -bc. GET /users/ - -you will do: - -bc. alias users/:country as users +> alias users/:country as users then: -bc. users {"country":"france"} +> users {"country":"france"} -(and yes, I've switched from wordpress to "blawd":http://github.com/perigrin/blawd) +(and yes, I've switched from wordpress to [blawd](http://github.com/perigrin/blawd)). diff --git a/_posts/2010-06-13-fpw2010-summary.md b/_posts/2010-06-13-fpw2010-summary.md new file mode 100644 index 0000000..2883242 --- /dev/null +++ b/_posts/2010-06-13-fpw2010-summary.md @@ -0,0 +1,29 @@ +--- +layout: post +summary: In which I summarize my FPW experience. +title: FPW 2010 summary +--- + +First, no more [welsh](http://en.wikipedia.org/wiki/Welsh_rarebit). Ever. 
+ +Even if Calais was not the easiest destination for every one, it was a really fun and intersting two days. I met nice folks, had great discussions and drink good beers. + +For those who missed this workshop, a short summary for you: + + * had excellent discussions with [sukria](http://www.sukria.net/fr/) about the future of [Dancer](http://github.com/sukria/dancer) + * fun facts about time zone and unicode with [maddingue](http://twitter.com/maddingue), [rgs](http://twitter.com/octoberequus) and [fperrad](http://github.com/fperrad) + * interesting chat with fperrad about lua and parrot + * convinced more people that Plack *is* the future for Perl Web development + * beers, and more beers with sukria, jerome, [Arnaud](http://twitter.com/ephoz), rgs, [Camille](http://twitter.com/cmaussan) and [Stephane](http://twitter.com/straux). + * talked with [Marc](http://www.tinybox.net/) about Plack, Dancer, and other stuff + +!http://farm5.static.flickr.com/4045/4695068097_1193f8c4d6.jpg(diner)! + +My slides are available online (in french): + + * [GitHub explorer](http://franck.lumberjaph.net/blog/slides/github-explorer.pdf) + * [Introduction to plack](http://franck.lumberjaph.net/slides/introduction_a_plack.pdf) + +And to finish, two other summaries: [sukria's one](http://www.sukria.net/fr/archives/2010/06/12/french-perl-workshop-2010-report/) and [twitter's timeline](http://twitter.com/#search?q=%23fpw2010); and [some pictures on flickr](http://www.flickr.com/photos/franck_/sets/72157624263416548/). + +**Thanks to Laurent and Sebastien for their hard work on organizing this conference.** diff --git a/_posts/2010-06-13-fpw2010-summary.textile b/_posts/2010-06-13-fpw2010-summary.textile deleted file mode 100644 index 9c3e1f3..0000000 --- a/_posts/2010-06-13-fpw2010-summary.textile +++ /dev/null @@ -1,29 +0,0 @@ ---- -layout: post -category: conference -title: FPW 2010 summary ---- - -First, no more "welsh":http://en.wikipedia.org/wiki/Welsh_rarebit. Ever. - -Even if Calais was not the easiest destination for every one, it was a really fun and intersting two days. I met nice folks, had great discussions and drink good beers. - -For those who missed this workshop, a short summary for you: - - * had excellent discussions with "sukria":http://www.sukria.net/fr/ about the future of "Dancer":http://github.com/sukria/dancer - * fun facts about time zone and unicode with "maddingue":http://twitter.com/maddingue, "rgs":http://twitter.com/octoberequus and "fperrad":http://github.com/fperrad - * interesting chat with fperrad about lua and parrot - * convinced more people that Plack *is* the future for Perl Web development - * beers, and more beers with sukria, jerome, "arnaud":http://twitter.com/ephoz, rgs, "camille":http://twitter.com/cmaussan and "stephane":http://twitter.com/straux. - * talked with "Marc":http://www.tinybox.net/ about Plack, Dancer, and other stuff - -!http://farm5.static.flickr.com/4045/4695068097_1193f8c4d6.jpg(diner)! 
- -My slides are available online (in french): - - * github explorer: "slideshare":http://www.slideshare.net/franckcuny/github-explorer and "PDF":http://franck.lumberjaph.net/blog/slides/github-explorer.pdf - * introduction to plack: "slideshare":http://www.slideshare.net/franckcuny/introduction-a-plack and "PDF":http://franck.lumberjaph.net/slides/introduction_a_plack.pdf - -And to finish, two other summaries: "sukria's one":http://www.sukria.net/fr/archives/2010/06/12/french-perl-workshop-2010-report/ and "twitter's timeline":http://twitter.com/#search?q=%23fpw2010; and "some pictures on flickr.":http://www.flickr.com/photos/franck_/sets/72157624263416548/ - -*Thanks to laurent and sebastien for their hard work on organizing this conference.* diff --git a/_posts/2010-06-20-dancer-meeting.textile b/_posts/2010-06-20-dancer-meeting.md index e7423cd..935d998 100644 --- a/_posts/2010-06-20-dancer-meeting.textile +++ b/_posts/2010-06-20-dancer-meeting.md @@ -1,16 +1,16 @@ --- layout: post -category: perl +summary: In which I share some notes from June's Dancer meeting. title: Monthly Dancer meeting --- -I've been contributing to Dancer for a few months now, and the discussions occurs mainly on IRC (irc.perl.org, #dancer) or on "the Mailing List":http://lists.perldancer.org/cgi-bin/listinfo/dancer-users. +I've been contributing to Dancer for a few months now, and the discussions occurs mainly on IRC (irc.perl.org, #dancer) or on [the Mailing List](http://lists.perldancer.org/cgi-bin/listinfo/dancer-users). Last weekend, I had the occasion to meet sukria during the French Perl Workshop. This has been really productive, we had the occasion to talk about Plack, the templating system, websocket, ... and I really think we should have met before. It was also the occasion to meet another contributor, eiro, with whom I've been able to share some knowledge about Plack. -During the workshop, I made a talk about Plack and the Middlewares. The direct result of this is "the last feature added by sukria":http://github.com/sukria/Dancer/commit/5ee83a5206e08256d7326f92c2f2f62c5e035ba9#L0R440: middlewares can be set in the configuration file of your Dancer application, and will be loaded for you. +During the workshop, I made a talk about Plack and the Middlewares. The direct result of this is [the last feature added by sukria](http://github.com/sukria/Dancer/commit/5ee83a5206e08256d7326f92c2f2f62c5e035ba9#L0R440): middlewares can be set in the configuration file of your Dancer application, and will be loaded for you. -The next release of Dancer won't generate an *app.psgi* file anymore, so you will only need to edit your environment file (like _deployement.yaml_), and add the following configuration: +The next release of Dancer won't generate an **app.psgi** file anymore, so you will only need to edit your environment file (like **deployement.yaml**), and add the following configuration: {% highlight yaml %} warnings: 1 @@ -24,8 +24,8 @@ plack_middlewares: - Dancer::Settings {% endhighlight %} -and your application will load some "Plack::Middleware::Debug":http://search.cpan.org/perldoc?Plack::Middleware::Debug and "Dancer::Debug":http://search.cpan.org/dist/Dancer-Debug/ panels. +and your application will load some [Plack::Middleware::Debug](http://search.cpan.org/perldoc?Plack::Middleware::Debug) and [Dancer::Debug](http://search.cpan.org/dist/Dancer-Debug/) panels. 
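For people not familiar with Plack, this configuration key is roughly equivalent to wrapping the application by hand with Plack::Builder. A minimal sketch of that hand-written equivalent; the inline application here is only a stand-in for the PSGI coderef produced by Dancer:

{% highlight perl %}
use strict;
use warnings;
use Plack::Builder;

# Stand-in for the PSGI coderef of your Dancer application.
my $app = sub {
    [ 200, [ 'Content-Type' => 'text/html' ], ['Hello world'] ];
};

# Roughly what the plack_middlewares entry does for you:
builder {
    enable 'Debug';    # Plack::Middleware::Debug, with its default panels
    $app;
};
{% endhighlight %}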
Sukria has suggested a monthly drinkup meeting for people in/near Paris, to talk about Dancer and Plack, in a pub or another place where we can bring a laptop, have some beers and share idea/codes and other exchange technicals thoughts. -I hope to meet more Dancer developers and users in a near future (sawyer at Pise maybe ?). +I hope to meet more Dancer developers and users in a near future (sawyer at Pisa maybe ?). diff --git a/_posts/2010-06-25-presque-new_features.md b/_posts/2010-06-25-presque-new_features.md new file mode 100644 index 0000000..de94d65 --- /dev/null +++ b/_posts/2010-06-25-presque-new_features.md @@ -0,0 +1,41 @@ +--- +layout: post +summary: In which I show some new features for presque. +title: presque +--- + +I've added a few new features to [presque](http://github.com/franckcuny/presque). + +[presque](http://lumberjaph.net/presque-a-redis---tatsumaki-based-message-queue.html) is a persistant job queue based on [Redis](http://github.com/antirez/redis) and [Tatsumaki](http://github.com/miyagawa/Tatsumaki). + +A short list of current features implemented: + +* jobs are JSON object +* possibility to stop/start queues +* jobs can be delayed to run after a certain date in the future +* workers can register themself, doing this, you can know when a worker started, what he have done, ... +* statistics about queue, jobs, and workers +* possible to store and fetch jobs in batch +* a job can be unique + +The REST interface is simple, and there is only a few methods. It's fast (I will provide numbers soon from our production environment), and workers can be implemented in any languages. + +There have been a lot of refactoring lately. The main features missing right now are a simple HTML interface that will display various informations, pulling the data from the REST API (hint : if someone want to help to design this one ... :) ), websocket (sending a message to all workers). + +There is a Perl client to the REST API: [net::presque](http://github.com/franckcuny/net-presque), that you can use with [net::http::console](http://github.com/franckcuny/net-http-console): + +{% highlight bash %} +% perl bin/http-console --api_lib Net::Presque --url http://localhost:5000 +http://localhost:5000> fetch_job {"queue_name":"twitter_stream"} +{ + "text" : "Australias new prime minister - julia gillard is our 27th prime minister.", + "user" : "Lov3LifeAlways" +} +{% endhighlight %} + +I've also wrote a better [worker for Perl](http://github.com/franckcuny/presque-worker). It's a Moose::Role that you apply to your class. You need to write a **work** method, and your done. This worker handle retries, provide a logger, ... As for [resque](http://github.com/defunkt/resque), there is two dispatcher: + +* normal : the worker grab a job, process it, then ask for the next job +* fork : the worker grab a job, fork, let the child do the job and exit, while the parent ask for the next job. As resque says, "Resque assumes chaos". And me too, I like (ordered) chaos + +I hope to finish the documentation and to writes one or two more workers as example (maybe in Python and javascript/node.js) soon to be able to tag a first version, and to collect some info about how many jobs have been processed at work (we use it to do url resolution and collect twitter data among few other things). Although I'm not sure I will release it to CPAN. 
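To give an idea of what a worker looks like with the role-based version, here is a minimal sketch; the role name and the constructor arguments (base_uri, queue) are assumed from the earlier presque::worker example, so treat this as a sketch rather than the exact API:

{% highlight perl %}
package My::Worker;
use Moose;

# Assumed: the Moose::Role shipped with presque-worker keeps the presque::worker name.
with 'presque::worker';

# The one method you have to write.
sub work {
    my ($self, $job) = @_;
    # $job is the decoded JSON payload fetched from the queue.
    print "processing a job from $job->{user}\n";
    return;
}

package main;
use AnyEvent;

# Constructor arguments and the event-loop pattern are borrowed from the
# earlier presque::worker example; both are assumptions here.
My::Worker->new(
    base_uri => 'http://localhost:5000',
    queue    => 'twitter_stream',
);

AnyEvent->condvar->recv;
{% endhighlight %}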
diff --git a/_posts/2010-06-25-presque-new_features.textile b/_posts/2010-06-25-presque-new_features.textile deleted file mode 100644 index 94d86f0..0000000 --- a/_posts/2010-06-25-presque-new_features.textile +++ /dev/null @@ -1,41 +0,0 @@ ---- -layout: post -category: perl -title: presque ---- - -I've added a few new features to "presque":http://github.com/franckcuny/presque. - -"presque":http://lumberjaph.net/presque-a-redis---tatsumaki-based-message-queue.html is a persistant job queue based on "Redis":http://github.com/antirez/redis and "Tatsumaki":http://github.com/miyagawa/Tatsumaki. - -A short list of current features implemented: - - * jobs are JSON object - * possibility to stop/start queues - * jobs can be delayed to run after a certain date in the future - * workers can register themself, doing this, you can know when a worker started, what he have done, ... - * statistics about queue, jobs, and workers - * possible to store and fetch jobs in batch - * a job can be unique - -The REST interface is simple, and there is only a few methods. It's fast (I will provide numbers soon from our production environment), and workers can be implemented in any languages. - -There have been a lot of refactoring lately. The main features missing right now are a simple HTML interface that will display various informations, pulling the data from the REST API (hint : if someone want to help to design this one ... :) ), websocket (sending a message to all workers). - -There is a Perl client to the REST API: "net::presque":http://github.com/franckcuny/net-presque, that you can use with "net::http::console":http://github.com/franckcuny/net-http-console: - -{% highlight bash %} -perl bin/http-console --api_lib Net::Presque --url http://localhost:5000 -http://localhost:5000> fetch_job {"queue_name":"twitter_stream"} -{ - "text" : "Australias new prime minister - julia gillard is our 27th prime minister.", - "user" : "Lov3LifeAlways" -} -{% endhighlight %} - -I've also wrote a better "worker for Perl":http://github.com/franckcuny/presque-worker. It's a Moose::Role that you apply to your class. You need to write a *work* method, and your done. This worker handle retries, provide a logger, ... As for "resque":http://github.com/defunkt/resque, there is two dispatcher: - - * normal : the worker grab a job, process it, then ask for the next job - * fork : the worker grab a job, fork, let the child do the job and exit, while the parent ask for the next job. As resque says, "Resque assumes chaos". And me too, I like (ordered) chaos - -I hope to finish the documentation and to writes one or two more workers as example (maybe in Python and javascript/node.js) soon to be able to tag a first version, and to collect some info about how many jobs have been processed at work (we use it to do url resolution and collect twitter data among few other things). Although I'm not sure I will release it to CPAN. diff --git a/_posts/2010-06-30-github-poster-to-ship.textile b/_posts/2010-06-30-github-poster-to-ship.md index a65b4dd..c11ba77 100644 --- a/_posts/2010-06-30-github-poster-to-ship.textile +++ b/_posts/2010-06-30-github-poster-to-ship.md @@ -1,6 +1,6 @@ --- layout: post -category: graph +summary: In which I try to see if anyone is interested in buying a poster. 
title: Github Communities Posters for shipping --- @@ -10,5 +10,5 @@ I've finally found a printer that can do international shipping for reasonable c <a href="http://fr.linkfluence.net/posters/"><img style="border: 1px solid #000;" src="http://fr.linkfluence.net/wp-content/images/atlas/github_thumb.png" /></a> </center> -So, if you're interested by a poster in A1 size, "send me a mail":/contact/. I'll need at least 15 persons to be able to do this. So contact me, and I will keep you informed. +So, if you're interested by a poster in A1 size, send me a mail. I'll need at least 15 persons to be able to do this. So contact me, and I will keep you informed. diff --git a/_posts/2010-09-10-dancer-summer-of-code.textile b/_posts/2010-09-10-dancer-summer-of-code.md index 07b0f52..a8bc7f7 100644 --- a/_posts/2010-09-10-dancer-summer-of-code.textile +++ b/_posts/2010-09-10-dancer-summer-of-code.md @@ -1,12 +1,12 @@ --- layout: post -category: perl +summary: In which I talk about Dancer's Summer of Code title: Dancer's Summer of Code --- -h3. Middleware +### Middleware -After the "French Perl Workshop":http://journeesperl.fr/fpw2010/, we decided to focus our efforts on bringing middleware into Dancer. As the .psgi script is now obsolete, we wanted to simplify the middleware configuration for users not familiar with Plack. +After the [French Perl Workshop](http://journeesperl.fr/fpw2010/), we decided to focus our efforts on bringing middleware into Dancer. As the .psgi script is now obsolete, we wanted to simplify the middleware configuration for users not familiar with Plack. It's now possible to load a middleware by adding it to your configuration: @@ -20,15 +20,15 @@ plack_middlewares: - Timer {% endhighlight %} -h3. YAPC::Eu 2010 +### YAPC::Eu 2010 -During YAPC::Eu, I've been happy to meet with "squeeks":http://github.com/squeeks, "sawyer":http://blogs.perl.org/users/sawyer_x/ and "Martin Berends":http://github.com/mberends. Sadly, we didn't have much time to talk and do some coding. +During YAPC::EU, I've been happy to meet with [squeeks](http://github.com/squeeks), [sawyer](http://blogs.perl.org/users/sawyer_x/) and [Martin Berends](http://github.com/mberends). Sadly, we didn't have much time to talk and do some coding. -I had already met Martin during the FPW, where he started to port Dancer to Perl6. His first objective is to have "HTTP::Server::Simple":http://github.com/mberends/http-server-simple work. I was really impressed with his works; if I manage to find some spare time soon, I will join his effort. +I had already met Martin during the FPW, where he started to port Dancer to Perl6. His first objective is to have [HTTP::Server::Simple](http://github.com/mberends/http-server-simple) work. I was really impressed with his works; if I manage to find some spare time soon, I will join his effort. -h3. Dancer's application +### Dancer's application -In august, "alexis":http://www.sukria.net/ brought a big effort to refactor the core of Dancer, to add the possibility to "plug" components to your application. This required a lot of rewriting, but in the meantime, we added more tests to ensure nothing would break. +In august, [Alexis](http://www.sukria.net/) brought a big effort to refactor the core of Dancer, to add the possibility to "plug" components to your application. This required a lot of rewriting, but in the meantime, we added more tests to ensure nothing would break. 
With this feature, you can do the following: @@ -58,9 +58,9 @@ get '/' => sub { }; {% endhighlight %} -Now you can request */* and */forum*. The before filter declared in the package *myapp::forum* will be executed when the */forum* path is matched, and the filter in *myapp::blog* will be executed for */*. +Now you can request **/** and **/forum**. The before filter declared in the package **myapp::forum** will be executed when the **/forum** path is matched, and the filter in **myapp::blog** will be executed for **/**. -h3. QA +### QA The weekend following the YAPC::EU, we held a small hackaton/QA day on irc. Not many people were present, but we managed to achieve some results: @@ -71,11 +71,11 @@ The weekend following the YAPC::EU, we held a small hackaton/QA day on irc. Not Today our code average is over 92%, and we have more than 1200 tests. -With the new hook system, two new keywords have been added: *before_template* and *after*. They work as the *before* keyword, except the *before_template* is executed before sending the tokens to the template, so you can modify them (a small example can be found in the "Dancer::Plugin::i18n":http://github.com/franckcuny/dancer-plugin-i18n). The *after* is executed before the response is sent to the user. +With the new hook system, two new keywords have been added: **before_template** and **after**. They work as the **before** keyword, except the **before_template** is executed before sending the tokens to the template, so you can modify them (a small example can be found in the [Dancer::Plugin::i18n](http://github.com/franckcuny/dancer-plugin-i18n)). The **after** is executed before the response is sent to the user. Sukria has also set up an autobuild system for our two main branches. Every 15 minutes, the test suite is executed when there is a new commit, and builds a report. Code coverage is also measured, so we can always know the state of our various development cycles. -h3. WebSocket +### WebSocket This is the question that came back from time to time: when/will Dancer support websocket ? @@ -85,9 +85,9 @@ We investigated various ways to do this: * writing our own implementation * ... -I didn't want to write a websocket implementation for Dancer, as the spec are not yet final and it's not easy to do. Thanks to "clkao":http://github.com/clkao, we didn't have to care about all this, as he already wrote a Plack middleware for this: "Web::Hippie":http://search.cpan.org/perldoc?Web::Hippie::Pipe. +I didn't want to write a websocket implementation for Dancer, as the spec are not yet final and it's not easy to do. Thanks to [clkao](http://github.com/clkao), we didn't have to care about all this, as he already wrote a Plack middleware for this: [Web::Hippie](http://search.cpan.org/perldoc?Web::Hippie::Pipe). -So, what we did, is to use this middleware and add some syntactic sugar so people can use it easily in their applications. A small application is available "here":http://github.com/franckcuny/dancer-chat. +So, what we did, is to use this middleware and add some syntactic sugar so people can use it easily in their applications. A small application is available [here](http://github.com/franckcuny/dancer-chat). This is not yet complete, it's only available in the 'devel' branch, and subject to change. A small code sample: @@ -113,16 +113,16 @@ websocket '/message' => sub { As you can see, a lot of stuff can be improved quite easily in terms of syntax. -h3. 
Deployment +### Deployment -We're also in the process of reworking our current Deployment documentation. Lots of people are trying to deploy Dancer using various configurations, and not all are well documented, or don't work as expected. If you use Dancer, and have deployed an application in a way not documened in our Deployement documentation, please join us on irc (#dancer on irc.perl.org) or contact us on the mailing list, or even better, send us a patch, so we can improve this part. +We're also in the process of reworking our current Deployment documentation. Lots of people are trying to deploy Dancer using various configurations, and not all are well documented, or don't work as expected. If you use Dancer, and have deployed an application in a way not documened in our Deployement documentation, please join us on irc (#dancer on irc.perl.org) or contact us on the mailing list, or even better, send us a patch, so we can improve this part. -h3. Future +### Future There is also our next Dancer's meeting meeting to organize, at the end of Septembre. In October will take place the 2nd OSDC.fr, where I will talk about Plack, and alexis will present Dancer. -I want to thank my company ("linkfluence":http://linkfluence.net) and my boss ("camille":http://twitter.com/cmaussan) for giving me time to code on Dancer at work. +I want to thank my company ([Linkfluence](http://linkfluence.net)) and my boss ([Camille](http://twitter.com/cmaussan)) for giving me time to code on Dancer at work. As always, thanks to blob for reviewing my (slightly improving) english :) diff --git a/_posts/2010-09-17-spore.textile b/_posts/2010-09-17-spore.md index af66994..8725c98 100644 --- a/_posts/2010-09-17-spore.textile +++ b/_posts/2010-09-17-spore.md @@ -1,36 +1,36 @@ --- layout: post title: SPORE -category: misc +summary: In which I introduce SPORE. --- -h2. Specification to a POrtable Rest Environment +## Specification to a POrtable Rest Environment -More and more web services offer "ReST API":http://en.wikipedia.org/wiki/Representational_State_Transfer. Every time you want to access one of these services, you need to write a client for this API, or find an existing one. Sometimes, you will find the right library that will do exactly what you need. Sometimes you won't, and you end up writing your own. +More and more web services offer [ReST API](http://en.wikipedia.org/wiki/Representational_State_Transfer). Every time you want to access one of these services, you need to write a client for this API, or find an existing one. Sometimes, you will find the right library that will do exactly what you need. Sometimes you won't, and you end up writing your own. Some parts of an API client are always the same: - * build an uri - * make a request - * handle the response - * ... +* build an uri +* make a request +* handle the response +* ... With SPORE, I propose a better solution for this. SPORE is composed of two parts: - * a specification that describes an API - * a "framework" for clients (think this as "WSGI":http://www.python.org/dev/peps/pep-0333/, "Plack":http://plackperl.org/, "Rack":http://rack.rubyforge.org/, "JSGI":http://jackjs.org/jsgi-spec.html, ...) +* a specification that describes an API +* a "framework" for clients (think this as [WSGI](http://www.python.org/dev/peps/pep-0333/), [Plack](http://plackperl.org/), [Rack](http://rack.rubyforge.org/), [JSGI](http://jackjs.org/jsgi-spec.html), ...) -h3. API Specifications +## API Specifications -I know, at this point, you're thinking "what ? 
isn't it just what "WSDL":http://en.wikipedia.org/wiki/Web_Services_Description_Language does ?". Well, yes. But it's (in my opinion) simpler to write this than to write a WSDL. And when you say "WSDL" people think ""SOAP":http://en.wikipedia.org/wiki/SOAP_(protocol)", and that's definitly not a good thing. +I know, at this point, you're thinking "what ? isn't it just what [WSDL](http://en.wikipedia.org/wiki/Web_Services_Description_Language) does ?". Well, yes. But it's (in my opinion) simpler to write this than to write a WSDL. And when you say "WSDL" people think "[SOAP](http://en.wikipedia.org/wiki/SOAP_(protocol))", and that's definitly not a good thing. The first part is the specification to API. A ReST request is mainly : - * a *HTTP method* - * a *path* - * some *parameters* +* a **HTTP method** +* a **path** +* some **parameters** -This is *easy to describe*. For this example, I will use the "twitter API":http://dev.twitter.com/doc/get/statuses/public_timeline. So, if i want to describe the user timeline method, we will get something like this: +This is **easy to describe**. For this example, I will use the [twitter API](http://dev.twitter.com/doc/get/statuses/public_timeline). So, if i want to describe the user timeline method, we will get something like this: {% highlight yaml %} public_timeline: @@ -45,31 +45,31 @@ public_timeline: Whatever your language of choice is, you'll always need this informations. The idea with the API description, is that it can be reused by every language. If the API provider publishes this file, everyone can easily use it. It's very similar to a documentation (for the twitter description, all I needed to do was to copy/paste the documentation, it's really that simple) but it can be used by a framework to generate a client. -The specifications should be in JSON (I've written the example in YAML for the sake of readability). The complete description of the specifications are available "here":https://github.com/SPORE/specifications +The specifications should be in JSON (I've written the example in YAML for the sake of readability). The complete description of the specifications are available [here](https://github.com/SPORE/specifications). There is many advantages to do it this way: - * if you have a client in Perl and Javascript, the names of the methods are the same in both langages, and the names of parameters too - * if the API changes some endpoints, you don't have to change your code, you only need to update the description file +* if you have a client in Perl and Javascript, the names of the methods are the same in both langages, and the names of parameters too +* if the API changes some endpoints, you don't have to change your code, you only need to update the description file -I've started to write some specifications for a few services ("twitter":https://github.com/SPORE/api-description/blob/master/services/twitter.json, "github":https://github.com/SPORE/api-description/blob/master/services/github.json, "backtype":https://github.com/franckcuny/spore/blob/master/services/backtype.json, "backtweet":https://github.com/franckcuny/spore/blob/master/services/backtweet.json, ...) and applications ("couchdb":https://github.com/franckcuny/spore/blob/master/apps/couchdb.json, "presque":https://github.com/franckcuny/spore/blob/master/apps/presque.json). They are not complete yet, so you're welcomed to "fork the repository":https://github.com/franckcuny/spore, add missings methods, and add your own specifications! 
:) +I've started to write some specifications for a few services ([twitter](https://github.com/SPORE/api-description/blob/master/services/twitter.json), [github](https://github.com/SPORE/api-description/blob/master/services/github.json), [backtype](https://github.com/franckcuny/spore/blob/master/services/backtype.json), [backtweet](https://github.com/franckcuny/spore/blob/master/services/backtweet.json), ...) and applications ([couchdb](https://github.com/franckcuny/spore/blob/master/apps/couchdb.json), [presque](https://github.com/franckcuny/spore/blob/master/apps/presque.json)). They are not complete yet, so you're welcomed to [fork the repository](https://github.com/franckcuny/spore), add missings methods, and add your own specifications! :) -h3. Client Specification +## Client Specification -Now that we have a simple description for the API, we want to have "an easy solution to use it":https://github.com/franckcuny/net-http-spore/blob/master/spec/spore_implementation.pod. I will describe "the Perl implementation":https://github.com/franckcuny/net-http-spore, but there is also one for Ruby (will be published soon), and a early version for "Clojure":http://github.com/ngrunwald/clj-spore and "Python":http://github.com/elishowk/pyspore. +Now that we have a simple description for the API, we want to have [an easy solution to use it](https://github.com/franckcuny/net-http-spore/blob/master/spec/spore_implementation.pod). I will describe [the Perl implementation](https://github.com/franckcuny/net-http-spore), but there is also one for Ruby (will be published soon), and a early version for [Clojure](http://github.com/ngrunwald/clj-spore) and [Python](http://github.com/elishowk/pyspore). This kind of thing is really easy to implement in dynamic languages, and still doable in others. The client is composed of two parts: core and middlewares. -The core will create the appropriate functions using the previous description. Thanks to metaprogramming, it's very easy to do it. If we use "Moose":http://search.cpan.org/perldoc?Moose, all I need to do, is to extend the "Moose::Meta::Method":http://search.cpan.org/perldoc?Moose::Meta::Method and add new attributes like: +The core will create the appropriate functions using the previous description. Thanks to metaprogramming, it's very easy to do it. If we use [Moose](http://search.cpan.org/perldoc?Moose), all I need to do, is to extend the [Moose::Meta::Method](http://search.cpan.org/perldoc?Moose::Meta::Method) and add new attributes like: - * path - * method - * params - * authentication - * ... +* path +* method +* params +* authentication +* ... For each method declared in the description, I build a new Moose method I will attach to my class. Basicaly, the code looks like this: @@ -84,20 +84,20 @@ foreach my $method_name ( keys %$methods_spec ) { } {% endhighlight %} -The code of the *user_timelime* method will be generated via the *add_spore_method*. +The code of the `user_timelime` method will be generated via the `add_spore_method`. Middlewares are the nice part of it. By default, the core only creates a request, executes it, and gives you the result. Nothing more. By adding middlewares, you can handle the following stuff: - * headers manipulation - * authentication (basic, OAuth, ) - * (de)serialization (JSON, XML, YAML, CSV, ...) - * caching - * proxying - * ... +* headers manipulation +* authentication (basic, OAuth, ) +* (de)serialization (JSON, XML, YAML, CSV, ...) +* caching +* proxying +* ... 
<center>!/static/imgs/chart.png(schema)!</center> -The most obvious middleware is the one that handles the format. When you load the middleware Format::JSON, it will set various headers on your request. In case of a GET method, the *Accept* header will be set to *application/json*. For a POST, the *Content-Type* will be also set. Before returning the result to the client, the content will be transformed from JSON to a Perl structure. +The most obvious middleware is the one that handles the format. When you load the middleware Format::JSON, it will set various headers on your request. In case of a GET method, the **Accept** header will be set to **application/json**. For a POST, the **Content-Type** will be also set. Before returning the result to the client, the content will be transformed from JSON to a Perl structure. For twitter, I can have a client with this few lines: @@ -131,9 +131,9 @@ foreach my $tweet (@$tweets) { Middlewares are easy to write. They should implement a *call* method, which receive a request object as argument. The middleware can return: - * nothing: the next middleware will be executed - * a callback: it will be executed when the request is done, and will receive a response object - * a response object: no more middlewares will be executed +* nothing: the next middleware will be executed +* a callback: it will be executed when the request is done, and will receive a response object +* a response object: no more middlewares will be executed A simple middleware that add a runtime header to the response object will look like this: @@ -154,12 +154,12 @@ sub call { } {% endhighlight %} -I've tried to mimic as much as possible Plack's behavior. The result of a request is a Response object, but you can also use it as an arrayref, with the following values [http_code, [http_headers], body]. +I've tried to mimic as much as possible Plack's behavior. The result of a request is a Response object, but you can also use it as an arrayref, with the following values [http\_code, [http\_headers], body]. -h3. Conclusion +## Conclusion -The real target for this are not API developers (even if it's useful to have this when you write your own API, I will show some examples soon), neither client developers (even it's really easier to do with this), but people who want to play immediatly with an API to fetch data, without the coding skill or knowledge of what an HTTP request is, how to define headers, what is OAuth, ... As it was suggested to me, an implementation for "R":http://en.wikipedia.org/wiki/R_(programming_language) would be really usefull to a lot of people. +The real target for this are not API developers (even if it's useful to have this when you write your own API, I will show some examples soon), neither client developers (even it's really easier to do with this), but people who want to play immediatly with an API to fetch data, without the coding skill or knowledge of what an HTTP request is, how to define headers, what is OAuth, ... As it was suggested to me, an implementation for [R](http://en.wikipedia.org/wiki/R_(programming_language)) would be really usefull to a lot of people. Right now, I'm looking for people interested by this idea/project, and to work on completing the specification. I'm pretty happy with the current status, as it works with most API I've encountered. -I will present SPORE and its implementations "during OSDC.fr":http://act.osdc.fr/osdc2010fr/ next month. 
+I will present SPORE and its implementations [during OSDC.fr](http://act.osdc.fr/osdc2010fr/) next month.
diff --git a/_posts/2010-09-27-jitterbug.md b/_posts/2010-09-27-jitterbug.md new file mode 100644 index 0000000..533286f
--- /dev/null
+++ b/_posts/2010-09-27-jitterbug.md
@@ -0,0 +1,49 @@
+---
+layout: post
+summary: In which I introduce Jitterbug.
+title: jitterbug - a first step to continuous integration for Perl modules on GitHub
+---
+
+> If you'd like to be a jitter bug,
+> First thing you must do is get a jug,
+> Put whiskey, wine and gin within,
+> And shake it all up and then begin.
+
+Earlier this month, sukria set up a [shell script](http://github.com/sukria/capsule) to run Dancer's test suite. The script is simple, but does what we want:
+
+* every 15 minutes, it pulls from the github repository
+* for every perl installed via perlbrew, it executes the test suite
+* it stores the results in a text file
+* finally, it creates a code coverage report for the master branch
+
+The only problem I had with this is that it wasn't really easy to find the appropriate test report you might want to check.
+
+That's when I decided to write an interface to this: [jitterbug](https://github.com/franckcuny/jitterbug).
+
+## Demo
+
+[You can check Dancer's version](http://jitterbug.perldancer.org/). The interface is really simple: there's a list of repositories, the status of the last build, and a link to a list of all the project's builds.
+
+<center><img src="/static/imgs/jitterbug.png" alt="jitterbug" /></center>
+
+## How it works
+
+For each project you want to use with jitterbug, you set the URL of the HTTP hook in the administration page of your project. Each time a push is detected, GitHub will send you a notification (you can see the details of its content). If the project doesn't already exist in your setup, it will be created, so you don't need to maintain or update a configuration file each time you create a new repository. The notification creates a task in a queue, and a script pulls the task and executes it.
+
+> If you don't want anyone to use your setup for building their modules, you can protect the **/hook/** path with a .htaccess, and use a URL of the form http://user:pass@.../
+
+The script (`builder.pl`, provided with jitterbug) will clone the repository, switch to the given commit and run the tests. It works both with classic distributions that have a Makefile.PL and with Dist::Zilla setups. The output is stored on disk and some information about the project is updated. [You can see the result for Dancer](http://jitterbug.perldancer.org/project/Dancer).
+
+This script relies on two important things: [perlbrew](http://github.com/gugod/App-perlbrew) and [cpanminus](http://github.com/miyagawa/cpanminus). So your tests will be executed against every Perl version you have installed and, thanks to cpanminus, all the dependencies will be installed too.
+
+If the tests fail, an email is sent to the author.
+
+## What's next
+
+A list of some features I want to have:
+
+* IRC notifications (something like: [author's name]: link to the commit - commit message - link to the build)
+* customizable builder script (so you can add your own hooks to building a package, have private notifications, etc.)
+* simple administration (restart a build, etc.)
+
+(thanks to [sawyer](http://github.com/xsawyerx) and [sukria](http://github.com/sukria/)).
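To make the build flow described under "How it works" more concrete, here is a hedged sketch of the clone/checkout/test loop; it is illustrative only, not taken from jitterbug's builder.pl, and it assumes a perlbrew recent enough to provide the `exec` command.

{% highlight perl %}
# Sketch of the build loop: clone, check out the pushed commit, then run
# the test suite once per perlbrew-installed perl, with deps via cpanminus.
use strict;
use warnings;

my ($repo_url, $commit) = @ARGV;

system('git', 'clone', $repo_url, 'build') == 0 or die "clone failed\n";
chdir 'build' or die "chdir failed: $!\n";
system('git', 'checkout', $commit) == 0 or die "checkout failed\n";

for my $perl (split /\n/, qx(perlbrew list)) {
    $perl =~ s/^[\s*]+//;    # strip the marker for the currently active perl
    next unless $perl;
    system("perlbrew exec --with $perl cpanm --installdeps --notest . >/dev/null");
    system("perlbrew exec --with $perl prove -lr t/ > report-$perl.txt 2>&1");
}
{% endhighlight %}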
diff --git a/_posts/2010-09-27-jitterbug.textile b/_posts/2010-09-27-jitterbug.textile deleted file mode 100644 index f8d315b..0000000 --- a/_posts/2010-09-27-jitterbug.textile +++ /dev/null @@ -1,50 +0,0 @@ ---- -layout: post -category: perl -title: jitterbug - a first step to continuous integration for Perl modules on GitHub ---- - -bq. If you'd like to be a jitter bug, -First thing you must do is get a jug, -Put whiskey, wine and gin within, -And shake it all up and then begin. - - -Earlier this month sukria set up a "shell script":http://github.com/sukria/capsule to run the Dancer's tests suit. The script is simple, but does what we want: - - * every 15 minutes, it pulls from the github repository - * for every perl installed via perlbrew, it executes the tests suite - * it stores the results in text file - * finally, it creates a code coverage report for the master branch - -The only problem I had with this is that it wasn't really easy to find the appropriate test report you might want to check. - -That's when I decided to write an interface to this: "jitterbug":https://github.com/franckcuny/jitterbug. - -h3. demo - -"You can check Dancer's version":http://jitterbug.perldancer.org/. The interface is really simple: there's a list of repositories, the status of the last build, and a link to a list of all the project's builds. - -<center>!/static/imgs/jitterbug.png(jitterbug)!</center> - -h3. how it works - -For each project you want to use with jitterbug, you set the url of the HTTP hook in the administration page of your project. Each time a push is detected, GH will send you a notification (you can see details of the content). If the project doesn't already exist in your setup, it will be created, so you don't need to maintain or update a configuration file each time you create a new repository. The notification creates a task in a queue, and a script pulls the task and executes it. - -bq. If you don't want anyone to use your setup for building their modules, you can protect the */hook/* path with a .htaccess, and use the following kind of url : http://user:pass@.../ - -The script (*builder.pl*, provided with JB) will clone the repository, switch to the given commit and run the tests. It works both with classic distribution having a Makefile.PL and Dist::Zilla setups. The output is stored on the disk and some information about the project will be updated. "You can see the result for Dancer":http://jitterbug.perldancer.org/project/Dancer. - -This script relies on two important things: "perlbrew":http://github.com/gugod/App-perlbrew and "cpanminus":http://github.com/miyagawa/cpanminus. So your tests will be executed against every Perl version you have installed and thanks to cpanminus, all the deps will be installed too. - -If the test fails, a mail is sent to the author. - -h3. what's next - -A list of some features I want to have: - - * IRC notifications (something like: [author's name]: link to the commit - commit message - link to the build) - * customizable builder script (so you can add your own hook to building a package, having private notifications, etc.) - * simple administration (restart a build, etc.) - -(thanks to "sawyer":http://github.com/xsawyerx and "sukria":http://github.com/sukria/). 
diff --git a/_posts/2010-10-04-how-to-contribute-to-dancer.textile b/_posts/2010-10-04-how-to-contribute-to-dancer.md index 6ddf836..b35ee30 100644 --- a/_posts/2010-10-04-how-to-contribute-to-dancer.textile +++ b/_posts/2010-10-04-how-to-contribute-to-dancer.md @@ -1,39 +1,37 @@ --- layout: post -category: perl +summary: In which I explain how to contribute to Dancer. title: How to contribute to Dancer --- For our development projects, we rely a lot on Github. Lately, more and more people started contributing to Dancer, but not all of them are familiar with Github or git. Here is a little step-by-step guide on how to contribute. You don't need to be a Perl expert to contribute, you can provide help by correcting documentation error, or adding a new recipe to our cookbook. -h3. the code +## the code -The main repository is hosted "here":http://github.com/sukria/dancer. There are two main branches: +The main repository is hosted [here](http://github.com/sukria/dancer). There are two main branches: - * master - * devel +* master +* devel In the master branch we accept only bug fixes and doc fixes/updates. The current master branch will be the future 1.2 version. The devel branch is where we add new features, or improve existing features. -h3. contributing +## contributing -First, go to "github.com/sukria/dancer":http://github.com/sukria/dancer and click on the "fork" button. Now, here is a little tutorial on how to fetch the repository, list the local and remote branches, and track the remote devel branch. +First, go to [github.com/sukria/dancer](http://github.com/sukria/dancer) and click on the "fork" button. Now, here is a little tutorial on how to fetch the repository, list the local and remote branches, and track the remote devel branch. -<script src="http://gist.github.com/609443.js?file=forking%20dancer"></script> +Now that you know what the purpose of each branch is, you can decide to work on master or devel (`git checkout devel` to switch branch). -Now that you know what the purpose of each branch is, you can decide to work on master or devel (*git checkout devel* to switch branch). +## sending your patch -h3. sending your patch +As I've previously stated, we rely a lot on the github features and interface. So now you've written your patch. First, be sure to provide one or more tests, and to run the test suite (with `make test` or `prove -r t/`). If all the tests pass, you can send a pull request. For this, you go on your own fork on github (http://github.com/$user/dancer), and you click on the "Pull Request" button. -As I've previously stated, we rely a lot on the github features and interface. So now you've written your patch. First, be sure to provide one or more tests, and to run the test suite (with *make test* or *prove -r t/*). If all the tests pass, you can send a pull request. For this, you go on your own fork on github (http://github.com/$user/dancer), and you click on the "Pull Request" button. +You can at any time see all the commits done by others that have not yet been merged into one of our branches at [this url](http://github.com/sukria/Dancer/forkqueue). -You can at any time see all the commits done by others that have not yet been merged into one of our branches at "this url":http://github.com/sukria/Dancer/forkqueue. +## reporting and/or fixing bugs -h3. reporting and/or fixing bugs - -We prefer to use the github issue tracker instead of RT. So if you want to report a bug, go "there":http://github.com/sukria/dancer/issues. 
+We prefer to use the github issue tracker instead of RT. So if you want to report a bug, go [there](http://github.com/sukria/dancer/issues). If your commit fixes a bug reported there, please add in your commit message something like 'fixing GH #xxx" where xxx is the bug id. diff --git a/_posts/2010-10-12-osdcfr.md b/_posts/2010-10-12-osdcfr.md new file mode 100644 index 0000000..f91336e --- /dev/null +++ b/_posts/2010-10-12-osdcfr.md @@ -0,0 +1,107 @@ +--- +layout: post +summary: In which I summarize my OSDC.fr experience +title: OSDC.fr report +--- + +This weekend I went to the second edition of the OSDC.fr conference. This conference is organized by the Python, Ruby, PHP and Perl french organizations. This edition was really really good, and well organized (kudo to the whole team!). + +The first day of the conference, we had two excellents talk about Git. The first one by [mojombo](http://github.com/mojombo), about [advanced git usages](http://git-tips.heroku.com/#1). I've managed to get him to sign my copy of the [GitHub poster](http://lumberjaph.net/graph/2010/03/25/github-explorer.html). The second one by BooK was about his module [Git::Repository](http://search.cpan.org/perldoc?Git::Repository) (which I use for [jitterbug](http://github.com/franckcuny/jitterbug)). He show us how he used git to graph his familly tree. + +<center><img width="600" height="500" class="img_center" src="http://farm5.static.flickr.com/4070/5074738888_a6c2481b03_b.jpg" alt="github poster" /></center> + +Germain did an [introduction to Riak](http://www.slideshare.net/franckcuny/riak-a-file-system-for-internet), and [Julian](http://twitter.com/flngr) did a talk about [Gephi](http://gephi.org/), about how it relies on the netbeans platform, and why a tool to visualize graphs is useful. + +I've talked about Plack in the afternoon, and [sukria](http://sukria.net) talked about Dancer right after me. I think both our talks went well, and his one was a nice demonstration of Dancer, since he used [Broadway](http://github.com/sukria/broadway) to write his slides. I planned to do some demos during my talk, but a problem with my laptop prevented me to do this. 
Anyway, if you attended my talk and want to try them, here they are : + +{% highlight perl %} +use strict; +use warnings; +use Plack::Builder; + +my $app = sub { + return [ + 200, + [ 'Content-Type' => 'text/html' ], + [ '<html><body>Hello World</body></html>' ] + ]; +}; + +builder { + enable 'Debug'; + $app; +}; +{% endhighlight %} + +{% highlight perl %} +use strict; +use warnings; +use Plack::Builder; + +my $app1 = sub {[200, ['Content-Type' => 'text/html'], ['hello from app1']]}; +my $app2 = sub {[200, ['Content-Type' => 'text/html'], ['hello from app2']]}; + +builder { + mount "/app1" => $app1; + mount "/app2" => $app2; +}; +{% endhighlight %} + +{% highlight perl %} +use strict; +use warnings; +use Plack::Builder; + +my $app = sub { [ 200, [ 'Content-Type' => 'text/html' ], ['hello world'] ] }; + +my $middleware = sub { + my $env = shift; + my $res = $app->($env); + $res->[2]->[0] =~ s/world/OSDC.fr/; + return $res; +}; +{% endhighlight %} + +{% highlight perl %} +use strict; +use warnings; +use Plack::Builder; + +my $app = sub { + die "die"; + [ 200, [ 'Content-Type' => 'text/html' ], ['hello world'], ]; +}; + +builder { + enable "StackTrace"; + $app; +}; +{% endhighlight %} + +{% highlight perl %} +use strict; +use warnings; +use Plack::Builder; + +my $app = sub { return [ '200', [ 'Content-Type' => 'text/html' ], ['hello world'] ] }; + +builder { + enable "Throttle::Hourly", max => 1; + $app; +}; +{% endhighlight %} + +The evening GitHub offered the beers (yeah!) in a pub, and I had an intersting talk with [fperrad](http://github.com/fperrad) about SPORE. Francois surprised me earlier last week when he contacted me to announce me he add write a Lua version of SPORE. He had some questions and suggestions for the current spec and API, and we managed to handle all the issues. + +The sunday I talked about [SPORE](http://github.com/franckcuny/spore). Even if the talk went not so well, I managed to interest some people, since I had some questions and positive feedback. I've not seen much talk the sunday, as I've spent some time with sukria and others to discuss about code. But I managed to see the talk about Redis which was good, and gave me some ideas for [presque](http://github.com/franckcuny/presque). + +The ligthning talks were also interesting. [Bruno Michel](http://twitter.com/brmichel) talked about [EventMachine](http://rubyeventmachine.com/), [htty](http://github.com/htty) and [mongrel2](http://mongrel2.org/home), [dolmen](http://search.cpan.org/~dolmen/) showed us a small application to graph a svn repository, and [BooK](http://search.cpan.org/~book/) told us about his work on [Devel::TraceUse](http://search.cpan.org/perldoc?Devel::TraceUse). + +I wish I would have been able to attend [oz](http://twitter.com/ephoz) presentation of [nodejs](http://cyprio.net/nodejs_osdc.pdf), and I've also missed the talks given by carl masak, since I was working with sukria on Dancer at that time. 
+ +All the slides for my talks are available on slideshares: + + * [Plack](http://www.slideshare.net/franckcuny/plack-et-les-middleware) + * [SPORE](http://www.slideshare.net/franckcuny/spore) + * [jitterbug](http://www.slideshare.net/franckcuny/jitterbug) + * [presque](http://www.slideshare.net/franckcuny/presque) diff --git a/_posts/2010-10-12-osdcfr.textile b/_posts/2010-10-12-osdcfr.textile deleted file mode 100644 index e539a10..0000000 --- a/_posts/2010-10-12-osdcfr.textile +++ /dev/null @@ -1,32 +0,0 @@ ---- -layout: post -category: conference -title: OSDC.fr report ---- - -This weekend I went to the second edition of the OSDC.fr conference. This conference is organized by the Python, Ruby, PHP and Perl french organizations. This edition was really really good, and well organized (kudo to the whole team!). - -The first day of the conference, we had two excellents talk about Git. The first one by "mojombo":http://github.com/mojombo, about "advanced git usages":http://git-tips.heroku.com/#1. I've managed to get him to sign my copy of the "github poster":http://lumberjaph.net/graph/2010/03/25/github-explorer.html. The second one by BooK was about his module "Git::Repository":http://search.cpan.org/perldoc?Git::Repository (which I use for "jitterbug":http://github.com/franckcuny/jitterbug). He show us how he used git to graph his familly tree. - -<center><img width="600" height="500" class="img_center" src="http://farm5.static.flickr.com/4070/5074738888_a6c2481b03_b.jpg" alt="github poster" /></center> - -Germain did an "introduction to Riak":http://www.slideshare.net/franckcuny/riak-a-file-system-for-internet, and "Julian":http://twitter.com/flngr did a talk about "Gephi":http://gephi.org/, about how it relies on the netbeans platform, and why a tool to visualize graphs is useful. - -I've talked about Plack in the afternoon, and "sukria":http://sukria.net talked about Dancer right after me. I think both our talks went well, and his one was a nice demonstration of Dancer, since he used "Broadway":http://github.com/sukria/broadway to write his slides. I planned to do some demos during my talk, but a problem with my laptop prevented me to do this. Anyway, if you attended my talk and want to try them, here they are : - -<script src="http://gist.github.com/609646.js"> </script> - -The evening github offered the beers (yeah!) in a pub, and I had an intersting talk with "fperrad":http://github.com/fperrad about SPORE. Francois surprised me earlier last week when he contacted me to announce me he add write a Lua version of SPORE. He had some questions and suggestions for the current spec and API, and we managed to handle all the issues. - -The sunday I talked about "SPORE":http://github.com/franckcuny/spore. Even if the talk went not so well, I managed to interest some people, since I had some questions and positive feedback. I've not seen much talk the sunday, as I've spent some time with sukria and others to discuss about code. But I managed to see the talk about Redis which was good, and gave me some ideas for "presque":http://github.com/franckcuny/presque. - -The ligthning talks were also interesting. 
"Bruno Michel":http://twitter.com/brmichel talked about "EventMachine":http://rubyeventmachine.com/, "htty":http://github.com/htty and "mongrel2":http://mongrel2.org/home, "dolmen":http://search.cpan.org/~dolmen/ show us a small application to graph a svn repository, and "BooK":http://search.cpan.org/~book/ told us about his work on "Devel::TraceUse":http://search.cpan.org/perldoc?Devel::TraceUse. - -I wish I would have been able to attend "oz":http://twitter.com/ephoz presentation of "nodejs":http://cyprio.net/nodejs_osdc.pdf, and I've also missed the talks given by carl masak, since I was working with sukria on Dancer at that time. - -All the slides for my talks are available on slideshares: - - * "Plack":http://www.slideshare.net/franckcuny/plack-et-les-middleware - * "SPORE":http://www.slideshare.net/franckcuny/spore - * "jitterbug":http://www.slideshare.net/franckcuny/jitterbug - * "presque":http://www.slideshare.net/franckcuny/presque diff --git a/_posts/2010-10-20-spore-update.md b/_posts/2010-10-20-spore-update.md new file mode 100644 index 0000000..14c3005 --- /dev/null +++ b/_posts/2010-10-20-spore-update.md @@ -0,0 +1,329 @@ +--- +title: SPORE update +layout: post +summary: In which I share some update on SPORE. +--- + +As I've said [in my OSDC report](http://lumberjaph.net/conference/2010/10/12/osdcfr.html), after I [presented SPORE](http://www.slideshare.net/franckcuny/spore) I've had some positive feedback. In the last ten days, I've created a [google group](http://groups.google.com/group/spore-rest) to discuss the current specification and implementations, a [SPORE account on github](http://github.com/SPORE) to hold the implementation specifications and the API descriptions files, and more importantly, we have some new implementations: + +* [Ruby](http://github.com/sukria/Ruby-Spore) +* [node.js](http://github.com/francois2metz/node-spore) +* [Javascript](http://github.com/nikopol/jquery-spore) (client side) +* PHP (not published yet) + +in addition to the already existing implementations: + +* [Perl](http://github.com/franckcuny/net-http-spore) +* [Lua](http://github.com/fperrad/lua-Spore) +* [Clojure](http://github.com/ngrunwald/clj-spore) +* [Python](http://github.com/elishowk/pyspore) + +In this post, I'll try to show some common usages for SPORE, in order to give a better explanation of why I think it's needed. 
+ +## Consistency + +{% highlight lua %} +require 'Spore' + +local github = Spore.new_from_spec 'github.json' + +github:enable 'Format.JSON' +github:enable 'Runtime' + +local r = github:user_information{format = 'json', username = 'schacon'} + +print(r.status) --> 200 +print(r.headers['x-runtime']) --> 126ms +print(r.body.user.name) --> Scott Chacon +{% endhighlight %} + +{% highlight perl %} +use Net::HTTP::Spore; + +my $gh = Net::HTTP::Spore->new_from_spec('github.json'); + +$gh->enable('Format::JSON'); +$gh->enable('Runtime'); + +my $r= $gh->user_information( format => 'json', username => 'schacon' ); + +say "HTTP status => ".$r->status; # 200 +say "Runtime => ".$r->header('X-Spore-Runtime'); # 128ms +say "username => ".$r->body->{user}->{name}; # Scott Chacon +{% endhighlight %} + +{% highlight ruby %} + +require 'spore' + +gh = Spore.new(File.join(File.dirname(__FILE__), 'github.json')) + +gh.enable(Spore::Middleware::Runtime) # will add a header with runtime +gh.enable(Spore::Middleware::Format::JSON) # will deserialize JSON responses + +# API call +r = gh.user_information( :format => 'json', :username => 'schacon' ) + +puts "HTTP status => ".r.code # 200 +puts "Runtime => ".r.header('X-Spore-Runtime') # 128ms +puts "username => ".r.body['user']['name'] # Scott Chacon +{% endhighlight %} + +As you can see in the previous example, I do the same request on the GitHub API: fetch informations from the user "mojombo". In the three languages, the API for the client is the same: + +* you create a client using the github.json API description +* you enable some middlewares +* you execute your request: the method name is the same, the argument names are the same! +* you manipulate the result the same way + +## Easy to switch from a language to another + +You can switch from a language to another without any surprises. If you must provide an API client to a third-party, you don't have to care about what languages they use, you only need to provide a description. Your methods call will be the same between all the languages, so it's easy to switch between languages, without the need to chose an appropriate client for your API (if one exists), to read the documentation (when there is one), and having the client implementation going in your way. + +## Better maintanability + +What annoys me the most when I want to use an API, is that I have to choose between two, three, or more clients that will communicate with this API. I need to read the documentations, the code, and test thoses implementations to decide which one will best fit my needs, and won't go in my way. And what if I need to do caching before the content is deserialized ? And what if the remote API changes it's authentication method (like twitter, from basic auth to OAuth) and the maintainer of the client doesn't update the code ? + +With SPORE, you don't have to maintain a client, only a description file. Your API changes, all you have to do is to update your description, and all the clients, using any language, will be able to use your new API, without the need to release a new client specific for this API in javascript, Perl, Ruby, ... 
+ +## Easy to use with APIs that are compatible + +If you want to use the Twitter public timeline: + +{% highlight perl %} +use Net::HTTP::Spore; + +my $client = Net::HTTP::Spore->new_from_spec('twitter.json'); + +$client->enable('Format::JSON'); + +my $r = $client->public_timeline( format => 'json' ); + +foreach my $t ( @{ $r->body } ) { + my $username = Encode::encode_utf8( $t->{user}->{name} ); + my $text = Encode::encode_utf8( $t->{text} ); + say $username . " says " . $text; +} +{% endhighlight %} + +And now on statusnet: + +{% highlight perl %} +use Net::HTTP::Spore; + +my $client = Net::HTTP::Spore->new_from_spec( + 'twitter.json', + base_url => 'http://identi.ca/api' +); + +$client->enable('Format::JSON'); + +my $r = $client->public_timeline( format => 'json' ); + +foreach my $t ( @{ $r->body } ) { + my $username = Encode::encode_utf8( $t->{user}->{name} ); + my $text = Encode::encode_utf8( $t->{text} ); + say $username . " says " . $text; +} +{% endhighlight %} + +easy, right ? As both APIs are compatible, the only thing you need to do is change the argument **base_url** when you create your new client. + +## It's easy to write a description + +It's really easy to write a description for your API. Let's take a look at the +one for github: + +{% highlight json %} +{ + "base_url" : "http://github.com/api/v2/", + "version" : "0.2", + "methods" : { + "follow" : { + "required_params" : [ + "user", + "format" + ], + "path" : "/:format/user/follow/:user", + "method" : "POST", + "authentication" : true + } + }, + "name" : "GitHub", + "authority" : "GITHUB:franckcuny", + "meta" : { + "documentation" : "http://develop.github.com/" + } +} +{% endhighlight %} + +The important parts are the basic API description (with a name, a base url for the API) and the list of available methods (here I've only put the 'follow' method). + +More descriptions are available on [GitHub](http://github.com/SPORE/api-description), as well as and the [full specification](http://github.com/SPORE/specifications/blob/master/spore_description.pod). + +We also have [a schema](http://github.com/SPORE/specifications/blob/master/spore_validation.rx) to validate your descriptions. + +## Middlewares + +By default, your SPORE client will only do a request and return a result. But it's easy to alter the default behavior with various middlewares. The most obvious one is the deserialization for a response, like the previous example with github and the middleware Format::JSON. + +### Control your workflow + +The use of middlewares allow you to control your workflow as with Plack/Rack/WSGI. 
You can easily imagine doing this: + +* check if the request has already been made and cached +* return the response if the cache is still valid +* perform the request +* send the content to a remote storer in raw format +* cache the raw data locally +* deserialize to json +* remove some data from the response +* give the response to the client + +Or to interrogate a site as an API: + +* send a request on a web page +* pass the response to a scraper, and put the data in JSON +* return the JSON with scraped data to the client + +### Creating a repository on GitHub + +In this example, we use a middleware to authenticate on the GitHub API: + +{% highlight perl %} +use Config::GitLike; +use Net::HTTP::Spore; + +my $c = Config::GitLike::Git->new(); $c->load; + +my $login = $c->get(key => 'github.user'); +my $token = $c->get(key => 'github.token'); + +my $github = Net::HTTP::Spore->new_from_spec('github.json'); + +$github->enable('Format::JSON'); +$github->enable( + 'Auth::Basic', + username => $login . '/token', + password => $token, +); + +my $res = $github->create_repo( + format => 'json', + payload => {name => $name, description => $desc} +); +{% endhighlight %} + +The middleware Auth::Basic will add the **authorization** header to the request, using the given tokens. + +### SPORE + MooseX::Role::Parameterized + +I really like [MooseX::Role::Parameterized](http://search.cpan.org/perldoc?MooseX::Role::Parameterized). This module allows you to build dynamically a Role to apply to your class/object: + +{% highlight perl %} +package My::App::Role::SPORE; + +use MooseX::Role::Parameterized; +use Net::HTTP::Spore; + +parameter name => ( isa => 'Str', required => 1, ); + +role { + my $p = shift; + my $name = $p->name; + + has $name => ( + is => 'rw', + isa => 'Object', + lazy => 1, + default => sub { + my $self = shift; + my $client_config = $self->context->{spore}->{$name}; + my $client = Net::HTTP::Spore->new_from_spec( + $client_config->{spec}, + %{ $client_config->{options} }, + ); + foreach my $mw ( @{ $client_config->{middlewares} } ) { + $client->enable($mw); + } + }, + ); +}; + +1; + +# in your app + +package My::App; + +use Moose; + +with 'My::App::Role::SPORE' => { name => 'couchdb' }, + 'My::App::Role::SPORE' => { name => 'url_solver' }; + +1; +{% endhighlight %} + +This Role will add two new attributes to my class: **couchdb** and **url_solver**, reading from a config file a list of middlewares to apply and the options (like base_uri). + +## Testing my application that uses CouchDB + +This is a common case. In your application you use CouchDB to store some information. When you run the tests for this application, you don't know if there will be a couchdb running on the host, if it will be on the default port, on what database should you do your tests, ... 
+ +The Perl implementation of SPORE comes with a Mock middleware: + +{% highlight perl %} +package myapp; + +use Moose; +has couchdb_client => (is => 'rw', isa => 'Object'); + +use Test::More; +use JSON; + +use myapp; + +use Net::HTTP::Spore; + +my $content = { title => "blog post", website => "http://lumberjaph.net" }; + +my $mock_server = { + '/test_database/1234' => sub { + my $req = shift; + $req->new_response( + 200, + [ 'Content-Type' => 'application/json' ], + JSON::encode_json($content) + ); + }, +}; + +ok my $client = + Net::HTTP::Spore->new_from_spec( + '/home/franck/code/projects/spore/specifications/apps/couchdb.json', + base_url => 'http://localhost:5984' ); + +$client->enable('Format::JSON'); +$client->enable( 'Mock', tests => $mock_server ); + +my $app = myapp->new(couchdb_client => $client); + +my $res = + $app->client->get_document( database => 'test_database', doc_id => '1234' ); +is $res->[0], 200; +is_deeply $res->[2], $content; +is $res->header('Content-Type'), 'application/json'; + +done_testing; + +{% endhighlight %} + +The middleware catches the request, checks if it matches something defined by the user and returns a response. + +## So ... + +I really see SPORE as something Perlish: a glue. The various implementations are a nice addition, and I'm happy to see some suggestions and discussions about the specifications. + +I'm pretty confident that the current specification for the API description is stable at this point. We still need to write more middlewares to see if we can cover most of the usages easily, so we can decide if the specification for the implementation is valid. + +(as always, thanks to bl0b!). diff --git a/_posts/2010-10-20-spore-update.textile b/_posts/2010-10-20-spore-update.textile deleted file mode 100644 index 52badb2..0000000 --- a/_posts/2010-10-20-spore-update.textile +++ /dev/null @@ -1,124 +0,0 @@ ---- -title: SPORE update -layout: post -category: misc ---- - -As I've said "in my OSDC report":http://lumberjaph.net/conference/2010/10/12/osdcfr.html, after I "presented SPORE":http://www.slideshare.net/franckcuny/spore I've had some positive feedback. In the last ten days, I've created a "google group":http://groups.google.com/group/spore-rest to discuss the current specification and implementations, a "SPORE account on github":http://github.com/SPORE to hold the implementation specifications and the API descriptions files, and more importantly, we have some new implementations: - - * "Ruby":http://github.com/sukria/Ruby-Spore - * "node.js":http://github.com/francois2metz/node-spore - * "Javascript":http://github.com/nikopol/jquery-spore (client side) - * PHP (not published yet) - -in addition to the already existing implementations: - - * "Perl":http://github.com/franckcuny/net-http-spore - * "Lua":http://github.com/fperrad/lua-Spore - * "Clojure":http://github.com/ngrunwald/clj-spore - * "Python":http://github.com/elishowk/pyspore - -In this post, I'll try to show some common usages for SPORE, in order to give a better explanation of why I think it's needed. - -h2. Consistency - -<script src="http://gist.github.com/636063.js"> </script> - -As you can see in the previous example, I do the same request on the GitHub API: fetch informations from the user "mojombo". In the three languages, the API for the client is the same: - - * you create a client using the github.json API description - * you enable some middlewares - * you execute your request: the method name is the same, the argument names are the same! 
- * you manipulate the result the same way - -h2. Easy to switch from a language to another - -You can switch from a language to another without any surprises. If you must provide an API client to a third-party, you don't have to care about what languages they use, you only need to provide a description. Your methods call will be the same between all the languages, so it's easy to switch between languages, without the need to chose an appropriate client for your API (if one exists), to read the documentation (when there is one), and having the client implementation going in your way. - -h2. Better maintanability - -What annoys me the most when I want to use an API, is that I have to choose between two, three, or more clients that will communicate with this API. I need to read the documentations, the code, and test thoses implementations to decide which one will best fit my needs, and won't go in my way. And what if I need to do caching before the content is deserialized ? And what if the remote API changes it's authentication method (like twitter, from basic auth to OAuth) and the maintainer of the client doesn't update the code ? - -With SPORE, you don't have to maintain a client, only a description file. Your API changes, all you have to do is to update your description, and all the clients, using any language, will be able to use your new API, without the need to release a new client specific for this API in javascript, Perl, Ruby, ... - -h2. Easy to use with APIs that are compatible - -If you want to use the Twitter public timeline: - -<script src="http://gist.github.com/636137.js?file=gistfile1.pl"></script> - -And now on statusnet: - -<script src="http://gist.github.com/636137.js?file=status%20net%20SPORE.pl"></script> - -easy, right ? As both APIs are compatible, the only thing you need to do is change the argument *base_url* when you create your new client. - -h2. It's easy to write a description - -It's really easy to write a description for your API. Let's take a look at the -one for github: - -<script src="http://gist.github.com/637152.js?file=GitHub%20API%20description"></script> - -The important parts are the basic API description (with a name, a base url for the API) and the list of available methods (here I've only put the 'follow' method). - -More descriptions are available on "github":http://github.com/SPORE/api-description, as well as and the "full specification":http://github.com/SPORE/specifications/blob/master/spore_description.pod. - -We also have "a schema":http://github.com/SPORE/specifications/blob/master/spore_validation.rx to validate your descriptions. - -h2. Middlewares - -By default, your SPORE client will only do a request and return a result. But it's easy to alter the default behavior with various middlewares. The most obvious one is the deserialization for a response, like the previous example with github and the middleware Format::JSON. - -h3. Control your workflow - -The use of middlewares allow you to control your workflow as with Plack/Rack/WSGI. 
You can easily imagine doing this: - - * check if the request has already been made and cached - * return the response if the cache is still valid - * perform the request - * send the content to a remote storer in raw format - * cache the raw data locally - * deserialize to json - * remove some data from the response - * give the response to the client - -Or to interrogate a site as an API: - - * send a request on a web page - * pass the response to a scraper, and put the data in JSON - * return the JSON with scraped data to the client - -h3. Creating a repository on Github - -In this example, we use a middleware to authenticate on the GitHub API: - -<script src="http://gist.github.com/636261.js?file=gistfile1.pl"></script> - -The middleware Auth::Basic will add the *authorization* header to the request, using the given tokens. - -h3. SPORE + MooseX::Role::Parameterized - -I really like "MooseX::Role::Parameterized":http://search.cpan.org/perldoc?MooseX::Role::Parameterized. This module allows you to build dynamically a Role to apply to your class/object: - -<script src="http://gist.github.com/623956.js"> </script> - -This Role will add two new attributes to my class: *couchdb* and *url_solver*, reading from a config file a list of middlewares to apply and the options (like base_uri). - -h3. Testing my application that uses CouchDB - -This is a common case. In your application you use CouchDB to store some information. When you run the tests for this application, you don't know if there will be a couchdb running on the host, if it will be on the default port, on what database should you do your tests, ... - -The Perl implementation of SPORE comes with a Mock middleware: - -<script src="http://gist.github.com/636316.js"> </script> - -The middleware catches the request, checks if it matches something defined by the user and returns a response. - -h2. So ... - -I really see SPORE as something Perlish: a glue. The various implementations are a nice addition, and I'm happy to see some suggestions and discussions about the specifications. - -I'm pretty confident that the current specification for the API description is stable at this point. We still need to write more middlewares to see if we can cover most of the usages easily, so we can decide if the specification for the implementation is valid. - -(as always, thanks to bl0b!). diff --git a/_posts/2010-10-25-perl-moderne.textile b/_posts/2010-10-25-perl-moderne.md index b7c70e9..a9f5ef1 100644 --- a/_posts/2010-10-25-perl-moderne.textile +++ b/_posts/2010-10-25-perl-moderne.md @@ -1,13 +1,13 @@ --- layout: post title: Perl Moderne -category: misc +summary: In which I do a review of the book "Moderne Perl" --- Exercice différent aujourd’hui, puisqu’il s’agit d’une critique d’un livre. -Il y a quelque jours, j’ai reçu une copie du livre "Perl moderne":http://www.pearson.fr/livre/?GCOI=27440100979970, à paraitre le 29 octobre aux éditions Pearson. Il est intéressant à plus d’un titre : c’est un livre original, pas une traduction d’un énième livre sur Perl ; il se concentre sur le Perl dit “Moderne”, c’est-à-dire les outils comme "Moose":http://search.cpan.org/perldoc?Moose, "DBIx::Class":http://search.cpan.org/perldoc?DBIx::Class, etc ; il est écrit par des personnes impliquées dans la communauté Perl (ce sont des auteurs de modules CPAN, qui organisent des conférences Perl). +Il y a quelque jours, j’ai reçu une copie du livre [Perl moderne](http://www.pearson.fr/livre/?GCOI=27440100979970), à paraitre le 29 octobre aux éditions Pearson. 
Il est intéressant à plus d’un titre : c’est un livre original, pas une traduction d’un énième livre sur Perl ; il se concentre sur le Perl dit “Moderne”, c’est-à-dire les outils comme [Moose](http://search.cpan.org/perldoc?Moose), [DBIx::Class](http://search.cpan.org/perldoc?DBIx::Class), etc ; il est écrit par des personnes impliquées dans la communauté Perl (ce sont des auteurs de modules CPAN, qui organisent des conférences Perl). -J’en profite pour saluer au passages les auteurs de l’ouvrage: "Maddingue":http://github.com/maddingue, "BooK":http://github.com/book, "jq":http://github.com/jquelin et "dams":http://github.com/dams. +J’en profite pour saluer au passages les auteurs de l’ouvrage: [Maddingue](http://github.com/maddingue), [BooK](http://github.com/book), [jq](http://github.com/jquelin) et [dams](http://github.com/dams). La première bonne surprise est la taille du livre : au format poche. Je trouve ça pratique pour le transporter dans son sac (usager du métro, bonjour) et pour le laisser traîner sur le bureau sans qu’il prenne de la place. La seconde bonne surprise est le nombre de sujets abordés : une introduction solide aux base de Perl ; la programmation objet; les expressions régulières; les bases de données ; les manipulations de fichier XML ; et les outils pour travail sur le web. @@ -19,8 +19,8 @@ Enfin, la dernière partie est celle consacrée au web, avec la présentation de Par ailleurs, si après la lecture de cet ouvrage il vous vient à l’idée de vouloir récupérer du contenu sur des pages web à l’aide d’expression régulières, il est fort probable que vous ayez lu ce livre à l’envers, ou c’est par pur goût de la provocation envers les auteurs. -A, et un point négatif, me diriez vous ? Bien sûr que j’en ai un. A mon grand regret, nulle part dans l’ouvrage il n’est fait référence à "Dancer":http://github.com/sukria/dancer. Tant pis ! +A, et un point négatif, me diriez vous ? Bien sûr que j’en ai un. A mon grand regret, nulle part dans l’ouvrage il n’est fait référence à [Dancer](http://github.com/sukria/dancer). Tant pis ! Dans l’ensemble c’est un bon livre pour qui veut découvrir Perl en 2010. Tous les outils que l’on s’attend à utiliser au quotidien sont présentés. L’organisation du livre, et le fait d’avoir de nombreux exemples, seront pratiques pour les débutants. Je pense en commander quelques exemplaires pour le travail, afin de le mettre à disposition de nos (futurs) stagiaires. -"Perl Moderne":http://www.pearson.fr/livre/?GCOI=27440100979970 est publié par "Pearson":http://www.pearson.fr/, parution le 29 octobre. ISBN: "978-2-7440-2419-1":http://www.amazon.fr/Perl-S%C3%A9bastien-Aperghis-Tramoni/dp/2744024198/ref=sr_1_1?ie=UTF8&qid=1288034071&sr=8-1 (22 €) +[Perl Moderne](http://www.pearson.fr/livre/?GCOI=27440100979970) est publié par [Pearson](http://www.pearson.fr/), parution le 29 octobre. ISBN: [978-2-7440-2419-1](http://www.amazon.fr/Perl-S%C3%A9bastien-Aperghis-Tramoni/dp/2744024198/ref=sr_1_1?ie=UTF8&qid=1288034071&sr=8-1) (22 €). diff --git a/_posts/2010-11-22-vagrant-rocks.md b/_posts/2010-11-22-vagrant-rocks.md new file mode 100644 index 0000000..9e66603 --- /dev/null +++ b/_posts/2010-11-22-vagrant-rocks.md @@ -0,0 +1,172 @@ +--- +title: Vagrant rocks +layout: post +summary: In which I share my enthusiasm for Vagrant +--- + +## tl;dr + +I've been toying with [vagrant](http://vagrantup.com/) lately, and it **really rocks**. You should definitly give it a try. 
If you're only looking for some resources to get started with it, go there: + + * [introduction](http://vagrantup.com/docs/index.html) + * [google group](http://groups.google.com/group/vagrant-up) + +## What is Vagrant + +"Vagrant is a tool for building and distributing virtualized development environments." This sentence summarizes perfectly the project. + +The idea is to use [Chef](http://www.opscode.com/chef) on top of [VirtualBox](http://www.virtualbox.org/) to deploy a VM like you would deploy a server in your production environment. + +I won't go into the details to describe Chef and VirtualBox, but here is a quick reminder. Chef is a framework to deploy infrastructures. It's written in ruby, it uses **cookbooks** to describe how to deploy stuff, and VirtualBox is a virtualization software from Oracle. + +> A little disclaimer. I don't use Chef outside from vagrant, so I may say/do some stupid things. The aim of this tutorial is not about writing a recipe for Chef, but to show what you can do thanks to Chef. So don't hesitate to correct me in the comments if I'm doing some utterly stupid things. + +## The basic + +To install vagrant, you'll need ruby and virtualbox. You have the basic instructions detailed [here](http://vagrantup.com/docs/getting-started/index.html). This will explain how to install vagrant and how to fetch a **base** image. + +### Creating a first project + +You'll probably want to start creating a new project now. For this tutorial, I'll create an image for [presque](https://github.com/franckcuny/presque). + +{% highlight sh %} +% mkdir presque +% vagrant init +{% endhighlight %} + +This will create a new image for your project, and create a new file in your directory: **Vagrantfile**. Modify this file to make it look like this: + +{% highlight ruby %} +Vagrant::Config.run do |config| + config.vm.box = "base" + config.vm.provisioner = :chef_solo + config.chef.cookbooks_path = "cookbooks" + config.chef.add_recipe("vagrant_main") + config.vm.forward_port("web", 5000, 8080) +end +{% endhighlight %} + +These instructions will: + + * tell vagrant to use the image named **base** (a lucid32 image by default) + * use chef in **solo** mode + * the recipes will be in a directory named **cookbooks** + * the main recipe will be named **vagrant_main** + * forward local HTTP port 4000 to 5000 on the VM + +### My recipes + +Now we need to create or use some recipes. First we create our **cookbooks** directory: + +{% highlight sh %} +% mkdir cookbooks +% mkdir -p cookbooks/vagrant_main/recipes +{% endhighlight %} + +We need to add some cookbooks. You will find them on [GitHub](https://github.com/opscode/cookbooks). Copy the following cookbooks inside the **cookbooks** repository: + + * apt: instructions on how to use apt + * ubuntu: this one manages the sources and executes **apt-get update** + * build-essential: installs the build-essential package + * git: installs git + * perl: configures CPAN + * runit: will be used to monitor redis and our web application + +Edit **vagrant_main/recipes/default.rb** to add them: + +{% highlight ruby %} +require_recipe "ubuntu" +require_recipe "git" +require_recipe "perl" +require_recipe "redis" +require_recipe "runit" +{% endhighlight %} + +If the VM is already started, you can run `vagrant provision` or `vagrant up`. This will deploy the previous cookbooks on the VM. When it's done, you can log on the VM with `vagrant ssh`. + +You'll need to additional recipes: one for redis; one for presque. 
You'll find them on my [GitHub account](http://github.com/franckcuny/cookbooks/). Copy the two recipes inside your cookbook directory, and execute `vagrant provision` to install them. + +If everything works fine, you should be able to start using presque. Test this: + +{% highlight sh %} +% curl http://localhost:8080/q/foo/ +{"error":"no job"} + +% curl -X POST -H "Content-Type: application/json" -d '{"foo":"bar"}' http://localhost:8080/q/foo/ + +% curl http://localhost:8080/q/foo/ +{"foo":"bar"} +{% endhighlight %} + +If everything is fine, you can shut down the VM with `vagrant halt`. + +### Mounting directories + +Instead of pulling from github, you may prefer to mount a local directory on the VM. For this, you'll need to modifiy the **Vagrantfile** to add this: + +{% highlight sh %} +config.vm.share_folder "v-code", "/deployment/code", "~/code/perl5" +config.vm.share_folder "v-data", "/deployment/data", "~/code/data" +{% endhighlight %} + +This will mount your local directories **perl5** and **data** under **/deployment/{code,data}** on the VM. So now you can edit your files locally and they will be automagically updated on the VM at once. + +## and now the awesome part + +If you're like me, you may end up with the need to have multiple VMs which will talk to each other. Common scenarios are a VM with the website, and another one with the DB, or one VM with a bunch of API webservices and another with Workers who need to interact with the VM. Rejoice, this kind of stuff is also handled by vagrant! + +Replace the content of the previous **Vagrantfile** with this: + +{% highlight ruby %} +Vagrant::Config.run do |config| + config.vm.box = "base" + config.vm.provisioner = :chef_solo + + config.chef.cookbooks_path = "cookbooks" + + config.vm.define :presque do |presque_config| + presque_config.chef.add_recipe("vagrant_presque") + presque_config.vm.network("192.168.1.10") + presque_config.vm.forward_port("presque", 80, 8080) + presque_config.vm.customize do |vm| + vm.name = "vm_presque" + end + end + + config.vm.define :workers do |workers_config| + workers_config.chef.add_recipe("vagrant_workers") + workers_config.vm.network("192.168.1.11") + workers_config.vm.customize do |vm| + vm.name = "vm_workers" + end + end +end +{% endhighlight %} + +In this configuration, we're creating two VMs, **presque** and **workers**. You'll need to create two new cookbooks, one for each new VM (vagrant_presque, with the same content as vagrant_main, and vagrant_workers, with only the recipe for ubuntu and the instructions to install curl). Once it's done, boot the two VMs: + +{% highlight sh %} +% vagrant up presque +% vagrant up workers +{% endhighlight %} + +Now let's log on the worker VM + +{% highlight sh %} +% vagrant ssh workers +vagrant@vagrantup:~$ curl http://192.168.1.10:5000/q/foo +{"error":"no job"} +{% endhighlight %} + +and voilà. + +## Conclusion + +I've started to use vagrant for all my new personal projects and for most of my stuff at work. I really enjoy using this, as it's easy to create a cookbook or add one, it's easy to setup a multi VM environment, you can share a configuration amongst your coworkers, etc. + +If you haven't started yet using a VM for your own projects, you really should give it a try, or use a simple VirtualBox setup. 
If you want to read more on the subject, these two blog posts may be relevant: + + * [Why you should be using virtualisation](http://morethanseven.net/2010/11/04/Why-you-should-be-using-virtualisation.html) + * [nothingmuch setup](http://blog.woobling.org/2010/10/headless-virtualbox.html) + +(oh, and BTW, did you notice that [Dancer 1.2](http://search.cpan.org/perldoc?Dancer) is out ?) diff --git a/_posts/2010-11-22-vagrant-rocks.textile b/_posts/2010-11-22-vagrant-rocks.textile deleted file mode 100644 index ea34e4a..0000000 --- a/_posts/2010-11-22-vagrant-rocks.textile +++ /dev/null @@ -1,131 +0,0 @@ ---- -title: Vagrant rocks -layout: post -category: misc ---- - -h2. tl;dr - -I've been toying with "vagrant":http://vagrantup.com/ lately, and it *really rocks*. You should definitly give it a try. If you're only looking for some resources to get started with it, go there: - - * "introduction":http://vagrantup.com/docs/index.html - * "google group":http://groups.google.com/group/vagrant-up - -h2. What is Vagrant - -"Vagrant is a tool for building and distributing virtualized development environments." This sentence summarizes perfectly the project. - -The idea is to use "Chef":http://www.opscode.com/chef on top of "VirtualBox":http://www.virtualbox.org/ to deploy a VM like you would deploy a server in your production environment. - -I won't go into the details to describe Chef and VirtualBox, but here is a quick reminder. Chef is a framework to deploy infrastructures. It's written in ruby, it uses *cookbooks* to describe how to deploy stuff, and VirtualBox is a virtualization software from -Sun- Oracle. - -bq. A little disclaimer. I don't use Chef outside from vagrant, so I may say/do some stupid things. The aim of this tutorial is not about writing a recipe for Chef, but to show what you can do thanks to Chef. So don't hesitate to correct me in the comments if I'm doing some utterly stupid things. - -h2. The basic - -To install vagrant, you'll need ruby and virtualbox. You have the basic instructions detailed "here":http://vagrantup.com/docs/getting-started/index.html. This will explain how to install vagrant and how to fetch a _base_ image. - -h3. Creating a first project - -You'll probably want to start creating a new project now. For this tutorial, I'll create an image for "presque":https://github.com/franckcuny/presque. - -bc.. mkdir presque -vagrant init - -p. This will create a new image for your project, and create a new file in your directory: _Vagrantfile_. Modify this file to make it look like this: - -<script src="https://gist.github.com/705569.js?file=ruby"></script> - -These instructions will: - - * tell vagrant to use the image named _base_ (a lucid32 image by default) - * use chef in _solo_ mode - * the recipes will be in a directory named _cookbooks_ - * the main recipe will be named _vagrant_main_ - * forward local HTTP port 4000 to 5000 on the VM - -h3. My recipes - -Now we need to create or use some recipes. First we create our _cookbooks_ directory: - -bc.. mkdir cookbooks -mkdir -p cookbooks/vagrant_main/recipes - -p. We need to add some cookbooks. You will find them on "github":https://github.com/opscode/cookbooks. 
Copy the following cookbooks inside the _cookbooks_ repository: - - * apt: instructions on how to use apt - * ubuntu: this one manages the sources and executes _apt-get update_ - * build-essential: installs the build-essential package - * git: installs git - * perl: configures CPAN - * runit: will be used to monitor redis and our web application - -Edit _vagrant_main/recipes/default.rb_ to add them: - -<script -src="https://gist.github.com/705569.js?file=default.rb"></script> - -If the VM is already started, you can do - -bc.. vagrant provision - -p. or - -bc.. vagrant up - -p. This will deploy the previous cookbooks on the VM. When it's done, you can log on the VM: - -bc.. vagrant ssh - -p. You'll need to additional recipes: one for redis; one for presque. You'll find them on my "github account":http://github.com/franckcuny/cookbooks/. Copy the two recipes inside your cookbook directory, and execute @vagrant provision@ to install them. - -If everything works fine, you should be able to start using presque. Test this: - -<script -src="https://gist.github.com/705569.js?file=test-presque.sh"></script> - -If everything is fine, you can shut down the VM: - -bc.. vagrant halt - -h3. Mounting directories - -Instead of pulling from github, you may prefer to mount a local directory on the VM. For this, you'll need to modifiy the _Vagrantfile_ to add this: - -bc.. config.vm.share_folder "v-code", "/deployment/code", "~/code/perl5" -config.vm.share_folder "v-data", "/deployment/data", "~/code/data" - -p. This will mount your local directories _perl5_ and _data_ under _/deployment/{code,data}_ on the VM. So now you can edit your files locally and they will be automagically updated on the VM at once. - -h2. and now the awesome part - -If you're like me, you may end up with the need to have multiple VMs which will talk to each other. Common scenarios are a VM with the website, and another one with the DB, or one VM with a bunch of API webservices and another with Workers who need to interact with the VM. Rejoice, this kind of stuff is also handled by vagrant! - -Replace the content of the previous _Vagrantfile_ with this: - -<script -src="https://gist.github.com/705569.js?file=multiples%20vm"></script> - -In this configuration, we're creating two VMs, _presque_ and _workers_. You'll need to create two new cookbooks, one for each new VM (vagrant_presque, with the same content as vagrant_main, and vagrant_workers, with only the recipe for ubuntu and the instructions to install curl). Once it's done, boot the two VMs: - -bc.. vagrant up presque -vagrant up workers - -p. Now let's log on the worker VM - -bc.. $ vagrant ssh workers -vagrant@vagrantup:~$ curl http://192.168.1.10:5000/q/foo -{"error":"no job"} - -p. and voilà. - -h2. Conclusion - -I've started to use vagrant for all my new personal projects and for most of my stuff at work. I really enjoy using this, as it's easy to create a cookbook or add one, it's easy to setup a multi VM environment, you can share a configuration amongst your coworkers, etc. - -If you haven't started yet using a VM for your own projects, you really should give it a try, or use a simple VirtualBox setup. 
If you want to read more on the subject, these two blog posts may be relevant: - - * "Why you should be using virtualisation":http://morethanseven.net/2010/11/04/Why-you-should-be-using-virtualisation.html - * "nothingmuch setup":http://blog.woobling.org/2010/10/headless-virtualbox.html - -(oh, and BTW, did you notice that "Dancer 1.2":http://search.cpan.org/perldoc?Dancer is out ?) diff --git a/_posts/2010-12-06-fpw2O11.textile b/_posts/2010-12-06-fpw2O11.md index 52717c5..dd9030a 100644 --- a/_posts/2010-12-06-fpw2O11.textile +++ b/_posts/2010-12-06-fpw2O11.md @@ -1,12 +1,12 @@ --- layout: post title: French Perl Workshop -category: perl +summary: In which we start to talk about FPW 2011. --- -I joined the organization team for the next French Perl Workshop, which should take place in Paris in June. I'll help the existing team (composed of Laurent Boivins, Sebastien DeSeille and "maddingue":http://twitter.com/maddingue) and also the newcomers ("cmaussan":http://twitter.com/cmaussan and "eiro":http://github.com/eiro). +I joined the organization team for the next French Perl Workshop, which should take place in Paris in June. I'll help the existing team (composed of Laurent Boivins, Sebastien DeSeille and [maddingue](http://twitter.com/maddingue)) and also the newcomers ([cmaussan](http://twitter.com/cmaussan) and [eiro](http://github.com/eiro)). -We've already decided to organize it in Paris, since last year it was in "Calais":http://journeesperl.fr/fpw2010/, and the idea is to organize once every two years in Paris. We don't have yet a venue and a date, but we will try to keep you informed about this. +We've already decided to organize it in Paris, since last year it was in [Calais](http://journeesperl.fr/fpw2010/), and the idea is to organize once every two years in Paris. We don't have yet a venue and a date, but we will try to keep you informed about this. -The main theme should be "Modern Perl", and we would like to reach a wider audience than previous years: people who are not yet in the Perl community, as well as some foreigners (hey, Paris is a beautiful city). +The main theme should be "Modern Perl", and we would like to reach a wider audience than previous years: people who are not yet in the Perl community, as well as some foreigners (hey, Paris is a beautiful city). I'll try to communicate about our progress at least once a month, and find reasons to motivate you to come! :) diff --git a/_posts/2011-02-20-psgichrome.md b/_posts/2011-02-20-psgichrome.md new file mode 100644 index 0000000..bf8338a --- /dev/null +++ b/_posts/2011-02-20-psgichrome.md @@ -0,0 +1,75 @@ +--- +layout: post +summary: In which I show how to log from a PSGI app to Chrome. +title: PSGIChrome +--- + +Earlier this month, I've read about this extension: [chromePHP](http://www.chromephp.com/). + +The principle of this extension is to allow you to log from your PHP application to chrome. You may not be aware, but this is something you already have with every web application if you're using Plack. And not only for Chrome, but every webkit navigator, and Firefox too! + +Let's mimic their page. + +## Installation + +1. install [Plack::Middleware::ConsoleLogger](http://search.cpan.org/perldoc?Plack::Middleware::ConsoleLogger) (`cpanm Plack::Middleware::ConsoleLogger`) +2. no step 2 +3. no step 3 +4. 
write a simple PSGI application and log + +{% highlight perl %} +use strict; +use warnings; + +use Plack::Builder; + +my $app = sub { + my $env = shift; + my $content = "<html><body>this is foo</body></html>"; + foreach my $k ( keys %$env ) { + if ( $k =~ /HTTP_/ ) { + $env->{'psgix.logger'}->({ + level => 'debug', + message => "$k => " . $env->{$k}, + }); + } + } + $env->{'psgix.logger'}->({ + level => 'warn', + message => 'this is a warning', + }); + $env->{'psgix.logger'}->({ + level => 'error', + message => 'this is an error', + }); + return [ 200, [ 'Content-Type' => 'text/html' ], [$content] ]; +}; + +builder { + enable "ConsoleLogger"; + $app; +} +{% endhighlight %} + +Load this application with plackup: `plackup chromeplack.pl` + +point your browser to http://localhost:5000, activate the javascript console. + +If this works correctly, you should have a smiliar output in your console: + +<a href="http://f.lumberjaph.net/blog/misc/plack_chrome.png"><img class="img_center" src="http://f.lumberjaph.net/blog/misc/plack_chrome.png" /></a> + +## Dancer + +I don't know for other framework, but you can also log to your browser with [Dancer](http://perldancer.org/). + +First, you need to install [Dancer::Logger::PSGI](http://search.cpan.org/perldoc?Dancer::Logger::PSGI), then, in your application, you need to edit the environment file. You'll certainly want to change 'development.yml'. + +{% highlight yaml %} +logger: "PSGI" +plack_middlewares: + - + - ConsoleLogger +{% endhighlight %} + +Now you can start your application (running in a Plack environment, of course), and next time you'll use 'warning' or 'debug' or any other keyword from Dancer::Logger, the message will end up in your javascript console. diff --git a/_posts/2011-02-20-psgichrome.textile b/_posts/2011-02-20-psgichrome.textile deleted file mode 100644 index c05fd7f..0000000 --- a/_posts/2011-02-20-psgichrome.textile +++ /dev/null @@ -1,43 +0,0 @@ ---- -layout: post -category: perl -title: PSGIChrome ---- - -Earlier this month, I've read about this extension: "chromePHP":http://www.chromephp.com/. - -The principle of this extension is to allow you to log from your PHP application to chrome. You may not be aware, but this is something you already have with every web application if you're using Plack. And not only for Chrome, but every webkit navigator, and Firefox too! - -Let's mimic their page. - -h1. Installation - -# install "Plack::Middleware::ConsoleLogger":http://search.cpan.org/perldoc?Plack::Middleware::ConsoleLogger (*cpanm Plack::Middleware::ConsoleLogger*) -# no step 2 -# no step 3 -# write a simple PSGI application and log - -<script src="https://gist.github.com/750108.js?file=chromeplack.pl"></script> - -Load this application with plackup: - -bc. plackup chromeplack.pl - -point your browser to *http://localhost:5000*, activate the javascript console. - -if this works correctly, you should have a smiliar output in your console: - -<a href="http://f.lumberjaph.net/blog/misc/plack_chrome.png"><img class="img_center" src="http://f.lumberjaph.net/blog/misc/plack_chrome.png" /></a> - -h2. Dancer - -I don't know for other framework, but you can also log to your browser with "Dancer":http://perldancer.org/. - -First, you need to install "Dancer::Logger::PSGI":http://search.cpan.org/perldoc?Dancer::Logger::PSGI, then, in your application, you need to edit the environment file. You'll certainly want to change 'development.yml'. - -bc.. logger: "PSGI" -plack_middlewares: - - - - ConsoleLogger - -p. 
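To make this concrete, here is a minimal sketch of a Dancer route using those logging keywords; the route path, the messages and the response body are illustrative, and it assumes the `PSGI` logger and ConsoleLogger configuration shown above is in place.

{% highlight perl %}
use Dancer;

# with logger: "PSGI" and the ConsoleLogger middleware configured,
# these messages end up in the browser's javascript console
get '/' => sub {
    debug   'loading the index page';
    warning 'this ends up in the javascript console';
    'hello from Dancer';
};

dance;
{% endhighlight %}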
Now you can start your application (running in a Plack environment, of course), and next time you'll use 'warning' or 'debug' or any other keyword from Dancer::Logger, the message will end up in your javascript console. diff --git a/_posts/2011-03-06-how_to_use_github_effectively_for_your_project.md b/_posts/2011-03-06-how_to_use_github_effectively_for_your_project.md new file mode 100644 index 0000000..53547b6 --- /dev/null +++ b/_posts/2011-03-06-how_to_use_github_effectively_for_your_project.md @@ -0,0 +1,71 @@ +--- +layout: post +title: how to use GitHub effectively for your project +summary: In which we look at how we can use GitHub effectively for your project. +--- + +<a href="https://github.com/">GitHub</a> provide an awesome set of tools for opensource developers. For <a href="http://perldancer.org">Dancer</a>, we use them as much as possible. I'll show and explain how we do our development. + +<img class="img_center" src="/static/imgs/github_dancer.jpg" /> + +## code review + +<a href="https://github.com/sukria/dancer">Dancer</a>'s development goes fast. We do our best to ship often, we do a lot of refactoring, and we listen our users. This means processing pull request and issues as fast as possible. + +## pull request + +There is five developers with write access to the main repository. Each one is asked to do a review of pending pull request every morning (this is not required, neither enforced, it's a "if you have ten minutes in the morning while drinking your cofee, please, review the PR"). + +When we're reviewing something, most of the time we pull the submited patches into a branch named **review/username**. If the reviewer is happy with the modifications, he will add a comment to the pull request: "approved" (and some other comments if it's required). In case he's not happy, he will comment **disaproved** and give his reasons (tests doesn't passes, or there is no tests for the patch provided, or this is not something we want, etc). If the PR is about something the developer doesn't really knows, or has a doubt, he skip this one, and ask for someone else to take a look. + +In order to merge the branch **review/username**, we need two developers to comment **approved**. Of course, for something simple, like a documentation fix, or changing a few line somewhere, we can merge this without applying this process. + +As the work consists to read the code and comment, it's quiete easy to handle two/three pull request each day for a developer. When one of the developper with access to the repository find some time, he can go through the pull request, and merge the one marqued as approved, since he knows that the people who had already approved them understand the code. + +## issues + +We don't use "RT":http://bestpractical.com/rt/ to manage our issues. It's not that RT is bad or that we don't like it, it's just that GitHub's issues are more integrated with our workflow. GitHub's issues are really simple (one can even say naive), but most of the time it's ok for our usage. We don't need advanced features like "track how much time you've spend on this ticket" (even if I do track my time spent on Dancer, using [orgmode](http://orgmode.org/manual/Clocking-commands.html#Clocking-commands)). + +One of the nice feature of GitHub's issues, is that you can close them with a commit. If the commit's message looks like 'closes GH-123', the issue 123 will be closed, with a link to the commit in the comment ([take a look](https://github.com/sukria/Dancer/issues/249)). 
I find this feature really useful, since when refering to a closed issue, you can find the commit inside the ticket. + +Once or twice a week, we try to proceed a **triage**, where we go through the issues, and tag them. + +When someone report something on the mailing list or on irc, we ask them if they can open an issue, since it's easier for us to track the request. + +An issue doesn't need to be about a bug, it could be: + +* a feature request (I want to do x or y with dancer) +* something that need to be refactored +* an issue reported by an user that need feedback from the developers + +## commenting on code + +Another nice feature is the possibility to comment the code. Most of the time you'll do it while reviewing a pull request. But a user can also comment on something. + +Sometimes we push a branch that need some feedback, and "a discussion will be started":https://github.com/sukria/Dancer/commit/d8e79e0d63d0e1b0e05fd36f9e31c378678fccc3. + +## comparing branches + +You can easily diff two branches. With [this url](https://github.com/sukria/Dancer/compare/master...devel) you can do a quick diff of the changes between master and devel, see the list of commits, and which files have changed. + +This is usefull when you want to have an overview of the work. + +## gitflow + +We're using [gitflow](https://github.com/nvie/gitflow) to work. Gitflow is a nice tool that help you creating and merging branches. If you don't know gitflow, there is [a good article that explain the reasons behind it](http://nvie.com/posts/a-successful-git-branching-model/). + +Ok, this has nothing to do with GitHub, but I'll explain quickly what we do with it. + +We use the following conventions: + +* master: only for release +* devel: this is the development branch. This one should *always* work (tests can't be broken) +* topic/$name: when we start to develop a new feature, we create a topic branch +* hotfix/$name: when we need to release a new version right now, without merging devel before, we create a hofrix branch +* release/$version: when we're ready to ship, we create a new branch + +It's very rare that we need to push a hotfix or release branch to GitHub: thoses branches had a really short life span. But **topic** branches can be pushed, when we want feedback from users or other developers. + +## future + +We're already using [jitterbug](https://github.com/franckcuny/jitterbug) to do some continuous integration. When we push to GitHub, a payload is posted to jitterbug, and a build is triggered. In a near future, we will use [git's notes](http://progit.org/2010/08/25/notes.html) with jitterbug. The idea is to store inside a note, for each build, the duration of the build, and the result of the build (failure / success) and maybe the TAP output in case of a failure. This will allow developers to see directly in the logs the status. Of course, GiHub already display them, so this will be very useful for all of us. diff --git a/_posts/2011-03-06-how_to_use_github_effectively_for_your_project.textile b/_posts/2011-03-06-how_to_use_github_effectively_for_your_project.textile deleted file mode 100644 index 433147f..0000000 --- a/_posts/2011-03-06-how_to_use_github_effectively_for_your_project.textile +++ /dev/null @@ -1,71 +0,0 @@ ---- -layout: post -title: how to use GitHub effectively for your project -category: dancer ---- - -<a href="https://github.com/">GitHub</a> provide an awesome set of tools for opensource developers. For <a href="http://perldancer.org">Dancer</a>, we use them as much as possible. 
I'll show and explain how we do our development. - -<img class="img_center" src="/static/imgs/github_dancer.jpg" /> - -h2. code review - -<a href="https://github.com/sukria/dancer">Dancer</a>'s development goes fast. We do our best to ship often, we do a lot of refactoring, and we listen our users. This means processing pull request and issues as fast as possible. - -h3. pull request - -There is five developers with write access to the main repository. Each one is asked to do a review of pending pull request every morning (this is not required, neither enforced, it's a "if you have ten minutes in the morning while drinking your cofee, please, review the PR"). - -When we're reviewing something, most of the time we pull the submited patches into a branch named *review/username*. If the reviewer is happy with the modifications, he will add a comment to the pull request: "approved" (and some other comments if it's required). In case he's not happy, he will comment *disaproved* and give his reasons (tests doesn't passes, or there is no tests for the patch provided, or this is not something we want, etc). If the PR is about something the developer doesn't really knows, or has a doubt, he skip this one, and ask for someone else to take a look. - -In order to merge the branch *review/username*, we need two developers to comment *approved*. Of course, for something simple, like a documentation fix, or changing a few line somewhere, we can merge this without applying this process. - -As the work consists to read the code and comment, it's quiete easy to handle two/three pull request each day for a developer. When one of the developper with access to the repository find some time, he can go through the pull request, and merge the one marqued as approved, since he knows that the people who had already approved them understand the code. - -h3. issues - -We don't use "RT":http://bestpractical.com/rt/ to manage our issues. It's not that RT is bad or that we don't like it, it's just that GitHub's issues are more integrated with our workflow. GitHub's issues are really simple (one can even say naive), but most of the time it's ok for our usage. We don't need advanced features like "track how much time you've spend on this ticket" (even if I do track my time spent on Dancer, using "orgmode":http://orgmode.org/manual/Clocking-commands.html#Clocking-commands). - -One of the nice feature of GitHub's issues, is that you can close them with a commit. If the commit's message looks like 'closes GH-123', the issue 123 will be closed, with a link to the commit in the comment ("take a look":https://github.com/sukria/Dancer/issues/249). I find this feature really useful, since when refering to a closed issue, you can find the commit inside the ticket. - -Once or twice a week, we try to proceed a *triage*, where we go through the issues, and tag them. - -When someone report something on the mailing list or on irc, we ask them if they can open an issue, since it's easier for us to track the request. - -An issue doesn't need to be about a bug, it could be: - - * a feature request (I want to do x or y with dancer) - * something that need to be refactored - * an issue reported by an user that need feedback from the developers - -h3. commenting on code - -Another nice feature is the possibility to comment the code. Most of the time you'll do it while reviewing a pull request. But a user can also comment on something. 
- -Sometimes we push a branch that need some feedback, and "a discussion will be started":https://github.com/sukria/Dancer/commit/d8e79e0d63d0e1b0e05fd36f9e31c378678fccc3. - -h3. comparing branches - -You can easily diff two branches. With "this url":https://github.com/sukria/Dancer/compare/master...devel you can do a quick diff of the changes between master and devel, see the list of commits, and which files have changed. - -This is usefull when you want to have an overview of the work. - -h2. gitflow - -We're using "gitflow":https://github.com/nvie/gitflow to work. Gitflow is a nice tool that help you creating and merging branches. If you don't know gitflow, there is "a good article that explain the reasons behind it":http://nvie.com/posts/a-successful-git-branching-model/. - -Ok, this has nothing to do with GitHub, but I'll explain quickly what we do with it. - -We use the following conventions: - - * master: only for release - * devel: this is the development branch. This one should *always* work (tests can't be broken) - * topic/$name: when we start to develop a new feature, we create a topic branch - * hotfix/$name: when we need to release a new version right now, without merging devel before, we create a hofrix branch - * release/$version: when we're ready to ship, we create a new branch - -It's very rare that we need to push a hotfix or release branch to GitHub: thoses branches had a really short life span. But *topic* branches can be pushed, when we want feedback from users or other developers. - -h2. future - -We're already using "jitterbug":https://github.com/franckcuny/jitterbug to do some continuous integration. When we push to GitHub, a payload is posted to jitterbug, and a build is triggered. In a near future, we will use "git's notes":http://progit.org/2010/08/25/notes.html with jitterbug. The idea is to store inside a note, for each build, the duration of the build, and the result of the build (failure / success) and maybe the TAP output in case of a failure. This will allow developers to see directly in the logs the status. Of course, GiHub already display them, so this will be very useful for all of us. diff --git a/_posts/2011-04-22-new_job.textile b/_posts/2011-04-22-new_job.md index 1547ae6..0ca7f0e 100644 --- a/_posts/2011-04-22-new_job.textile +++ b/_posts/2011-04-22-new_job.md @@ -1,14 +1,14 @@ --- layout: post -category: misc +summary: In which I say goodbye to Linkfluence. title: new job --- -I've worked for nearly four years at "Linkfluence":http://linkfluence.net/. From this summer on, I'm switching to a new job at "Say Media":http://saymedia.com/, San Francisco. +I've worked for nearly four years at [Linkfluence](http://linkfluence.net/). From this summer on, I'm switching to a new job at [Say Media](http://saymedia.com/), San Francisco. Working at Linkfluence has been an awesome experience for me. I've learned a lot of things, worked on really interesting projects, and worked with an awesome team. -h2. Linkfluence is hiring +## Linkfluence is hiring Linkfluence is always looking for developers to join the team. If you want a Perl job in France, and want to work on interesting topics such as: @@ -19,18 +19,18 @@ Linkfluence is always looking for developers to join the team. If you want a Per Linkfluence is the company you want to join. If you're interested, send your CV to Camille Maussang (CTO at Linkfluence): camille.maussang at linkfluence.net. -h3. 
Technologies at Linkfluence +### Technologies at Linkfluence We're typically using the following technologies to solve our various problems: * Solr and ElasticSearch to index the contents of various social media (blogs, tweeter, etc) - * "Riak":http://labs.linkfluence.net/nosql/2011/03/07/moving_from_couchdb_to_riak.html to store the contents - * a *lot* of Perl (and not the Perl of your (dear) grandmother, but Catalyst, Moose, DBIx::Class, Dancer, perl-5.12, ...) + * [Riak](http://labs.linkfluence.net/nosql/2011/03/07/moving_from_couchdb_to_riak.html) to store the contents + * a **lot** of Perl (and not the Perl of your (dear) grandmother, but Catalyst, Moose, DBIx::Class, Dancer, perl-5.12, ...) * Redis * PostgreSQL - * "Gephi":http://gephi.org/ + * [Gephi](http://gephi.org/) -h3. Open Source at Linkfluence +### Open Source at Linkfluence Developers here are encouraged to contribute to open source projects, to publish code on GitHub, and talk about the things they do. @@ -44,20 +44,20 @@ The company also sends the developers to various open source conferences: You're encouraged to talk at those conferences, and you can prepare your talks during work time. Linkfluence is also working closely with the French Perl Mongueurs, and tries to organize events with them (technical meetings, Dancer hackaton, etc). -h3. Science at Linkfluence +### Science at Linkfluence Most of the developers in the company are coming from a scientific background (but this is not a requirement). Some of them do publish scientific papers on topics like social media and graphs analysis. - * "Stabilité globale et diversité globale dans la dynamique des commentaires de Flickr":www.liafa.jussieu.fr/~prieur/Publis/TSI2444-RauxPrieurV2.pdf (Raux, Prieur, 2011) - * "Describing the Web in less than 140 characters":http://www.icwsm.org/2011/index.php (Raux, Grunwald, Prieur, 2011) + * [Stabilité globale et diversité globale dans la dynamique des commentaires de Flickr](www.liafa.jussieu.fr/~prieur/Publis/TSI2444-RauxPrieurV2.pdf) (Raux, Prieur, 2011) + * [Describing the Web in less than 140 characters](http://www.icwsm.org/2011/index.php) (Raux, Grunwald, Prieur, 2011) * Essai de géographie de la blogosphère politique française (Cardon, Fouetillou, Lerondeau, Prieur 2011) - * "Two paths of glory - Structural positions and trajectories of websites within their topical territory":http://www.icwsm.org/2011/index.php (Cardon, Fouetillou, Roth 2011) - * "Clustering based on random graph model embedding vertex features":linkinghub.elsevier.com/retrieve/pii/S0167865510000413 (Zanghi, Volant, Ambroise) - * "Strategies for Online Inference of Network Mixture":lbbe.univ-lyon1.fr/annexes/franck.../SSB-RR-14-online-estimation.pdf (Zanghi, Picard, Miele, Ambroise) + * [Two paths of glory - Structural positions and trajectories of websites within their topical territory](http://www.icwsm.org/2011/index.php) (Cardon, Fouetillou, Roth 2011) + * [Clustering based on random graph model embedding vertex features](http://linkinghub.elsevier.com/retrieve/pii/S0167865510000413) (Zanghi, Volant, Ambroise) + * [Strategies for Online Inference of Network Mixture](http://lbbe.univ-lyon1.fr/annexes/franck.../SSB-RR-14-online-estimation.pdf) (Zanghi, Picard, Miele, Ambroise) * Approches modèles pour la structuration du Web vu comme un graphe (Zanghi) If you want to work with smart people on interesting topics like graphs, and help them to implement solutions or experiment with some algorithm, you'll feel at home there. -h2. 
My future +## My future I'll move to San Francisco this summer to start my new job at Say. I have a lot to do before moving, but I'm very excited with this new opportunity. Sadly, I'll probably miss YAPC::EU, but I should do YAPC::NA next year \o/. diff --git a/_posts/2011-05-08-french_perl_workshop.md b/_posts/2011-05-08-french_perl_workshop.md index 6415a95..2d27494 100644 --- a/_posts/2011-05-08-french_perl_workshop.md +++ b/_posts/2011-05-08-french_perl_workshop.md @@ -6,7 +6,7 @@ summary: In which I remind you of the French Perl Worksphop. The call for paper for the [French Perl Workshop](http://journeesperl.fr/fpw2011/) is open. This event will be held the 24th and 25th of June in Paris. As always, this is a free conference. -<img class="img_center" src="/images/affiche_fpw11.jpg" /> +<img align="left" src="/images/affiche_fpw11.jpg" /> ## Where diff --git a/_posts/2011-06-20-stargit.textile b/_posts/2011-06-20-stargit.md index 3d5b807..0438359 100644 --- a/_posts/2011-06-20-stargit.textile +++ b/_posts/2011-06-20-stargit.md @@ -1,78 +1,80 @@ --- title: StarGit layout: post -category: community +summary: In which we take a look at StarGit. --- -Last year I did a "small exploration of GitHub":http://lumberjaph.net/graph/2010/03/25/github-explorer.html to show the various communities using "GitHub":http://github.com and how they work. I wanted to do it again this year, but I was lacking time and motivation to start over. A couple of months ago, I got a message from "mojombo":https://twitter.com/#!/mojombo asking me if I was planning to do a new poster. This triggered the motivation to work on it again. +Last year I did a [small exploration of GitHub](http://lumberjaph.net/graph/2010/03/25/github-explorer.html) to show the various communities using [GitHub](http://github.com) and how they work. I wanted to do it again this year, but I was lacking time and motivation to start over. A couple of months ago, I got a message from [mojombo](https://twitter.com/#!/mojombo) asking me if I was planning to do a new poster. This triggered the motivation to work on it again. -This time I got help from "Alexis":https://twitter.com/#!/jacomyal to provide you with an awesome tool: "a real explorer of your graph":http://www.stargit.net, but more on this later ;) +This time I got help from [Alexis](https://twitter.com/#!/jacomyal) to provide you with an awesome tool: [a real explorer of your graph](http://www.stargit.net), but more on this later ;) <img class="img_center" src="/static/imgs/stargit.png" title="StarGit" /> -And of course, "the poster":http://labs.linkfluence.net. Feel free to print it yourself, the size of the poster is A1. +And of course, [the poster](http://labs.linkfluence.net). Feel free to print it yourself, the size of the poster is A1. <img class="img_center" src="/static/imgs/github-poster-v2.png" title="GitHub Poster" /> -h2. The data +## The data -All the data are available! Last year I got some mails asking me for the dataset. So this time I asked first if I could release the "data":http://maps.startigt.net/dump/github.tgz with the "code":https://github.com/franckcuny/StarGit and the poster, and the anwser is yes! So if you're intereseted, you can download it. +All the data are available! Last year I got some mails asking me for the dataset. So this time I asked first if I could release the [data](http://maps.startigt.net/dump/github.tgz) with the [code](https://github.com/franckcuny/StarGit) and the poster, and the anwser is yes! So if you're intereseted, you can download it. 
The data are stored in mongodb, so I provide the dump which you can easily use: - # @wget http://maps.stargit.net/dump/github.tgz@ - # @tar xvzf github.tgz@ - # @cd github@ - # @mongorestore -d github .@ +{% highlight sh %} +% wget http://maps.stargit.net/dump/github. +% tar xvzf github.tgz +% cd github +% mongorestore -d github . +{% endhighlight %} Now you can use mongodb to browse the imported database. There is 5 collections: profiles / repositories / relations / contributions / edges. -h2. Methodology +## Methodology Last year I did a simple "follower/following" graph. It was already interesting, but it was also *really* too simple. This time I wanted to go deeper in the exploration. The various step to process all this data are: - * using the GitHub API, fetch informations from the profiles. - * when all the profiles are collected, informations about the repositories are fetched. Only forked repositories are kept. - * "simple" relations (followers/following) are kept and used later to add weight to relations. - * tag user with the main programming language they use. Using the GitHub API, I was able to categorize ~40k profiles (about 1/3 of my whole dataset). - * using the GeoNames API, extract the name of the country the user is in. This time, about 55k profiles were tagged. - * fetch contributions for each repositories - * compute a score between the author of the contribution and the owner of the repo - * add a weight to each edges, using the computed score and "+1" if the developer follow the other developer +* using the GitHub API, fetch informations from the profiles. +* when all the profiles are collected, informations about the repositories are fetched. Only forked repositories are kept. +* "simple" relations (followers/following) are kept and used later to add weight to relations. +* tag user with the main programming language they use. Using the GitHub API, I was able to categorize ~40k profiles (about 1/3 of my whole dataset). +* using the GeoNames API, extract the name of the country the user is in. This time, about 55k profiles were tagged. +* fetch contributions for each repositories +* compute a score between the author of the contribution and the owner of the repo +* add a weight to each edges, using the computed score and "+1" if the developer follow the other developer For all the graphs, I've used the following colors for: - * <span style="color:#C40C0F">Ruby</span> - * <span style="color:#4C9E97">JavaScript</span> - * <span style="color:#3F9E16">Python</span> - * <span style="color:#8431C4">C (C++, C#)</span> - * <span style="color:#29519E">Perl</span> - * <span style="color:#9D61C4">PHP</span> - * <span style="color:#C4B646">JVM (Java, Clojure, Scala)</span> - * <span style="color:#90C480">Lisp (Emacs Lisp, Common Lisp)</span> - * <span style="color:#9C9E9C">Other</span> +* <span style="color:#C40C0F">Ruby</span> +* <span style="color:#4C9E97">JavaScript</span> +* <span style="color:#3F9E16">Python</span> +* <span style="color:#8431C4">C (C++, C#)</span> +* <span style="color:#29519E">Perl</span> +* <span style="color:#9D61C4">PHP</span> +* <span style="color:#C4B646">JVM (Java, Clojure, Scala)</span> +* <span style="color:#90C480">Lisp (Emacs Lisp, Common Lisp)</span> +* <span style="color:#9C9E9C">Other</span> -h2. Exploring +## Exploring Feel free to do your own analysis in the comments :) For each map, you'll find a PDF of the map, and the graph to explore using gephi (in GEXF or GDF format). -h3. 
but first, some numbers +### but first, some numbers I've collected: - * 123 562 profiles - * 2 730 organizations - * 40 807 repositories +* 123 562 profiles +* 2 730 organizations +* 40 807 repositories This took me about a month in order to collect the data and to build the adapted tools. -h4. Accounts creations +### Accounts creations The following chart show the number of account created by month. "Everyone" means the total of accounts created. You can also see the numbers for each communities. -On the "Everyone" graph, you can see a huge pick around April 2008, that's the date GitHub "was launched":https://github.com/blog/40-we-launched. +On the "Everyone" graph, you can see a huge pick around April 2008, that's the date GitHub [was launched](https://github.com/blog/40-we-launched). For most of the communities, the number of created accounts start to decrease since 2010. I think the reason is that most of the developers from those communities are now on GitHub. @@ -148,139 +150,137 @@ $(function () { }); </script> -h4. languages +### Languages (Keep in mind that these numbers are coming from the profiles I was able to tag, roughly 40k) - # Ruby: 10046 (28%) - # Python: 5403 (15%) - # JavaScript: 5282 (15%) (JavaScript + CoffeeScript) - # C: 5093 (14%) (C, C++, C#) - # PHP: 3933 (11%) - # JVM: 3790 (10%) (Java, Clojure, Scala, Groovy) - # Perl: 1215 (3%) - # Lisp: 348 (0%) (Emacs Lisp, Common Lisp) +* Ruby: 10046 (28%) +* Python: 5403 (15%) +* JavaScript: 5282 (15%) (JavaScript + CoffeeScript) +* C: 5093 (14%) (C, C++, C#) +* PHP: 3933 (11%) +* JVM: 3790 (10%) (Java, Clojure, Scala, Groovy) +* Perl: 1215 (3%) +* Lisp: 348 (0%) (Emacs Lisp, Common Lisp) Those numbers doesn't really match "what GitHub gave":https://github.com/languages, but it could be explained by the way I've selected my users. -h4. country +### Country - # United States: 19861 (36%) - # United Kingdom: 3533 (6%) - # Germany: 3009 (5%) - # Canada: 2657 (4%) - # Brazil: 2454 (4%) - # France: 1833 (3%) - # Japan: 1799 (3%) - # Russia: 1604 (2%) - # Australia: 1441 (2%) - # China: 1159 (2%) +* United States: 19861 (36%) +* United Kingdom: 3533 (6%) +* Germany: 3009 (5%) +* Canada: 2657 (4%) +* Brazil: 2454 (4%) +* France: 1833 (3%) +* Japan: 1799 (3%) +* Russia: 1604 (2%) +* Australia: 1441 (2%) +* China: 1159 (2%) The United States are still the main country represented on GitHub, no suprise here. -If you are interested in the "geography" of Open Source, you should read these two articles: "Coding Places":http://takhteyev.org/dissertation/ and "Investigating the Geography of Open Source Software through Github":http://takhteyev.org/papers/Takhteyev-Hilts-2010.pdf. +If you are interested in the "geography" of Open Source, you should read these two articles: [Coding Places](http://takhteyev.org/dissertation/) and [Investigating the Geography of Open Source Software through GitHub](http://takhteyev.org/papers/Takhteyev-Hilts-2010.pdf). -h4. 
companies +### Companies Looking at the "company" field on users' profiles, here are some stats about which companies have employees using GitHub: - # ThoughtWorks: 102 - # Google: 66 - # Mozilla: 65 - # Yahoo!: 65 - # Red Hat: 64 - # Globo.com: 55 - # Twitter: 53 - # Facebook: 45 - # Yandex: 43 - # Intridea: 34 - # Microsoft: 33 - # Engine Yard: 32 - # Pivotal Labs: 29 - # MIT: 28 - # Rackspace: 27 - # IBM: 24 - # Caelum: 23 - # Novell: 22 - # GitHub: 22 - # VMware: 22 +* ThoughtWorks: 102 +* Google: 66 +* Mozilla: 65 +* Yahoo!: 65 +* Red Hat: 64 +* Globo.com: 55 +* Twitter: 53 +* Facebook: 45 +* Yandex: 43 +* Intridea: 34 +* Microsoft: 33 +* Engine Yard: 32 +* Pivotal Labs: 29 +* MIT: 28 +* Rackspace: 27 +* IBM: 24 +* Caelum: 23 +* Novell: 22 +* GitHub: 22 +* VMware: 22 I didn't know the first company, ThoughtWorks, and I was expecting to see Facebook or Twitter as the company with the most developers on GitHub. It's also interesting to see Yandex here. -h3. Global graph (1628 nodes, 9826 edges) +## Global graph (1628 nodes, 9826 edges) -("download PDF":http://maps.stargit.net/global/global.pdf, "download GDF":http://maps.stargit.net/global/global.gdf) +([download PDF](http://maps.stargit.net/global/global.pdf), [download GDF](http://maps.stargit.net/global/global.gdf)) The main difference with last year is the Android / modders community. They're developing mostly in C and Java. The poster has been created from this map. -h3. Ruby (1968 nodes, 9662 edges) +## Ruby (1968 nodes, 9662 edges) -("download PDF":http://maps.stargit.net/ruby/ruby.pdf, "download GDF":http://maps.stargit.net/ruby/ruby.gdf, "download GEXF":http://maps.stargit.net/ruby/ruby.gexf) +([download PDF](http://maps.stargit.net/ruby/ruby.pdf), [download GDF](http://maps.stargit.net/ruby/ruby.gdf), [download GEXF](http://maps.stargit.net/ruby/ruby.gexf)) -This is still the main community on GitHub, even if JavaScript is now "the most popular language":https://github.com/languages/JavaScript. This graph is really dense, it's not easy to read, since there is no real cluster in this one. +This is still the main community on GitHub, even if JavaScript is now [the most popular language](https://github.com/languages/JavaScript). This graph is really dense and not easy to read, since there is no real cluster in this one. -h3. Python (1062 nodes, 2631 edges) +## Python (1062 nodes, 2631 edges) -("download PDF":http://maps.stargit.net/python/python.pdf, "download GDF":http://maps.stargit.net/python/python.gdf) +([download PDF](http://maps.stargit.net/python/python.pdf), [download GDF](http://maps.stargit.net/python/python.gdf)) Here we have some clusters. I'm not familiar with the Python community, so I can't really give any insight. -h3. Perl (608 nodes, 2967 edges) +## Perl (608 nodes, 2967 edges) -("download PDF":http://maps.stargit.net/perl/perl.pdf, "download GDF":http://maps.stargit.net/perl/perl.gdf, "download GEXF":http://maps.stargit.net/perl/perl.gexf) +([download PDF](http://maps.stargit.net/perl/perl.pdf), [download GDF](http://maps.stargit.net/perl/perl.gdf), [download GEXF](http://maps.stargit.net/perl/perl.gexf)) I really like this graph since it shows (in my opinion) one of the real strengths of this community: everybody works with everybody. People working on a web framework will collaborate with people working on Moose, or an ORM, or other tools. It shows that in this community, people are competent in more than one field. The Perl community is about the same size as last year. 
However, we can extract the following information: - * the Japaneses Perl Hackers are still a cluster by themselves - * "miyagawa":http://github.com/miyagawa is still the glue between the Japanese community and the "rest of the world" - * other leaders are: Florian Ragwitz ("rafl":http://github.com/rafl), Andy Amstrong ("AndyA":http://github.com/andya), Dave Rolsky ("autarch":http://github.com/autarch) - * some clusters exists for the following projects: - ** Moose - ** Dancer +* the Japanese Perl hackers are still a cluster by themselves +* [miyagawa](http://github.com/miyagawa) is still the glue between the Japanese community and the "rest of the world" +* other leaders are: Florian Ragwitz ([rafl](http://github.com/rafl)), Andy Armstrong ([AndyA](http://github.com/andya)), Dave Rolsky ([autarch](http://github.com/autarch)) +* some clusters exist for Moose and Dancer. As we can see on the previous charts, the number of created accounts for the Perl developers is stalling. -h3. United States (2646 nodes, 11344 edges) +## United States (2646 nodes, 11344 edges) -("download PDF":http://maps.startgit.net/unitedstates/unitedstates.pdf, "download GDF":http://maps.startgit.net/unitedstates/unitedstates.gdf, "download GEXF":http://maps.startgit.net/unitedstates/unitedstates.gexf) +([download PDF](http://maps.startgit.net/unitedstates/unitedstates.pdf), [download GDF](http://maps.startgit.net/unitedstates/unitedstates.gdf), [download GEXF](http://maps.startgit.net/unitedstates/unitedstates.gexf)) This one is really nice. We can clearly see all the communities. There is something interesting: - # C and Ruby are on the opposite side (C on the left, Ruby on the right) - # Python and Perl are also opposed (Perl at the bottom and Python at the top) +* C and Ruby are on opposite sides (C on the left, Ruby on the right) +* Python and Perl are also opposed (Perl at the bottom and Python at the top) I'll let you draw your own conclusions on this one ;) -h3. France (706 nodes, 1059 edges) +## France (706 nodes, 1059 edges) -("download PDF":http://maps.stargit.net/france/france.pdf, "download GDF":http://maps.stargit.net/france/france.gdf, "download GEXF":http://maps.stargit.net/france/france.gexf) +([download PDF](http://maps.stargit.net/france/france.pdf), [download GDF](http://maps.stargit.net/france/france.gdf), [download GEXF](http://maps.stargit.net/france/france.gexf)) We have a lot of small clusters on this one, and some very big authorities. -h3. Japan (464 nodes, 1091 edges) +## Japan (464 nodes, 1091 edges) -("download PDF":http://maps.stargit.net/japan/japan.pdf, "download GDF":http://maps.stargit.net/japan/japan.gdf, "download GEXF":http://maps.stargit.net/japan/japan.gexf) +([download PDF](http://maps.stargit.net/japan/japan.pdf), [download GDF](http://maps.stargit.net/japan/japan.gdf), [download GEXF](http://maps.stargit.net/japan/japan.gexf)) There are three dominant clusters on this one: - # Ruby - # Perl - # C +* Ruby +* Perl +* C The Ruby and Perl ones are well connected. There are a lot of Japanese hackers on CPAN using both languages. -h2. StarGit +## StarGit -"StarGit":http://stargit.net is a great tool we built with Alexis to let you explore *your* community on GitHub. You can read more about the application on "Alexis' blog":http://ofnodesandedges.com/2011/06/20/stargit.html +[StarGit](http://stargit.net) is a great tool we built with Alexis to let you explore **your** community on GitHub. 
You can read more about the application on [Alexis' blog](http://ofnodesandedges.com/2011/06/20/stargit.html). -It's hosted on "dotcloud":http://dotcloud.com (I'm still amazed at how easy it was to deploy the code ...), using the Perl "Dancer web framework":http://perldancer.org, MongoDB to store the data, and Redis to do some caching. +It's hosted on [dotcloud](http://dotcloud.com) (I'm still amazed at how easy it was to deploy the code ...), using the Perl [Dancer web framework](http://perldancer.org), MongoDB to store the data, and Redis to do some caching. -h2. Credits +## Credits I would like to thank the whole GitHub team for being interested in the previous poster and for asking for another one this year :) -A *huge* thanks to Alexis for his help on building the awesome StarGit. Another big thanks to Antonin for his work on the poster. +A **huge** thanks to Alexis for his help on building the awesome StarGit. Another big thanks to Antonin for his work on the poster. diff --git a/_posts/2012-02-17-HTTP_requests_with_python.md b/_posts/2012-02-17-HTTP_requests_with_python.md new file mode 100644 index 0000000..f3cbabb --- /dev/null +++ b/_posts/2012-02-17-HTTP_requests_with_python.md @@ -0,0 +1,131 @@ +--- +layout: post +summary: In which I express my frustration toward HTTP libraries in Python. +title: The state of HTTP's libraries in Python +--- + +## Hey! I'm alive! + +I've started to write some Python for work, and since I'm new at the game, I've decided to start using it for some personal projects too. + +Most of what I do is related to web stuff: writing APIs, API clients, web frameworks, etc. At [Say](http://www.saymedia.com/) I'm working on our platform. Nothing fancy, but really interesting (at least to me) and challenging work (and we're recruiting, drop me a mail if you want to know more). + +## Writing HTTP requests with Python + +### httplib + +[httplib](http://docs.python.org/library/httplib.html) is part of the standard library. The documentation says: *It is normally not used directly*. And when you look at the API you understand why: it's very low-level. It uses the HTTPMessage library (not documented, and not easily accessible). It will return an HTTPResponse object, but again, no documentation, and a poor interface. + +### httplib2 + +[httplib2](http://code.google.com/p/httplib2/) is a very popular library for writing HTTP requests with Python. It's the one used by Google for its [google-api-python-client](http://code.google.com/p/google-api-python-client/) library. There's absolutely nothing in common between httplib's API and this one. + +I don't like its API: the way the library handles the **Response** object seems wrong to me. You should get one object for the response, not a tuple with the response and the content. The request should also be an object. Also, the status code is considered as a header, and you lose the message that comes with the status. + +There is also an important issue with httplib2 that we discovered at work. In some cases, if there is an error, httplib2 will retry the request. That means, in the case of a POST request, it will send the payload twice. There is [a ticket asking to fix that](http://code.google.com/p/httplib2/issues/detail?id=124), marked as **won't fix**. [Even when there is a perfectly acceptable patch for this issue.](http://codereview.appspot.com/4365054/) (it's a [WAT](https://www.destroyallsoftware.com/talks/wat) moment). I'm really curious to know what the motivation behind this was, because it doesn't make sense at all. 
Why would you want your client to retry your request twice if it fails? + +### urllib + +[urllib](http://docs.python.org/library/urllib.html) is also part of the standard library. I was surprised, because given the name, I was expecting a lib to *manipulate* a URL. And indeed, it also does that! This library mixes too many different things. + +### urllib2 + +[urllib2](http://docs.python.org/library/urllib2.html). And because 2 is not enough, also ... + +### urllib3 + +[urllib3](http://code.google.com/p/urllib3/). I thought for a moment that, maybe, the number was related to the version of Python. I'll spare you the suspense, it's not the case. Now I would have expected them to be related to each other (sharing some common API, the number being just a way to provide a better API than the previous version). Sadly it's not the case, they all implement different APIs. + +At least, urllib3 has some interesting features: + +* Thread-safe connection pooling and re-using with HTTP/1.1 keep-alive +* HTTP and HTTPS (SSL) support + +### requests + +A few people pointed me to [requests](http://pypi.python.org/pypi/requests). And indeed, this one is the nicest of all. Still, not exactly what *I*'m looking for. This library looks like [LWP::Simple](https://metacpan.org/module/LWP::Simple), a library built on top of various HTTP components to help you with the common cases. For most developers it will be fine and do the work as intended. + +## What I want + +Since I'm primarily a Perl developer (this is where 99% of the readers are leaving the page), I've been using [LWP](https://metacpan.org/module/LWP) and HTTP::Messages for more than 8 years. LWP is an awesome library. It's 16 years old, and it's still actively developed by its original author [Gisle Aas](https://metacpan.org/author/GAAS). He deserves a lot of respect for his dedication. + +There are a few other libraries in Perl to do HTTP requests, like: + + * [AnyEvent::HTTP](https://metacpan.org/module/AnyEvent::HTTP): if you need to do asynchronous calls + * [Furl](https://metacpan.org/module/Furl): by Tokuhiro and his yakuza gang + +but most of the time, you end up using LWP with HTTP::Messages. + +One of the reasons this couple is so popular is that it provides the right abstractions: + +* a user-agent is provided by LWP::UserAgent (that you can easily extend to build a custom user agent) +* a Response class to encapsulate HTTP-style responses, provided by HTTP::Message +* a Request class to encapsulate HTTP-style requests, provided by HTTP::Message + +The response and request objects use HTTP::Headers and HTTP::Cookies. This way, even if you're building a web framework and not an HTTP client, you'll end up using HTTP::Headers and HTTP::Cookies, since they provide the right API, they're well tested, and you only have to learn one API, whether you're in an HTTP client or a web framework. + +## http + +So now you start seeing where I'm going. And you're saying "oh no, don't tell me you're writing *another* HTTP library". Hell yeah, I am (sorry, Masa). But to be honest, I doubt you'll ever use it. It's doing the job *I* want, the way *I* want. And it's probably not what you're expecting. 
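Before getting into what it provides, here is a minimal, purely illustrative sketch of the kind of separation I keep describing (the class names below are made up for the example; this is not the API of httplib2, requests, or of the library discussed next): a headers container shared by a request object and a response object, each usable on its own.

{% highlight python %}
# Illustrative only: hypothetical classes showing the LWP-style split
# between headers, request and response objects.

class Headers(object):
    """A multi-value header container, shared by requests and responses."""

    def __init__(self):
        self._items = []

    def add(self, name, value):
        self._items.append((name, value))

    def items(self):
        return list(self._items)


class Request(object):
    def __init__(self, method, url, headers=None, body=None):
        self.method = method
        self.url = url
        self.headers = headers or Headers()
        self.body = body


class Response(object):
    def __init__(self, status, reason, headers=None, body=None):
        self.status = status    # the numeric status code...
        self.reason = reason    # ...and the message that comes with it
        self.headers = headers or Headers()
        self.body = body


req = Request("GET", "http://lumberjaph.net")
req.headers.add("Accept", "text/html")
print(req.method)

res = Response(200, "OK", body="<html></html>")
res.headers.add("Content-Type", "text/html")
print(res.reason)
{% endhighlight %}

The point is only the shape: the status code and its message live on the response, and the same headers class serves both sides.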
+ +[http](https://github.com/franckcuny/httpclient/) is providing an abstraction for the following things: + +* http.headers +* http.request +* http.response +* http.date +* http.url (by my good old friend "bl0b":https://github.com/bl0b) + +I could have named it **httplib3**, but **http** seems a better choice: it's a library that deals with the HTTP protocol and provide abstraction on top of it. + +You can found the [documentation here](http://http.readthedocs.org/en/latest/index.html) and install it from [PyPI](http://pypi.python.org/pypi/http/). + +### examples + +A few examples + +{% highlight python %} +>>> from http import Request +>>> r = Request('GET', 'http://lumberjaph.net') +>>> print r.method +GET +>>> print r.url +http://lumberjaph.net +>>> r.headers.add('Content-Type', 'application/json') +>>> print r.headers +Content-Type: application/json + + +>>> +{% endhighlight %} + +{% highlight python %} +>>> from http import Headers +>>> h = Headers() +>>> print h + + +>>> h.add('X-Foo', 'bar') +>>> h.add('X-Bar', 'baz', 'foobarbaz') +>>> print h +X-Foo: bar +X-Bar: baz +X-Bar: foobarbaz + + +>>> for h in h.items(): +... print h +... +('X-Foo', 'bar') +('X-Bar', 'baz') +('X-Bar', 'foobarbaz') +>>> +{% endhighlight %} + +### a client + +With this, you can easily build a very simple client combining thoses classes, or a more complex one. Or maybe you want to build a web framework, or a framework to test HTTP stuff, and you need a class to manipulate HTTP headers. Then you can use http.headers. The same if you need to create some HTTP responses: http.response. + +I've started to write [httpclient](https://github.com/franckcuny/httpclient/) based on this library that will mimic LWP's API. + +I've started [to document this library](http://httpclient.readthedocs.org/en/latest/index.html) and I hope to put something on PyPI soon. diff --git a/_posts/2012-02-17-HTTP_requests_with_python.textile b/_posts/2012-02-17-HTTP_requests_with_python.textile deleted file mode 100644 index 1df19ac..0000000 --- a/_posts/2012-02-17-HTTP_requests_with_python.textile +++ /dev/null @@ -1,97 +0,0 @@ ---- -layout: post -category: python -title: The state of HTTP's libraries in Python ---- - -h2. Hey! I'm alive! - -I've started to write some Python for work, and since I'm new at the game, I've decided to start using it for some personal project too. - -Most of what I do is related to web stuff: writing API, API client, web framweork, etc. At "SAY:":http://www.saymedia.com/ I'm working on our platform. Nothing fancy, but really interesting (at least to me) and challenging work (and we're recruting, drop me a mail if you want to know more). - -h2. Writing HTTP requests with Python - -h3. httplib - -"httplib":http://docs.python.org/library/httplib.html is part of the standard library. The documentation says: "It is normally not used directly". And when you look at the API you understand why: it's very low-level. It uses the HTTPMessage library (not documented, and not easily accessible). It will return an HTTPResponse object, but again, no documentation, and poor interface. - -h3. httplib2 - -"httplib2":http://code.google.com/p/httplib2/ is a very popular library for writing HTTP request with Python. It's the one used by Google for it's "google-api-python-client":http://code.google.com/p/google-api-python-client/ library. There's absolutly nothing in common between httplib's API and this one. - -I dont like it's API: the way the library handles the *Response* object seems wrong to me. 
You should get one object for the response, not a tuple with the response and the content. The request should also be an object. Also, The status code is considered as a header, and you lose the message that comes with the status. - -There is also an important issue with httplib2 that we discovered at work. In some case, if there is an error, httplib2 will retry the request. That means, in the case of a POST request, it will send twice the payload. There is "a ticket that ask to fix that":http://code.google.com/p/httplib2/issues/detail?id=124, marked as *won't fix*. "Even when there is a perfectly acceptable patch for this issue.":http://codereview.appspot.com/4365054/ (it's a ""WAT":https://www.destroyallsoftware.com/talks/wat" moment). I'm really curious to know what was the motiviation behind this, because it doesn'nt makes sense at all. Why would you want your client to retry twice your request if it fails ? - -h3. urllib - -"urllib":http://docs.python.org/library/urllib.html is also part of the standard library. I was suprised, because given the name, I was expecting a lib to *manipulate* an URL. And indeed, it also does that! This library mix too many different things. - -h3. urllib2 - -"urllib2":http://docs.python.org/library/urllib2.html. And because 2 is not enough, also ... - -h3. urllib3 - -"urllib3":http://code.google.com/p/urllib3/. I thought for a moment that, maybe, the number number was related to the version of Python. I'll spare you the suspense, it's not the case. Now I would have expected them to be related to each other (sharing some common API, the number being just a way to provides a better API than the previous version). Sadly it's not the case, they all implement different API. - -At least, urllib3 has some interesting features: - -* Thread-safe connection pooling and re-using with HTTP/1.1 keep-alive -* HTTP and HTTPS (SSL) support - -h3. request - -A few persons pointed me to "requests":http://pypi.python.org/pypi/requests. And indeed, this one is the nicest of all. Still, not exactly what *I*'m looking for. This library looks like "LWP::Simple":https://metacpan.org/module/LWP::Simple, a library build on top of various HTTP components to help you for the common case. For most of the developers it will be fine and do the work as intented. - -h2. What I want - -Since I'm primarly a Perl developer (here is were 99% of the readers are leaving the page), I've been using "LWP":https://metacpan.org/module/LWP and HTTP::Messages for more than 8 years. LWP is an awesome library. It's 16 years old, and it's still actively developed by it's original author "Gisle Aas":https://metacpan.org/author/GAAS. He deserves a lot of respect for his dedication. - -There is a few other library in Perl to do HTTP request, like: - - * "AnyEvent::HTTP":https://metacpan.org/module/AnyEvent::HTTP : if you need to do asynchronous call - * "Furl":https://metacpan.org/module/Furl : by Tokuhiro and his yakuza gang - -but most of the time, you end up using LWP with HTTP::Messages. - -One of the reason this couple is so popular is because it provides the right abstraction: - - * a user-agent is provided by LWP::UserAgent (that you can easily extends to build some custom useragent) - * a Response class to encapsulates HTTP style responses, provided by HTTP::Message - * a Request class to encapsulates HTTP style request, provided by HTTP::Message - -The response and request objects use HTTP::Headers and HTTP::Cookies. 
This way, even if your building a web framework and not a HTTP client, you'll endup using HTTP::Headers and HTTP::Cookies since they provide the right API, they're well tested, and you only have to learn one API, wether you're in an HTTP client or a web framework. - -h2. http - -So now you start seeing where I'm going. And you're saying "ho no, don't tell me you're writing *another* HTTP library". Hell yeah, I am (sorry, Masa). But to be honest, I doubt you'll ever use it. It's doing the job *I* want, the way *I* want. And it's probably not what you're expecting. - -*"http":https://github.com/franckcuny/httpclient/* is providing an abstraction for the following things: - - * http.headers - * http.request - * http.response - * http.date - * http.url (by my good old friend "bl0b":https://github.com/bl0b) - -I could have named it *httplib3*, but *http* seems a better choice: it's a library that deals with the HTTP protocol and provide abstraction on top of it. - -You can found the "documentation here":http://http.readthedocs.org/en/latest/index.html and install it from "PyPI":http://pypi.python.org/pypi/http/. - -h3. examples - -A few examples - -<script src="https://gist.github.com/1659656.js?file=gistfile1.py"></script> - -<script src="https://gist.github.com/1659656.js?file=headers"></script> - -h3. a client - -With this, you can easily build a very simple client combining thoses classes, or a more complex one. Or maybe you want to build a web framework, or a framework to test HTTP stuff, and you need a class to manipulate HTTP headers. Then you can use http.headers. The same if you need to create some HTTP responses: http.response. - -I've started to write *"httpclient":https://github.com/franckcuny/httpclient/* based on this library that will mimic LWP's API. - -I've started "to document this library":http://httpclient.readthedocs.org/en/latest/index.html and I hope to put something on PyPI soon. diff --git a/_posts/2012-10-31-virtualenv-and-checkouts.md b/_posts/2012-10-31-virtualenv-and-checkouts.md index 039d4b3..c5356a9 100644 --- a/_posts/2012-10-31-virtualenv-and-checkouts.md +++ b/_posts/2012-10-31-virtualenv-and-checkouts.md @@ -4,17 +4,17 @@ title: Virtualenv and checkouts summary: In which I share a trick for virtualenv --- -I've started to do some Clojure in my spare time. The default tool adopted by the community to manage projects is [leiningen](http://leiningen.org). For those of you who don't know what **lein** is, it's a tool to automate your Clojure project: it will boostrap a new project, install the dependencies, and there's a plugin mechanism to extend the default possibilities of the tool. +I've started to do some Clojure in my spare time. The default tool adopted by the community to manage projects is [leiningen](http://leiningen.org). For those of you who don't know what `lein` is, it's a tool to automate your Clojure project: it will boostrap a new project, install the dependencies, and there's a plugin mechanism to extend the default possibilities of the tool. -One of the nice feature of the tool is the **checkouts/** directory. From the [FAQ](https://github.com/technomancy/leiningen/blob/preview/doc/FAQ.md): +One of the nice feature of the tool is the **checkouts** directory. From the [FAQ](https://github.com/technomancy/leiningen/blob/preview/doc/FAQ.md): > If you create a directory named checkouts in your project root and symlink some other project roots into it, Leiningen will allow you to hack on them in parallel. 
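In Python terms, the effect would be roughly the sketch below: anything found under a project-local **checkouts/** directory gets prepended to the import path, so the working copies win over the installed versions (the paths here are hypothetical, and this is only an illustration of the idea, not the hook I actually ended up writing, which is shown a bit further down).

{% highlight python %}
import glob
import os
import sys

# Hypothetical layout: ~/code/myproject/checkouts/<some-library>
project_root = os.path.expanduser("~/code/myproject")
checkouts = os.path.join(project_root, "checkouts")

if os.path.isdir(checkouts):
    for path in sorted(glob.glob(os.path.join(checkouts, "*"))):
        # Prepend so the checked-out working copy is picked up before
        # any version installed in the virtualenv.
        sys.path.insert(0, path)
{% endhighlight %}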
For Python projects at [$work](http://www.saymedia.com/careers) I use [virtualenvwrapper](http://virtualenvwrapper.readthedocs.org/en/latest/) to easily work on them without having to deal with conflicting dependencies. When I need to change a library that is used by one of the project, usually I go to the virtualenv directory and create a symlink so it uses the one I'm editing. -What I really want is a mechanism similar to **lein**, where I can have a **checkouts/** directory inside the main project, where I can clone a library or create a symlink. Since **virtualenvwrapper** provides a hook mechanism, I wrote a small hook inside **~/.virtualenvs/postactivate**: +What I really want is a mechanism similar to `lein`, where I can have a **checkouts/** directory inside the main project, where I can clone a library or create a symlink. Since `virtualenvwrapper` provides a hook mechanism, I wrote a small hook inside **~/.virtualenvs/postactivate**: -```sh +{% highlight sh %} #!/bin/bash # move to the directory of the project @@ -28,6 +28,6 @@ if [ -d checkouts ]; then export PYTHONPATH=proj_path/checkouts/$ext:$PYTHONPATH done fi -``` +{% endhighlight %} Then, when I type `workon $project_name` in my shell, the environment is activated, I'm moved to the right directory, and the library inside the **checkouts/** directory are added to my **PYTHONPATH**. diff --git a/_posts/2012-11-14-two-tech-talks-in-a-day.md b/_posts/2012-11-14-two-tech-talks-in-a-day.md index d911531..ea0ba6b 100644 --- a/_posts/2012-11-14-two-tech-talks-in-a-day.md +++ b/_posts/2012-11-14-two-tech-talks-in-a-day.md @@ -8,7 +8,7 @@ Today I assisted to two tech. talks. One of them was our "Reading Group" sessio ## Say's tech talk -I'm trying to organize at [work](http://saymedia.com), every two weeks during lunch time, a session where engineers can discuss about an article, tool, or paper they find interesting. Today we were a very small group (only 4 peoples), and we talked about two tools that [Masa](http://sekimura.typepad.com/blog/) wanted to explore: [Kage](https://github.com/cookpad/kage) and ]HTTP Archive](http://www.igvita.com/2012/08/28/web-performance-power-tool-http-archive-har/). +I'm trying to organize at [work](http://saymedia.com), every two weeks during lunch time, a session where engineers can discuss about an article, tool, or paper they find interesting. Today we were a very small group (only 4 peoples), and we talked about two tools that [Masa](http://sekimura.typepad.com/blog/) wanted to explore: [Kage](https://github.com/cookpad/kage) and [HTTP Archive](http://www.igvita.com/2012/08/28/web-performance-power-tool-http-archive-har/). 
### Kage diff --git a/_posts/2012-11-27-ansible-and-chef.mdown b/_posts/2012-11-27-ansible-and-chef.md index 2f65ea8..2f65ea8 100644 --- a/_posts/2012-11-27-ansible-and-chef.mdown +++ b/_posts/2012-11-27-ansible-and-chef.md diff --git a/_posts/2012-11-28-perl-redis-and-anyevent-at-craiglist.mdown b/_posts/2012-11-28-perl-redis-and-anyevent-at-craiglist.md index f4e2ac0..f4e2ac0 100644 --- a/_posts/2012-11-28-perl-redis-and-anyevent-at-craiglist.mdown +++ b/_posts/2012-11-28-perl-redis-and-anyevent-at-craiglist.md diff --git a/_posts/2013-01-28-let-s-talk-about-graphite.md b/_posts/2013-01-28-let-s-talk-about-graphite.md index 7d84a83..2722123 100644 --- a/_posts/2013-01-28-let-s-talk-about-graphite.md +++ b/_posts/2013-01-28-let-s-talk-about-graphite.md @@ -1,6 +1,6 @@ --- layout: post -category: devops +summary: In which I share my experience with Graphite title: Let's talk about Graphite --- diff --git a/_posts/2013-07-28-patch.pm-report.md b/_posts/2013-07-28-patch.pm-report.md index dcbcfde..f29503a 100644 --- a/_posts/2013-07-28-patch.pm-report.md +++ b/_posts/2013-07-28-patch.pm-report.md @@ -1,16 +1,14 @@ --- layout: post -category: perl title: Patch.pm - report +summary: In which I summarize my activity during the first patch.pm --- This week-end I participated to [patch.pm](http://patch.pm/p0), the first version of a new hackathon organized by the Mongueurs. Sadly (or maybe not) I was not physically with my fellow mongueurs to work on code, and did it remotely. My goal for this two days was to get some work done on [Dancer2](https://github.com/PerlDancer/Dancer2): code review, fixes, submit bugs, and get some work done on the core. -## Saturday - -The day started by a discussion with [Alberto](https://github.com/ambs) and [Sawyer](https://github.com/xsawyerx) about what should be our priorities. None of us had really worked on Dancer2 so far, so we had a lot to catch up. We started with a list of things we think should be easy to work on, so we could focus on them in order to be able to deliver something by the end of the weekend. We quickly agreed on it, and then we started to code. +Saturday started by a discussion with [Alberto](https://github.com/ambs) and [Sawyer](https://github.com/xsawyerx) about what should be our priorities. None of us had really worked on Dancer2 so far, so we had a lot to catch up. We started with a list of things we think should be easy to work on, so we could focus on them in order to be able to deliver something by the end of the weekend. We quickly agreed on it, and then we started to code. Sawyer started his chainsaw and pushed a dozen of commits to clean up some code. He also write a few emails to the team with more short and medium terms objectives. @@ -18,11 +16,7 @@ On my side, I wanted to fix the code related to the hooks. Not all the hooks ava During that time, Alberto merged a few pull requests and fixed the travis build! -## Sunday - -I only managed to get two hours of work done that day. The biggest change was a refactoring of the Response and Request objects, to give them a similar API. Then I took a look at some of the changes made by Sawyer and did some review. - -## Laurent, when is the next one ?! +I only managed to get two hours of work done on Sunday. The biggest change was a refactoring of the Response and Request objects, to give them a similar API. Then I took a look at some of the changes made by Sawyer and did some review. It was fun. 
Working again with Alberto and Sawyer was really great, and I realized how much I missed working with this guys. I'm excited for the next comming weeks, more work will be done (there's still some issues that we need to fix, and some documentation clean up) and some releases will be pushed out. @@ -1,21 +1,33 @@ --- layout: default -title: I'm a lumberjaph --- -<h2>writing</h2> +{% assign post = site.posts.first %} -<ul> -{% for post in site.posts %} -<li> - <a href="{{ post.url }}">{{ post.title }}</a> - <span class="date">{{ post.date | date_to_string }}</span> -</li> -{% endfor %} -</ul> +<h1><a href="{{ post.url }}">{{ post.title }}</a></h1> -<hr/> +{% if post.previous.url %} + <p id="bigleft"><a href="{{post.previous.url}}">«</a></p><br /> +{% endif %} +{% if post.next.url %} + <p id="bigright"><a href="{{post.next.url}}">»</a></p> +{% endif %} -<footer> - <span>I sleep all night and I work all day.</span><br/> -</footer> +<div id="entry"> + {{ post.content }} + + <p class="timestamp"> + {% if post.previous.url %} + <a href="{{post.previous.url}}">« {{post.previous.title}}</a> | + {% endif %} + + {{ post.date | date_to_string }} + + {% if post.next.url %} + | <a href="{{post.next.url}}">{{post.next.title}} »</a> + {% endif %} + </p> + +</div> + +{% include footer.html %} @@ -0,0 +1,18 @@ +--- +layout: static +title: List of articles +--- + +<div id='wrapper'> + <h3>Posts</h3> + + <ul> + {% for post in site.posts %} + <li> + <a href="{{ post.url }}">{{ post.title }}</a> + <span class="date">{{ post.date | date_to_string }}</span> + </li> + {% endfor %} + </ul> + +</div> diff --git a/projects.md b/projects.md index 1a5815b..50f6325 100644 --- a/projects.md +++ b/projects.md @@ -3,7 +3,9 @@ layout: static title: Projects --- -<h2>Previous and Upcoming Talks</h2> +<div id='wrapper'> + +<h4>Previous and Upcoming Talks</h4> <ul> <li><article itemscope itemtype="http://data-vocabulary.org/Event" class="talk"> @@ -78,3 +80,6 @@ title: Projects <em>Catalyst Advent Calendar</em> — <a href="http://www.catalystframework.org/calendar/2009/19">Writing REST services with Catalyst</a> </li> </ul> + +</div> + diff --git a/resume.md b/resume.md new file mode 100644 index 0000000..39c82db --- /dev/null +++ b/resume.md @@ -0,0 +1,72 @@ +--- +layout: static +title: Résumé +--- + +<div id='resume'> + + <div style='text-align: center;'> + <a name='top'></a><h2>Franck Cuny</h2> + <address>franckcuny<span style='display: none;'>¡no spam thank you!</span>@gmail.com</address> + </div> + + <div> + <a name='skills'></a><h3>Skills</h3> + <dl> + <dt>Languages</dt> + <dd>Proficient in Perl. Experience with Python, Shell. Interested in Clojure, Scheme (racket). </dd> + + <dt>Tools</dt> + <dd>Used, deployed or managed Emacs, Git, Jenkins, Carbon, Graphite.</dd> + </dl> + </div> + + <div id='contributions'> + <a name='contributions'></a><h3>Free Software Contributions</h3> + I've contributed to <a hrefp="http://perldancer.org">Dancer</a>, a web framework in Perl. I also maintain and contribute to several CPAN modules. + + I've created SPORE. + + <p style='clear: both'><a href='/projects'>Details</a></p> + </div> + + <div> + <a name='work'></a><h3>Work Experience</h3> + <p><a href='http://saymedia.com'>Say Media</a>, San Francisco, CA [August 2011 - present]</p> + From August 2011 to December 2012 I was part of the platform team. Tasks included designing the internal APIs. + + Starting December 2012 I joined a newly created team to improve developers efficiency by writing tools and providing a better infrastructure. 
Projects included setting up better monitoring with Graphite. + + + <p><a href='https://developers.google.com/open-source/soc/'>Summer of Code Mentor</a>, Paris, France [May 2011 - August 2011]</p> + As part of the Summer of Code 2011, I mentored one student to work on the Perl Dancer web framework. My job was to set priorities, help with development practices and review the code. I was also involved in the evaluation process. + + <p><a href='http://linkfluence.net'>Linkfluence (RTGI)</a>, Paris, France [August 2007 - July 2011]</p> + Linkfluence is a social media research company. Projects I've been involved on: + <ul> + <li>Set up procedures for development practices and software deployment (git, unit testing, how and what to document, etc)</li> + <li>Lead developer of the backend used to aggregate data from various sources (feeds, web pages, external API, etc)</li> + <li>Defined the architecture and designed the ReST API for most of the internal backend</li> + <li>Wrote many libraries for accessing various webservices, data storage (Riak, CouchDB). Most of them have been open sourced</li> + </ul> + + <p>Axe Media Direct, Brest, France [January 2005 - August 2007]</p> + <ul> + <li>Development of an online customer loyalty program (consultation, listing, statistics, etc.) (Catalyst Framework)</li> + <li>PDF documents generation with XSLT</li> + <li>Web server configuration and maintenance for database consulting</li> + </ul> + + <p><a href='https://www.cmb.fr'>Crédit Mutuel de Bretagne</a>, Brest, France [April 2004- January 2005]<p + + </div> + + <div> + <a name='other'></a> <h3>Other</h3> + <p>Self-taught (no college degree).</p> + + <p> Native French speaker, fluent in English.</p> + + </div> + +</div> diff --git a/static/css/emacs.css b/static/css/emacs.css new file mode 100644 index 0000000..6c173e6 --- /dev/null +++ b/static/css/emacs.css @@ -0,0 +1,62 @@ +.highlight .hll { background-color: #ffffcc } +.highlight { background: #f8f8f8; } +.highlight .c { color: #008800; font-style: italic } /* Comment */ +.highlight .err { border: 1px solid #FF0000 } /* Error */ +.highlight .k { color: #AA22FF; font-weight: bold } /* Keyword */ +.highlight .o { color: #666666 } /* Operator */ +.highlight .cm { color: #008800; font-style: italic } /* Comment.Multiline */ +.highlight .cp { color: #008800 } /* Comment.Preproc */ +.highlight .c1 { color: #008800; font-style: italic } /* Comment.Single */ +.highlight .cs { color: #008800; font-weight: bold } /* Comment.Special */ +.highlight .gd { color: #A00000 } /* Generic.Deleted */ +.highlight .ge { font-style: italic } /* Generic.Emph */ +.highlight .gr { color: #FF0000 } /* Generic.Error */ +.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ +.highlight .gi { color: #00A000 } /* Generic.Inserted */ +.highlight .go { color: #808080 } /* Generic.Output */ +.highlight .gp { color: #000080; font-weight: bold } /* Generic.Prompt */ +.highlight .gs { font-weight: bold } /* Generic.Strong */ +.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ +.highlight .gt { color: #0040D0 } /* Generic.Traceback */ +.highlight .kc { color: #AA22FF; font-weight: bold } /* Keyword.Constant */ +.highlight .kd { color: #AA22FF; font-weight: bold } /* Keyword.Declaration */ +.highlight .kn { color: #AA22FF; font-weight: bold } /* Keyword.Namespace */ +.highlight .kp { color: #AA22FF } /* Keyword.Pseudo */ +.highlight .kr { color: #AA22FF; font-weight: bold } /* Keyword.Reserved */ +.highlight .kt { color: #00BB00; 
font-weight: bold } /* Keyword.Type */ +.highlight .m { color: #666666 } /* Literal.Number */ +.highlight .s { color: #BB4444 } /* Literal.String */ +.highlight .na { color: #BB4444 } /* Name.Attribute */ +.highlight .nb { color: #AA22FF } /* Name.Builtin */ +.highlight .nc { color: #0000FF } /* Name.Class */ +.highlight .no { color: #880000 } /* Name.Constant */ +.highlight .nd { color: #AA22FF } /* Name.Decorator */ +.highlight .ni { color: #999999; font-weight: bold } /* Name.Entity */ +.highlight .ne { color: #D2413A; font-weight: bold } /* Name.Exception */ +.highlight .nf { color: #00A000 } /* Name.Function */ +.highlight .nl { color: #A0A000 } /* Name.Label */ +.highlight .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */ +.highlight .nt { color: #008000; font-weight: bold } /* Name.Tag */ +.highlight .nv { color: #B8860B } /* Name.Variable */ +.highlight .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */ +.highlight .w { color: #bbbbbb } /* Text.Whitespace */ +.highlight .mf { color: #666666 } /* Literal.Number.Float */ +.highlight .mh { color: #666666 } /* Literal.Number.Hex */ +.highlight .mi { color: #666666 } /* Literal.Number.Integer */ +.highlight .mo { color: #666666 } /* Literal.Number.Oct */ +.highlight .sb { color: #BB4444 } /* Literal.String.Backtick */ +.highlight .sc { color: #BB4444 } /* Literal.String.Char */ +.highlight .sd { color: #BB4444; font-style: italic } /* Literal.String.Doc */ +.highlight .s2 { color: #BB4444 } /* Literal.String.Double */ +.highlight .se { color: #BB6622; font-weight: bold } /* Literal.String.Escape */ +.highlight .sh { color: #BB4444 } /* Literal.String.Heredoc */ +.highlight .si { color: #BB6688; font-weight: bold } /* Literal.String.Interpol */ +.highlight .sx { color: #008000 } /* Literal.String.Other */ +.highlight .sr { color: #BB6688 } /* Literal.String.Regex */ +.highlight .s1 { color: #BB4444 } /* Literal.String.Single */ +.highlight .ss { color: #B8860B } /* Literal.String.Symbol */ +.highlight .bp { color: #AA22FF } /* Name.Builtin.Pseudo */ +.highlight .vc { color: #B8860B } /* Name.Variable.Class */ +.highlight .vg { color: #B8860B } /* Name.Variable.Global */ +.highlight .vi { color: #B8860B } /* Name.Variable.Instance */ +.highlight .il { color: #666666 } /* Literal.Number.Integer.Long */ diff --git a/static/css/style.css b/static/css/style.css new file mode 100644 index 0000000..bed6558 --- /dev/null +++ b/static/css/style.css @@ -0,0 +1,144 @@ +* { + font-family: Helvetica, sans-serif; +} + +body { + padding: 1.5em 0; +} + +div#container { + width: 900px; + margin: 0 auto; +} + +h1 { + margin: 1.5em 0 0 0; +} + +h1 a { + text-decoration: none; +} + +blockquote { + background-color: #eee; + margin: 1em; + padding: 0.3em 1em; +} + + +#bigleft, #bigright { + position: fixed; + font-size: 32pt; +} + +#bigleft a, #bigright a { + color: #ddd; +} + +#bigleft a:hover, #bigright a:hover { + color: #005aa0; + text-decoration: none; +} + +#bigleft { + top: 4em; +} + +#bigright { + top: 5em; +} + +#entry, #wrapper { + font-size: 13pt; +} + +#entry img { + margin: 1em; + margin-left: 0; +} + +img.right { + float: right; + margin-left: 1em; +} + +#entry, #entries, #wrapper, #resume { + width: 40em; + margin-left: 5em; + text-align: justify; + line-height: 1.6em; +} + +#entries li { + margin: 0.5em; +} + +.footnotes { + font-size: 88%; + border-top: 1px solid grey; +} + +ul.spaced li { + margin-bottom: 1.5em; +} + +footer { + margin: 2em 0 7em 0; + border-top: 1px grey solid; +} + +footer p { + text-align: center; 
+} + +img.portrait { + margin-top: -3em; + margin-left: 2em; +} + +a { + text-decoration: none; + color: #005aa0; +} + +a:hover { + text-decoration: underline; + color: #11472b; +} + +a img { border: 0; } + +a.older, a.newer { + size: 80%; +} + +pre, pre span, tt, kbd { + font-family: Inconsolata, Consolas, monospace; +} + +kbd { + background: #eee; + padding: 0.2em; +} + +.code, .highlight { + border: 1px grey solid; + padding-left: 0.5em; + font-family: Inconsolata, Consolas, monospace; + font-size: 14px; + border-radius: 4px 4px 4px 4px; +} + +code { + font-family: Inconsolata, Consolas, monospace; +} + +.timestamp { + text-align: right; + color: grey; +} + +#resume h3 { margin-left: -1em; } +#resume dt { font-style: italic; } + + diff --git a/static/css/syntax.css b/static/css/syntax.css index 2774b76..7e10a17 100644 --- a/static/css/syntax.css +++ b/static/css/syntax.css @@ -1,60 +1,26 @@ -.highlight { background: #ffffff; } -.highlight .c { color: #999988; font-style: italic } /* Comment */ -.highlight .err { color: #a61717; background-color: #e3d2d2 } /* Error */ -.highlight .k { font-weight: bold } /* Keyword */ -.highlight .o { font-weight: bold } /* Operator */ -.highlight .cm { color: #999988; font-style: italic } /* Comment.Multiline */ -.highlight .cp { color: #999999; font-weight: bold } /* Comment.Preproc */ -.highlight .c1 { color: #999988; font-style: italic } /* Comment.Single */ -.highlight .cs { color: #999999; font-weight: bold; font-style: italic } /* Comment.Special */ -.highlight .gd { color: #000000; background-color: #ffdddd } /* Generic.Deleted */ -.highlight .gd .x { color: #000000; background-color: #ffaaaa } /* Generic.Deleted.Specific */ -.highlight .ge { font-style: italic } /* Generic.Emph */ -.highlight .gr { color: #aa0000 } /* Generic.Error */ -.highlight .gh { color: #999999 } /* Generic.Heading */ -.highlight .gi { color: #000000; background-color: #ddffdd } /* Generic.Inserted */ -.highlight .gi .x { color: #000000; background-color: #aaffaa } /* Generic.Inserted.Specific */ -.highlight .go { color: #888888 } /* Generic.Output */ -.highlight .gp { color: #555555 } /* Generic.Prompt */ -.highlight .gs { font-weight: bold } /* Generic.Strong */ -.highlight .gu { color: #aaaaaa } /* Generic.Subheading */ -.highlight .gt { color: #aa0000 } /* Generic.Traceback */ -.highlight .kc { font-weight: bold } /* Keyword.Constant */ -.highlight .kd { font-weight: bold } /* Keyword.Declaration */ -.highlight .kp { font-weight: bold } /* Keyword.Pseudo */ -.highlight .kr { font-weight: bold } /* Keyword.Reserved */ -.highlight .kt { color: #445588; font-weight: bold } /* Keyword.Type */ -.highlight .m { color: #009999 } /* Literal.Number */ -.highlight .s { color: #d14 } /* Literal.String */ -.highlight .na { color: #008080 } /* Name.Attribute */ -.highlight .nb { color: #0086B3 } /* Name.Builtin */ -.highlight .nc { color: #445588; font-weight: bold } /* Name.Class */ -.highlight .no { color: #008080 } /* Name.Constant */ -.highlight .ni { color: #800080 } /* Name.Entity */ -.highlight .ne { color: #990000; font-weight: bold } /* Name.Exception */ -.highlight .nf { color: #990000; font-weight: bold } /* Name.Function */ -.highlight .nn { color: #555555 } /* Name.Namespace */ -.highlight .nt { color: #000080 } /* Name.Tag */ -.highlight .nv { color: #008080 } /* Name.Variable */ -.highlight .ow { font-weight: bold } /* Operator.Word */ -.highlight .w { color: #bbbbbb } /* Text.Whitespace */ -.highlight .mf { color: #009999 } /* Literal.Number.Float */ -.highlight .mh 
{ color: #009999 } /* Literal.Number.Hex */ -.highlight .mi { color: #009999 } /* Literal.Number.Integer */ -.highlight .mo { color: #009999 } /* Literal.Number.Oct */ -.highlight .sb { color: #d14 } /* Literal.String.Backtick */ -.highlight .sc { color: #d14 } /* Literal.String.Char */ -.highlight .sd { color: #d14 } /* Literal.String.Doc */ -.highlight .s2 { color: #d14 } /* Literal.String.Double */ -.highlight .se { color: #d14 } /* Literal.String.Escape */ -.highlight .sh { color: #d14 } /* Literal.String.Heredoc */ -.highlight .si { color: #d14 } /* Literal.String.Interpol */ -.highlight .sx { color: #d14 } /* Literal.String.Other */ -.highlight .sr { color: #009926 } /* Literal.String.Regex */ -.highlight .s1 { color: #d14 } /* Literal.String.Single */ -.highlight .ss { color: #990073 } /* Literal.String.Symbol */ -.highlight .bp { color: #999999 } /* Name.Builtin.Pseudo */ -.highlight .vc { color: #008080 } /* Name.Variable.Class */ -.highlight .vg { color: #008080 } /* Name.Variable.Global */ -.highlight .vi { color: #008080 } /* Name.Variable.Instance */ -.highlight .il { color: #009999 } /* Literal.Number.Integer.Long */ +/* zenburn syntax highlighting - from http://userstyles.org/styles/88895/github-zenburn */ +.highlight,.highlight pre,.highlight table { background:#383838 !important;color:#656555 !important; } +.highlight .gd .x { background:#dca3a3 !important;color:#dcdccc !important; } +.highlight .hll { background-color:#656555 !important; } +.highlight .err { color:#f0dfaf !important;background-color:#6f6f6f !important; } +.highlight .cs { color:#dca3a3 !important;background-color:#8c5353 !important; } +.highlight .gi { color:#7f9f7f !important; } +.highlight .gi .x { background:#5f5f5f !important;color:#dcdccc !important; } +.highlight .nf { color:#93e0e3 !important; } +.highlight .c,.highlight .cm,.highlight .c1 { color:#5c888b !important; } +.highlight .g,.highlight .l,.highlight .x,.highlight .ge,.highlight .gs,.highlight .ld,.highlight .ni, +.highlight .nl,.highlight .nx,.highlight .py,.highlight .n,.highlight .go { color:#6ca0a3 !important; } +.highlight .k { color:#f0dfaf !important; } +.highlight .s1 { color:#bc8383 !important; } +.highlight .nb,.highlight .bp { color:#8cd0d3 !important; } +.highlight .nc,.highlight .nn { color:#7cb8bb !important; } +.highlight .gd,.highlight .gd pre { background:#8c5353 !important;color:#dcdccc !important;border-color:#ac7373 !important; } +.highlight .o,.highlight .p,.highlight .na,.highlight .ne { color:#dcdccc !important; } +.highlight .gp,.highlight .w,.highlight .gh,.highlight .gu { color:#656555 !important; } +.highlight .cp,.highlight .s,.highlight .sb,.highlight .sc,.highlight .sd,.highlight .s2,.highlight .se, +.highlight .sh,.highlight .si,.highlight .ss,.highlight .kc,.highlight .kd,.highlight .kn,.highlight .kp, +.highlight .kr,.highlight .kt,.highlight .nt,.highlight .ow { color:#8fb28f !important; } +.highlight .gd,.highlight .gr,.highlight .gt,.highlight .m,.highlight .mf,.highlight .mh,.highlight .mi, +.highlight .mo,.highlight .sr,.highlight .il { color:#9c6363 !important; } +.highlight .no,.highlight .nv,.highlight .vc,.highlight .vg,.highlight .vi,.highlight .nd, +.highlight .sx { color:#dfaf8f !important; }
\ No newline at end of file
