Background: #fff
Foreground: #000
PrimaryPale: #8cf
PrimaryLight: #18f
PrimaryMid: #04b
PrimaryDark: #014
SecondaryPale: #ffc
SecondaryLight: #fe8
SecondaryMid: #db4
SecondaryDark: #841
TertiaryPale: #eee
TertiaryLight: #ccc
TertiaryMid: #999
TertiaryDark: #666
Error: #f88
<!--{{{-->
<div class='toolbar' macro='toolbar [[ToolbarCommands::EditToolbar]]'></div>
<div class='title' macro='view title'></div>
<div class='editor' macro='edit title'></div>
<div macro='annotations'></div>
<div class='editor' macro='edit text'></div>
<div class='editor' macro='edit tags'></div><div class='editorFooter'><span macro='message views.editor.tagPrompt'></span><span macro='tagChooser excludeLists'></span></div>
<!--}}}-->
To get started with this blank [[TiddlyWiki]], you'll need to modify the following tiddlers:
* [[SiteTitle]] & [[SiteSubtitle]]: The title and subtitle of the site, as shown above (after saving, they will also appear in the browser title bar)
* [[MainMenu]]: The menu (usually on the left)
* [[DefaultTiddlers]]: Contains the names of the tiddlers that you want to appear when the TiddlyWiki is opened
You'll also need to enter your username for signing your edits: <<option txtUserName>>
<<importTiddlers>>
<!--{{{-->
<link rel='alternate' type='application/rss+xml' title='RSS' href='index.xml' />
<!--}}}-->
These [[InterfaceOptions]] for customising [[TiddlyWiki]] are saved in your browser

Your username for signing your edits. Write it as a [[WikiWord]] (eg [[JoeBloggs]])

<<option txtUserName>>
<<option chkSaveBackups>> [[SaveBackups]]
<<option chkAutoSave>> [[AutoSave]]
<<option chkRegExpSearch>> [[RegExpSearch]]
<<option chkCaseSensitiveSearch>> [[CaseSensitiveSearch]]
<<option chkAnimate>> [[EnableAnimations]]

----
Also see [[AdvancedOptions]]
<!--{{{-->
<div class='header' role='banner' macro='gradient vert [[ColorPalette::PrimaryLight]] [[ColorPalette::PrimaryMid]]'>
<div class='headerShadow'>
<span class='siteTitle' refresh='content' tiddler='SiteTitle'></span>&nbsp;
<span class='siteSubtitle' refresh='content' tiddler='SiteSubtitle'></span>
</div>
<div class='headerForeground'>
<span class='siteTitle' refresh='content' tiddler='SiteTitle'></span>&nbsp;
<span class='siteSubtitle' refresh='content' tiddler='SiteSubtitle'></span>
</div>
</div>
<div id='mainMenu' role='navigation' refresh='content' tiddler='MainMenu'></div>
<div id='sidebar'>
<div id='sidebarOptions' role='navigation' refresh='content' tiddler='SideBarOptions'></div>
<div id='sidebarTabs' role='complementary' refresh='content' force='true' tiddler='SideBarTabs'></div>
</div>
<div id='displayArea' role='main'>
<div id='messageArea'></div>
<div id='tiddlerDisplay'></div>
</div>
<!--}}}-->
/*{{{*/
body {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}

a {color:[[ColorPalette::PrimaryMid]];}
a:hover {background-color:[[ColorPalette::PrimaryMid]]; color:[[ColorPalette::Background]];}
a img {border:0;}

h1,h2,h3,h4,h5,h6 {color:[[ColorPalette::SecondaryDark]]; background:transparent;}
h1 {border-bottom:2px solid [[ColorPalette::TertiaryLight]];}
h2,h3 {border-bottom:1px solid [[ColorPalette::TertiaryLight]];}

.button {color:[[ColorPalette::PrimaryDark]]; border:1px solid [[ColorPalette::Background]];}
.button:hover {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::SecondaryLight]]; border-color:[[ColorPalette::SecondaryMid]];}
.button:active {color:[[ColorPalette::Background]]; background:[[ColorPalette::SecondaryMid]]; border:1px solid [[ColorPalette::SecondaryDark]];}

.header {background:[[ColorPalette::PrimaryMid]];}
.headerShadow {color:[[ColorPalette::Foreground]];}
.headerShadow a {font-weight:normal; color:[[ColorPalette::Foreground]];}
.headerForeground {color:[[ColorPalette::Background]];}
.headerForeground a {font-weight:normal; color:[[ColorPalette::PrimaryPale]];}

.tabSelected {color:[[ColorPalette::PrimaryDark]];
	background:[[ColorPalette::TertiaryPale]];
	border-left:1px solid [[ColorPalette::TertiaryLight]];
	border-top:1px solid [[ColorPalette::TertiaryLight]];
	border-right:1px solid [[ColorPalette::TertiaryLight]];
}
.tabUnselected {color:[[ColorPalette::Background]]; background:[[ColorPalette::TertiaryMid]];}
.tabContents {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::TertiaryPale]]; border:1px solid [[ColorPalette::TertiaryLight]];}
.tabContents .button {border:0;}

#sidebar {}
#sidebarOptions input {border:1px solid [[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel {background:[[ColorPalette::PrimaryPale]];}
#sidebarOptions .sliderPanel a {border:none;color:[[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel a:hover {color:[[ColorPalette::Background]]; background:[[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel a:active {color:[[ColorPalette::PrimaryMid]]; background:[[ColorPalette::Background]];}

.wizard {background:[[ColorPalette::PrimaryPale]]; border:1px solid [[ColorPalette::PrimaryMid]];}
.wizard h1 {color:[[ColorPalette::PrimaryDark]]; border:none;}
.wizard h2 {color:[[ColorPalette::Foreground]]; border:none;}
.wizardStep {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];
	border:1px solid [[ColorPalette::PrimaryMid]];}
.wizardStep.wizardStepDone {background:[[ColorPalette::TertiaryLight]];}
.wizardFooter {background:[[ColorPalette::PrimaryPale]];}
.wizardFooter .status {background:[[ColorPalette::PrimaryDark]]; color:[[ColorPalette::Background]];}
.wizard .button {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::SecondaryLight]]; border: 1px solid;
	border-color:[[ColorPalette::SecondaryPale]] [[ColorPalette::SecondaryDark]] [[ColorPalette::SecondaryDark]] [[ColorPalette::SecondaryPale]];}
.wizard .button:hover {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::Background]];}
.wizard .button:active {color:[[ColorPalette::Background]]; background:[[ColorPalette::Foreground]]; border: 1px solid;
	border-color:[[ColorPalette::PrimaryDark]] [[ColorPalette::PrimaryPale]] [[ColorPalette::PrimaryPale]] [[ColorPalette::PrimaryDark]];}

.wizard .notChanged {background:transparent;}
.wizard .changedLocally {background:#80ff80;}
.wizard .changedServer {background:#8080ff;}
.wizard .changedBoth {background:#ff8080;}
.wizard .notFound {background:#ffff80;}
.wizard .putToServer {background:#ff80ff;}
.wizard .gotFromServer {background:#80ffff;}

#messageArea {border:1px solid [[ColorPalette::SecondaryMid]]; background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]];}
#messageArea .button {color:[[ColorPalette::PrimaryMid]]; background:[[ColorPalette::SecondaryPale]]; border:none;}

.popupTiddler {background:[[ColorPalette::TertiaryPale]]; border:2px solid [[ColorPalette::TertiaryMid]];}

.popup {background:[[ColorPalette::TertiaryPale]]; color:[[ColorPalette::TertiaryDark]]; border-left:1px solid [[ColorPalette::TertiaryMid]]; border-top:1px solid [[ColorPalette::TertiaryMid]]; border-right:2px solid [[ColorPalette::TertiaryDark]]; border-bottom:2px solid [[ColorPalette::TertiaryDark]];}
.popup hr {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::PrimaryDark]]; border-bottom:1px;}
.popup li.disabled {color:[[ColorPalette::TertiaryMid]];}
.popup li a, .popup li a:visited {color:[[ColorPalette::Foreground]]; border: none;}
.popup li a:hover {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; border: none;}
.popup li a:active {background:[[ColorPalette::SecondaryPale]]; color:[[ColorPalette::Foreground]]; border: none;}
.popupHighlight {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}
.listBreak div {border-bottom:1px solid [[ColorPalette::TertiaryDark]];}

.tiddler .defaultCommand {font-weight:bold;}

.shadow .title {color:[[ColorPalette::TertiaryDark]];}

.title {color:[[ColorPalette::SecondaryDark]];}
.subtitle {color:[[ColorPalette::TertiaryDark]];}

.toolbar {color:[[ColorPalette::PrimaryMid]];}
.toolbar a {color:[[ColorPalette::TertiaryLight]];}
.selected .toolbar a {color:[[ColorPalette::TertiaryMid]];}
.selected .toolbar a:hover {color:[[ColorPalette::Foreground]];}

.tagging, .tagged {border:1px solid [[ColorPalette::TertiaryPale]]; background-color:[[ColorPalette::TertiaryPale]];}
.selected .tagging, .selected .tagged {background-color:[[ColorPalette::TertiaryLight]]; border:1px solid [[ColorPalette::TertiaryMid]];}
.tagging .listTitle, .tagged .listTitle {color:[[ColorPalette::PrimaryDark]];}
.tagging .button, .tagged .button {border:none;}

.footer {color:[[ColorPalette::TertiaryLight]];}
.selected .footer {color:[[ColorPalette::TertiaryMid]];}

.error, .errorButton {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::Error]];}
.warning {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::SecondaryPale]];}
.lowlight {background:[[ColorPalette::TertiaryLight]];}

.zoomer {background:none; color:[[ColorPalette::TertiaryMid]]; border:3px solid [[ColorPalette::TertiaryMid]];}

.imageLink, #displayArea .imageLink {background:transparent;}

.annotation {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; border:2px solid [[ColorPalette::SecondaryMid]];}

.viewer .listTitle {list-style-type:none; margin-left:-2em;}
.viewer .button {border:1px solid [[ColorPalette::SecondaryMid]];}
.viewer blockquote {border-left:3px solid [[ColorPalette::TertiaryDark]];}

.viewer table, table.twtable {border:2px solid [[ColorPalette::TertiaryDark]];}
.viewer th, .viewer thead td, .twtable th, .twtable thead td {background:[[ColorPalette::SecondaryMid]]; border:1px solid [[ColorPalette::TertiaryDark]]; color:[[ColorPalette::Background]];}
.viewer td, .viewer tr, .twtable td, .twtable tr {border:1px solid [[ColorPalette::TertiaryDark]];}

.viewer pre {border:1px solid [[ColorPalette::SecondaryLight]]; background:[[ColorPalette::SecondaryPale]];}
.viewer code {color:[[ColorPalette::SecondaryDark]];}
.viewer hr {border:0; border-top:dashed 1px [[ColorPalette::TertiaryDark]]; color:[[ColorPalette::TertiaryDark]];}

.highlight, .marked {background:[[ColorPalette::SecondaryLight]];}

.editor input {border:1px solid [[ColorPalette::PrimaryMid]];}
.editor textarea {border:1px solid [[ColorPalette::PrimaryMid]]; width:100%;}
.editorFooter {color:[[ColorPalette::TertiaryMid]];}
.readOnly {background:[[ColorPalette::TertiaryPale]];}

#backstageArea {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::TertiaryMid]];}
#backstageArea a {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::Background]]; border:none;}
#backstageArea a:hover {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; }
#backstageArea a.backstageSelTab {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}
#backstageButton a {background:none; color:[[ColorPalette::Background]]; border:none;}
#backstageButton a:hover {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::Background]]; border:none;}
#backstagePanel {background:[[ColorPalette::Background]]; border-color: [[ColorPalette::Background]] [[ColorPalette::TertiaryDark]] [[ColorPalette::TertiaryDark]] [[ColorPalette::TertiaryDark]];}
.backstagePanelFooter .button {border:none; color:[[ColorPalette::Background]];}
.backstagePanelFooter .button:hover {color:[[ColorPalette::Foreground]];}
#backstageCloak {background:[[ColorPalette::Foreground]]; opacity:0.6; filter:alpha(opacity=60);}
/*}}}*/
/*{{{*/
* html .tiddler {height:1%;}

body {font-size:.75em; font-family:arial,helvetica; margin:0; padding:0;}

h1,h2,h3,h4,h5,h6 {font-weight:bold; text-decoration:none;}
h1,h2,h3 {padding-bottom:1px; margin-top:1.2em;margin-bottom:0.3em;}
h4,h5,h6 {margin-top:1em;}
h1 {font-size:1.35em;}
h2 {font-size:1.25em;}
h3 {font-size:1.1em;}
h4 {font-size:1em;}
h5 {font-size:.9em;}

hr {height:1px;}

a {text-decoration:none;}

dt {font-weight:bold;}

ol {list-style-type:decimal;}
ol ol {list-style-type:lower-alpha;}
ol ol ol {list-style-type:lower-roman;}
ol ol ol ol {list-style-type:decimal;}
ol ol ol ol ol {list-style-type:lower-alpha;}
ol ol ol ol ol ol {list-style-type:lower-roman;}
ol ol ol ol ol ol ol {list-style-type:decimal;}

.txtOptionInput {width:11em;}

#contentWrapper .chkOptionInput {border:0;}

.externalLink {text-decoration:underline;}

.indent {margin-left:3em;}
.outdent {margin-left:3em; text-indent:-3em;}
code.escaped {white-space:nowrap;}

.tiddlyLinkExisting {font-weight:bold;}
.tiddlyLinkNonExisting {font-style:italic;}

/* the 'a' is required for IE, otherwise it renders the whole tiddler in bold */
a.tiddlyLinkNonExisting.shadow {font-weight:bold;}

#mainMenu .tiddlyLinkExisting,
	#mainMenu .tiddlyLinkNonExisting,
	#sidebarTabs .tiddlyLinkNonExisting {font-weight:normal; font-style:normal;}
#sidebarTabs .tiddlyLinkExisting {font-weight:bold; font-style:normal;}

.header {position:relative;}
.header a:hover {background:transparent;}
.headerShadow {position:relative; padding:4.5em 0 1em 1em; left:-1px; top:-1px;}
.headerForeground {position:absolute; padding:4.5em 0 1em 1em; left:0; top:0;}

.siteTitle {font-size:3em;}
.siteSubtitle {font-size:1.2em;}

#mainMenu {position:absolute; left:0; width:10em; text-align:right; line-height:1.6em; padding:1.5em 0.5em 0.5em 0.5em; font-size:1.1em;}

#sidebar {position:absolute; right:3px; width:16em; font-size:.9em;}
#sidebarOptions {padding-top:0.3em;}
#sidebarOptions a {margin:0 0.2em; padding:0.2em 0.3em; display:block;}
#sidebarOptions input {margin:0.4em 0.5em;}
#sidebarOptions .sliderPanel {margin-left:1em; padding:0.5em; font-size:.85em;}
#sidebarOptions .sliderPanel a {font-weight:bold; display:inline; padding:0;}
#sidebarOptions .sliderPanel input {margin:0 0 0.3em 0;}
#sidebarTabs .tabContents {width:15em; overflow:hidden;}

.wizard {padding:0.1em 1em 0 2em;}
.wizard h1 {font-size:2em; font-weight:bold; background:none; padding:0; margin:0.4em 0 0.2em;}
.wizard h2 {font-size:1.2em; font-weight:bold; background:none; padding:0; margin:0.4em 0 0.2em;}
.wizardStep {padding:1em 1em 1em 1em;}
.wizard .button {margin:0.5em 0 0; font-size:1.2em;}
.wizardFooter {padding:0.8em 0.4em 0.8em 0;}
.wizardFooter .status {padding:0 0.4em; margin-left:1em;}
.wizard .button {padding:0.1em 0.2em;}

#messageArea {position:fixed; top:2em; right:0; margin:0.5em; padding:0.5em; z-index:2000; _position:absolute;}
.messageToolbar {display:block; text-align:right; padding:0.2em;}
#messageArea a {text-decoration:underline;}

.tiddlerPopupButton {padding:0.2em;}
.popupTiddler {position: absolute; z-index:300; padding:1em; margin:0;}

.popup {position:absolute; z-index:300; font-size:.9em; padding:0; list-style:none; margin:0;}
.popup .popupMessage {padding:0.4em;}
.popup hr {display:block; height:1px; width:auto; padding:0; margin:0.2em 0;}
.popup li.disabled {padding:0.4em;}
.popup li a {display:block; padding:0.4em; font-weight:normal; cursor:pointer;}
.listBreak {font-size:1px; line-height:1px;}
.listBreak div {margin:2px 0;}

.tabset {padding:1em 0 0 0.5em;}
.tab {margin:0 0 0 0.25em; padding:2px;}
.tabContents {padding:0.5em;}
.tabContents ul, .tabContents ol {margin:0; padding:0;}
.txtMainTab .tabContents li {list-style:none;}
.tabContents li.listLink { margin-left:.75em;}

#contentWrapper {display:block;}
#splashScreen {display:none;}

#displayArea {margin:1em 17em 0 14em;}

.toolbar {text-align:right; font-size:.9em;}

.tiddler {padding:1em 1em 0;}

.missing .viewer,.missing .title {font-style:italic;}

.title {font-size:1.6em; font-weight:bold;}

.missing .subtitle {display:none;}
.subtitle {font-size:1.1em;}

.tiddler .button {padding:0.2em 0.4em;}

.tagging {margin:0.5em 0.5em 0.5em 0; float:left; display:none;}
.isTag .tagging {display:block;}
.tagged {margin:0.5em; float:right;}
.tagging, .tagged {font-size:0.9em; padding:0.25em;}
.tagging ul, .tagged ul {list-style:none; margin:0.25em; padding:0;}
.tagClear {clear:both;}

.footer {font-size:.9em;}
.footer li {display:inline;}

.annotation {padding:0.5em; margin:0.5em;}

* html .viewer pre {width:99%; padding:0 0 1em 0;}
.viewer {line-height:1.4em; padding-top:0.5em;}
.viewer .button {margin:0 0.25em; padding:0 0.25em;}
.viewer blockquote {line-height:1.5em; padding-left:0.8em;margin-left:2.5em;}
.viewer ul, .viewer ol {margin-left:0.5em; padding-left:1.5em;}

.viewer table, table.twtable {border-collapse:collapse; margin:0.8em 1.0em;}
.viewer th, .viewer td, .viewer tr,.viewer caption,.twtable th, .twtable td, .twtable tr,.twtable caption {padding:3px;}
table.listView {font-size:0.85em; margin:0.8em 1.0em;}
table.listView th, table.listView td, table.listView tr {padding:0 3px 0 3px;}

.viewer pre {padding:0.5em; margin-left:0.5em; font-size:1.2em; line-height:1.4em; overflow:auto;}
.viewer code {font-size:1.2em; line-height:1.4em;}

.editor {font-size:1.1em;}
.editor input, .editor textarea {display:block; width:100%; font:inherit;}
.editorFooter {padding:0.25em 0; font-size:.9em;}
.editorFooter .button {padding-top:0; padding-bottom:0;}

.fieldsetFix {border:0; padding:0; margin:1px 0px;}

.zoomer {font-size:1.1em; position:absolute; overflow:hidden;}
.zoomer div {padding:1em;}

* html #backstage {width:99%;}
* html #backstageArea {width:99%;}
#backstageArea {display:none; position:relative; overflow: hidden; z-index:150; padding:0.3em 0.5em;}
#backstageToolbar {position:relative;}
#backstageArea a {font-weight:bold; margin-left:0.5em; padding:0.3em 0.5em;}
#backstageButton {display:none; position:absolute; z-index:175; top:0; right:0;}
#backstageButton a {padding:0.1em 0.4em; margin:0.1em;}
#backstage {position:relative; width:100%; z-index:50;}
#backstagePanel {display:none; z-index:100; position:absolute; width:90%; margin-left:3em; padding:1em;}
.backstagePanelFooter {padding-top:0.2em; float:right;}
.backstagePanelFooter a {padding:0.2em 0.4em;}
#backstageCloak {display:none; z-index:20; position:absolute; width:100%; height:100px;}

.whenBackstage {display:none;}
.backstageVisible .whenBackstage {display:block;}
/*}}}*/
/***
StyleSheet for use when a translation requires any css style changes.
This StyleSheet can be used directly by languages such as Chinese, Japanese and Korean which need larger font sizes.
***/
/*{{{*/
body {font-size:0.8em;}
#sidebarOptions {font-size:1.05em;}
#sidebarOptions a {font-style:normal;}
#sidebarOptions .sliderPanel {font-size:0.95em;}
.subtitle {font-size:0.8em;}
.viewer table.listView {font-size:0.95em;}
/*}}}*/
/*{{{*/
@media print {
#mainMenu, #sidebar, #messageArea, .toolbar, #backstageButton, #backstageArea {display: none !important;}
#displayArea {margin: 1em 1em 0em;}
noscript {display:none;} /* Fixes a feature in Firefox 1.5.0.2 where print preview displays the noscript content */
}
/*}}}*/
<!--{{{-->
<div class='toolbar' role='navigation' macro='toolbar [[ToolbarCommands::ViewToolbar]]'></div>
<div class='title' macro='view title'></div>
<div class='subtitle'><span macro='view modifier link'></span>, <span macro='view modified date'></span> (<span macro='message views.wikified.createdPrompt'></span> <span macro='view created date'></span>)</div>
<div class='tagging' macro='tagging'></div>
<div class='tagged' macro='tags'></div>
<div class='viewer' macro='view text wikified'></div>
<div class='tagClear'></div>
<!--}}}-->
<html>
<center>
  <video id="my-video" class="video-js" controls preload="auto" width="700" height="460" poster="" data-setup="{}">
    <source src="video/putty.mp4" type='video/mp4'>
    <p class="vjs-no-js">
      To view this video please enable JavaScript, and consider upgrading to a web browser that
      <a href="http://videojs.com/html5-video-support/" target="_blank">supports HTML5 video</a>
    </p>
  </video>

  <script src="https://vjs.zencdn.net/7.8.2/video.min.js"></script>
</center>
</html>
<html>
<table border=0>
<tr border=0><TD align=center border=0>
<iframe src="https://www.google.com/calendar/embed?src=cl5tpjhd9f3bj9c52idttghtsg%40group.calendar.google.com&ctz=America/New_York" style="border: 0" width="1000" height="600" frameborder="0" scrolling="no"></iframe>
<BR><B>This calendar is authoritative should there be due date conflicts with other pages on the site</B>
</TD></TR>
</table>
</html>

A traditional class meeting on campus naturally allows for regular communication.  This benefits students by helping them better understand the material, and it allows the instructor to more easily gauge how everyone is doing in the class.

I would like to ensure hosting this course online does not deprive us of regular communication.  Class participation will be worth 10% of your overall grade.  Each week's class participation will be worth ''50 points'' total. Multiple quality posts each week will be necessary to receive full credit.

Posts for each week must be made by Sunday, 11:55 p.m. (EST) the following week in order to receive full credit.  This allows one week to post questions about outstanding assignments and one week to post questions about labs after they have been returned.  Please post lab questions on the discussion board for the week they were assigned.

Joining group Zoom meetings will also earn class participation credit.  


!! Participation:

You may work collaboratively on assignments and provide assistance to one another in the Blackboard discussion boards. You can also provide ideas or helpful resources that assisted you on your assignments.  Credit may also be received for joining or participating in either regularly scheduled or ad-hoc group Zoom meetings.

!! Rubric for weekly class participation:

* 25 points - Actively participate in a group Zoom meeting
* 10-20 points - High quality posts which contain well-developed questions, actionable suggestions, or recommendations
* 15 points - Attend a group Zoom meeting
* 5-10 points - General comments regarding the assignments.  No specific insights directly related to the problem or responses to questions which are not actionable.


!! Quality of Remarks:

You will be evaluated based on the quality of your participation by making recommendations, answering questions and asking questions related to the problems, and making pertinent comments.

The discussion forum and Zoom meetings are a valuable component of learning since they allow you to see a variety of solutions and ideas just like you would in a classroom.

Generally, please do not post direct solutions to lab questions, especially unsolicited, before their due date.  Doing so will not be awarded participation points.  If someone is genuinely stuck on a problem and you'd like to help, guidance towards the solution is always a more beneficial place to start rather than just posting the answer.  If you just post the answer, I cannot tell if someone understands the problem or simply copied your solution.

Please be sure to check out the [[Using Blackboard]] and [[Using Discord]] pages to see more useful information.


!! Adding New Threads

Create new threads in the weekly discussion board forum in which the material was assigned.  When naming your threads, use something descriptive in the name and not just the lab and question number.  The highlighted thread is a model to follow and will make things easier to find as the number of posts grows.  Be sure to scan for an existing thread relating to your topic before creating a new one.  Usability is an important consideration in what you do.  ''Not using descriptive thread titles is detrimental to usability, so that post will not receive full credit.''

[img[img/discussionBoards.png]]



Our class utilizes a [[DigitalOcean|https://www.digitalocean.com/products/droplets/]] droplet for the first half and a [[Hetzner bare-metal auction|https://www.hetzner.com/sb?ram_from=8&ram_to=10&ssd=true&cpu_from=8000&cpu_to=30000&price_from=50&price_to=90&search=Xeon+E5]] server to support the student lab environment during the second half of the semester.

!! ~DigitalOcean droplet

Our needs are very minimal for the first half of the semester; we only require a Linux shell server everyone can access to practice the commands and submit their work.  ~CentOS is used as our Linux distribution.  

A $5 per month ~DigitalOcean droplet (virtual machine) is more than enough.  ~DigitalOcean droplets are great for small projects like this.  We'll use it for two months then take a snapshot and destroy the droplet to save money.  It'll then be brought back from the snapshot when it's needed again for the next semester.


!! Hetzner Bare-metal

About halfway through the semester we'll switch from being Unix users to administrators.  Each student will be given a small collection of virtual machines to install and configure.  Additional resources are required for this portion of the class since each student will require about 6 ~VMs. Instead of just a single VM to cover the entire class, we'll now need a full server.  Hetzner auction servers have been a reliable, low-cost option for such short-term needs.  

To provide enough resources for the entire class, I'll be looking for a server with the following minimum specs:
* 64 GB RAM
* CPU with at least 6 cores at 3.4GHz.  I'm currently using the ~E5-1650V2.
* 2x 256 GB SSD (SSD drives are important for disk speeds.  SATA drives are too slow)

A server with these specs comes to about €60 per month.  We'll need it for 2 months.  The total infrastructure cost for this class per-semester is then about $150.

The server is initially provisioned by Hetzner with Debian Linux and [[Proxmox|https://www.proxmox.com/en/]] is then installed to act as our hypervisor.  Proxmox runs on Debian and can either be installed from [[its own CD image|https://www.proxmox.com/en/downloads]] or the [[packages can be installed on an existing Debian system|https://pve.proxmox.com/wiki/Install_Proxmox_VE_on_Debian_Buster]].  We'll use the latter option here since Hetzner must install the original OS for us and they do not have an option for Proxmox.

After the Hetzner server and Proxmox are installed, ~VMs will be created for our class shell server and for internal monitoring.  The class shell server will then be migrated from ~DigitalOcean.  Student ~VMs will be created from templates.

At the end of the semester everyone will be given the option to download their ~VMs for use locally with ~VirtualBox.  Student ~VMs will then be deleted, administrative ~VMs will be backed up to ~BackBlaze B2 storage for next time, then the server contract will be ended.

Other tools/services used:
* [[Fossil source code manager|https://fossil-scm.org/home/doc/trunk/www/index.wiki]] - Used to handle revision control for server configuration files and scripts
* [[SaltStack Infrastructure management|https://docs.saltproject.io/en/latest/]] - Used to orchestrate VM templates and manage infrastructure monitoring
* [[Naemon monitoring suite|https://www.naemon.org/]] - Used to monitor student ~VMs and provide feedback on completed/outstanding tasks
* [[Docker containerization|https://www.docker.com/]] - Used to rapidly deploy and isolate different services on the same VM in a way that can be easily repeated.
* [[BackBlaze B2 cloud storage|https://www.backblaze.com/b2/cloud-storage.html]] - Used to store management ~VMs and VM templates between semesters.  Storage here costs $0.005 per GB.

The combination of these tools allows the Hetzner server to be quickly brought online when needed for the new semester, its ~VMs and templates downloaded from ~BackBlaze B2 storage, and made ready to support our class.


Useful concepts:
* [[Infrastructure as code|https://en.wikipedia.org/wiki/Infrastructure_as_code]] - Rapidly provision servers, ~VMs, and Docker containers for individual services using ~APIs & orchestration tools with pre-made definition files instead of manually.  Using this concept, our class lab server is brought up from bare metal to fully online and ready to support users in about 30 minutes with just a handful of commands.

/***
!! CollapseTiddlersPlugin
^^Author: Bradley Meck^^
^^Source: http://gensoft.revhost.net/Collapse.html^^

|ELS 2/24/2006: added fallback to "CollapsedTemplate if "WebCollapsedTemplate" is not found |
|ELS 2/6/2006: added check for 'readOnly' flag to use alternative "WebCollapsedTemplate" |

***/

// Toolbar command: swap the containing tiddler over to the collapsed
// template ("WebCollapsedTemplate" when read-only and available,
// otherwise "CollapsedTemplate"), saving the prior template in the
// "oldTemplate" attribute so expandTiddler can restore it.
config.commands.collapseTiddler = {
text: "fold",
tooltip: "Collapse this tiddler",
handler: function(event,src,title)
{
var elem = story.findContainingTiddler(src);
// Never collapse a tiddler that is currently open for editing.
if(elem.getAttribute("template") == config.tiddlerTemplates[DEFAULT_EDIT_TEMPLATE])
return;
var tmpl = "CollapsedTemplate";
if(readOnly && store.tiddlerExists("WebCollapsedTemplate"))
tmpl = "WebCollapsedTemplate";
if(!store.tiddlerExists(tmpl)) { alert("Can't find 'CollapsedTemplate'"); return; }
if(elem.getAttribute("template") != tmpl) {
// Remember the current template so the tiddler can be unfolded later.
elem.setAttribute("oldTemplate",elem.getAttribute("template"));
story.displayTiddler(null,title,tmpl);
}
}
}

// Toolbar command: restore the template that collapseTiddler stashed
// in the "oldTemplate" attribute, unfolding the tiddler.
config.commands.expandTiddler = {
text: "unfold",
tooltip: "Expand this tiddler",
handler: function(event,src,title)
{
var container = story.findContainingTiddler(src);
var previous = container.getAttribute("oldTemplate");
story.displayTiddler(null,title,previous);
}
}

// Macro: renders a "collapse all" button that switches every open
// tiddler (except those being edited) to the collapsed template.
config.macros.collapseAll = {
handler: function(place,macroName,params,wikifier,paramString,tiddler){
createTiddlyButton(place,"collapse all","",function(){
story.forEachTiddler(function(title,tiddler){
// Bug fix: the original 'if' had no braces, so only the 'var t'
// declaration was guarded; for tiddlers open in the edit template,
// t stayed undefined and the tiddlerExists(undefined) check fired a
// spurious "Can't find 'CollapsedTemplate'" alert for each of them.
// Braces now skip edit-mode tiddlers entirely, matching the sibling
// collapseTiddler and collapseOthers commands.
if(tiddler.getAttribute("template") != config.tiddlerTemplates[DEFAULT_EDIT_TEMPLATE]){
// Prefer the web variant when the wiki is read-only and it exists.
var t = (readOnly&&store.tiddlerExists("WebCollapsedTemplate"))?"WebCollapsedTemplate":"CollapsedTemplate";
if (!store.tiddlerExists(t)) { alert("Can't find 'CollapsedTemplate'"); return; }
story.displayTiddler(null,title,t);
}
})})
}
}

// Macro: renders an "expand all" button that restores every collapsed
// tiddler to the template recorded in its "oldTemplate" attribute.
config.macros.expandAll = {
handler: function(place,macroName,params,wikifier,paramString,tiddler){
createTiddlyButton(place,"expand all","",function(){
story.forEachTiddler(function(title,tiddler){
var collapsed = (readOnly&&store.tiddlerExists("WebCollapsedTemplate")) ? "WebCollapsedTemplate" : "CollapsedTemplate";
if (!store.tiddlerExists(collapsed)) { alert("Can't find 'CollapsedTemplate'"); return; }
// Only touch tiddlers currently showing the collapsed template.
if(tiddler.getAttribute("template") == collapsed)
story.displayTiddler(null,title,tiddler.getAttribute("oldTemplate"));
})})
}
}

// Toolbar command: restore the containing tiddler's saved template
// while switching every other open tiddler to the collapsed template.
config.commands.collapseOthers = {
text: "focus",
tooltip: "Expand this tiddler and collapse all others",
handler: function(event,src,title)
{
var current = story.findContainingTiddler(src);
story.forEachTiddler(function(title,tiddler){
// Tiddlers open for editing are left untouched.
if(tiddler.getAttribute("template") == config.tiddlerTemplates[DEFAULT_EDIT_TEMPLATE])
return;
var target = (readOnly&&store.tiddlerExists("WebCollapsedTemplate")) ? "WebCollapsedTemplate" : "CollapsedTemplate";
if (!store.tiddlerExists(target)) { alert("Can't find 'CollapsedTemplate'"); return; }
// The focused tiddler gets its previously saved template back instead.
if (current==tiddler) target=current.getAttribute("oldTemplate");
// ELS 2006.02.22: deliberately no fallback when target is null — the
// tiddler's *current* view template (not the default "ViewTemplate")
// is then used by displayTiddler.
story.displayTiddler(null,title,target);
})
}
}
<div><div class='toolbar' macro='toolbar -closeTiddler closeOthers +editTiddler  permalink references jump newHere expandTiddler collapseOthers'></div>
<div class='title' macro='view title'></div></div>
[[Home]]
<!--{{{-->
<div class='toolbar' macro='toolbar [[ToolbarCommands::EditToolbar]]'></div>
<div class='title' macro='view title'></div>
<div class="editor">Title</div><div class='editor' macro='edit title'></div>
<div class="editor">Tags</div><div class='editor' macro='edit tags'></div><div class='editorFooter'><span macro='message views.editor.tagPrompt'></span><span macro='tagChooser'></span></div>
<div macro='annotations'></div>
<div class='editor' macro='edit text'></div>
<!--}}}-->
<html>
<center>
  <video id="my-video" class="video-js" controls preload="auto" width="984" height="768" poster="" data-setup="{}">
    <source src="video/FoxyProxy.mp4" type='video/mp4'>
    <p class="vjs-no-js">
      To view this video please enable JavaScript, and consider upgrading to a web browser that
      <a href="http://videojs.com/html5-video-support/" target="_blank">supports HTML5 video</a>
    </p>
  </video>

  <link href="https://vjs.zencdn.net/7.8.2/video-js.css" rel="stylesheet" />
  <script src="https://vjs.zencdn.net/7.8.2/video.min.js"></script>
</center>
</html>
!! Problem Reports:

If you have a problem, please send me a report I can work with. I need details of the problem, what you tried, steps you took to diagnose it, documentation you reviewed, screenshots, logs, etc. If you send me something vague like "//X command doesn't work//" with no supporting details, there may not be much I can do for you and I will wait for you to follow up your message with meaningful information. 

The level of assistance I provide will be proportionate to your effort to troubleshoot and supply details. If you do nothing to troubleshoot and send me little information to work with, you should then expect that much effort put into a response.


!! Time management & workload expectations:

SUNY Poly, as well as most others, [[requires 42.5 hours of work per credit hour|https://www.suny.edu/sunypp/documents.cfm?doc_id=168]].  A four-credit course will thus require 170 hours over the course of our 16 week term, or 10.5 hours per week. Going to college full time is effectively a full time job.  I will be expecting that time commitment each week.

Waiting until the last minute to complete, or even worse, begin, the lab assignments will not be a recipe for success.  Review the tasks early so you have plenty of time to research the problems, seek help in the discussion boards, and get up to speed if you are behind on any prerequisite material. 


!! Grading:

All course deliverables will be collected as PDF documents.  Graded copies of these PDF documents will be returned to you containing my annotations.  If you have questions regarding your grade or my comments, please contact me via email.

My grading is more traditional.  Meeting the bare minimum does not yield an A.  A high grade will require intellectual curiosity, problem-solving abilities, and thorough responses.


Letter grades will be assigned as follows:

| !Percent | !Grade |
| 95% ≥ | A |
| 90% ≥ | A- |
| 87% ≥ | B+ |
| 84% ≥ | B |
| 79% ≥ | B- |
| 77% ≥ | C+ |
| 74% ≥ | C |
| 69% ≥ | C- |
| 67% ≥ | D+ |
| 63% ≥ | D |
| ≤ 62% | F |


!NCS 205 Course Notes

Aug 29 [[Getting Started|Week 0]] - Administrative Tasks & course intro

Aug 29 [[Week 1, Part 1]] - Unix Intro
Aug 31 [[Week 1, Part 2]] - The filesystem
Sept 5 [[Week 2, Part 1]] - Exploring the system 
Sept 7 [[Week 2, Part 2]] - Manipulating Files & Directories
Sept 12 [[Week 3, Part 1]] - Links & File Globbing
Sept 14 [[Week 3, Part 2]] - Home Directories & Shell documentation
Sept 19 [[Week 4, Part 1]] -  File Permissions
Sept 21 [[Week 4, Part 2]] - Streams & Redirection, Introduction to filters
Sept 26 [[Week 5, Part 1]] - Filters Continued (awk, sed, & tr)
Sept 28 [[Week 5, Part 2]] - Working with grep
Oct 3 [[Week 6, Part 1]] - I/O practice & Quoting
Oct 5 [[Week 6, Part 2]] - Process management & Job control
Oct 10 - ~Mid-Semester Break!
Oct 12 [[Week 7, Part 2]] - Substitution
Oct 17 [[Week 8, Part 1]] - Text Editors & Shell Scripting Intro
Oct 19 [[Week 8, Part 2]] - Shell Scripting
Oct 24 [[Week 9, Part 1]] - Shell Scripting 2  
Oct 26 Week 9, Part 2 - Continue scripting work
Extra Material: [[Regular Expressions|Week B]] - Fits into the semester here if you'd like to review this extra-credit content.
Extra Material: [[The Environment|Week C]] - Fits about here too.  Also extra-credit.

Oct 31 [[Week 10, Part 1]] - Basic networking & SSH
Important background material - [[Working more efficiently with GNU screen & SSH keys]] & [[Tunnels & Proxies with SSH]]
Nov 2 [[Week 10, Part 2]] - System Basics - Starting and Stopping, init & run levels, layout of the operating system, system configuration (/etc/ files)
Nov 7 [[Week 11, Part 1]] - Expanding our systems: Working with rpm & yum, installing software from package and source
Nov 9 [[Week 11, Part 2]] - Web services and proxies
Nov 14 [[Week 12, Part 1]] - Time & Logging
Nov 16 [[Week 12, Part 2]] - DNS
Nov 21 - [[Week 13, Part 1]] - Complete lab 58 & either catch up or push ahead.
Nov 23 - [[Week 13, Part 2]] - Thanksgiving Break!
Nov 28 [[Week 14, Part 1]] - Crypto, Securing communications, & Scheduled tasks
Nov 30 [[Week 14, Part 2]] - Access control and user management
Dec 5 [[Week 15, Part 1]] - Linux Firewalls
Dec 7 [[Week 15, Part 2]] - Storage systems 

Dec 12 - [[Week 16]] - Finals Week


!!!Agendas for pages in italics are tentative


Extra Credit Material:
&nbsp;&nbsp; - We don't have time for this, but it's good stuff to know:
* [[Week B]] - Regular Expressions
* [[Week C]] - The Environment
* [[Week E]] - LVM
* [[Week F]] - Network File System (NFS)
* [[Week G]] - Backups & disaster recovery
/% * [[Virtualization & Containers]] %/
<html>
<font size="-2">Last Updated: 221205 00:10</font>
</html>
/***
To use, add {{{[[Styles HorizontalMainMenu]]}}} to your StyleSheet tiddler, or you can just paste the CSS in directly. See also HorizontalMainMenu and PageTemplate.
***/
/*{{{*/

/* Horizontal main menu bar across the top of the page */
#topMenu br { display: none; }
#topMenu {
	background: #39a;
	float: left;
	width: 90%;
	padding: 2px 0 2px 0;
}
#topMenu .button,
#topMenu .tiddlyLink {
	padding-left: 1em;
	padding-right: 1em;
	color: white;
	font-size: 115%;
}

/* Leave room for the side menus around the story column */
#displayArea { margin: 1em 15.7em 0em 1em; }

/* Narrow menu bar on the right-hand side */
#rightMenu {
	float: right;
	background: #39a;
	width: 10%;
	padding: 2px 0 2px 0;
}
#rightMenu .button,
#rightMenu .tiddlyLink {
	padding-left: 1em;
	padding-right: 1em;
	color: white;
	font-size: 115%;
}

/* just in case want some QuickOpenTags in your topMenu */
#topMenu .quickopentag { padding: 0px; margin: 0px; border: 0px; }
#topMenu .quickopentag .tiddlyLink { padding-right: 1px; }
#topMenu .quickopentag .button { padding-left: 1px; border: 0px; }

/*}}}*/
!![[Lab 51 - Bring test and www online]]
Assigned [[Week 10, Part 1]]

* Set a root password for your test and www ~VMs so you are able to log into them via SSH.
* Ensure your test and www ~VMs are online and joined to the lab network.  
** The notes above will help you configure networking
*** Use the hostname {{Monospaced{''www.//username//.ncs205.net''}}} and second IP address in your range for your web server.  
** [[Virtual Machines]] - VM information (Linked on the top menu bar)
** Also complete and submit the [[Lab 51|labs/lab51.pdf]] verification worksheet.  This submitted worksheet will indicate your ~VMs are ready to be evaluated.
* Connect to your ~VMs via SSH from the class shell server

The Proxmox virtual console is a means to access the ~VMs for initial troubleshooting and in case something goes wrong.  Once your ~VMs are online, all work should be done via SSH login.  

You cannot log into these ~VMs directly since they are on private IP addresses (192.168.x.x) behind the class router.  They can only be accessed by first connecting to the class shell server from home.  Use putty (or another SSH client) to connect to the class shell server and then use the {{Command{ssh}}} command to connect to your ~VMs.

{{Warning{Be sure to keep your ~VMs online and do not power them down, else it'll look like the work hasn't been completed when it comes time for grading.}}}
!![[Lab 52 - VM updates & software installation]]
Assigned [[Week 11, Part 1]]

!!! On both ~VMs:
* Update the OS and currently installed software
* Install the following packages via {{Command{ yum}}}:  man wget nc telnet bind-utils openssh-clients rsync bzip2

* Also complete and submit the [[Lab 52|labs/lab52.pdf]] verification worksheet.  This submitted worksheet will indicate your ~VMs are ready to be evaluated.

These packages will also need to be installed on all future ~VMs.  Make a note of it in your documentation.
!![[Lab 53 - Web Server]]
Assigned [[Week 11, Part 1]]

!!! Lab Tasks
<<<
1. Install apache and PHP on your web server
* Directions for this are above

2. Become familiar with apache and its configuration.  
* Check out the config files within {{File{/etc/httpd/conf/}}}
* The file {{File{/etc/httpd/conf/httpd.conf}}} is the main configuration file
* The Linux Administration Chapter 19 (Apache Web Server) may also be a useful resource.

3. Change the Apache {{Monospaced{''~DocumentRoot''}}} directory to {{File{/opt/work/htdocs}}}
* Create the directory {{File{/opt/work/htdocs}}} on your web server VM
* Make a backup of your Apache configuration file
** Always take a backup of a configuration file before making changes.  See the note below.  This way you'll have a known-good copy to refer to if there's any problems.
* Update the apache configuration lines necessary to make this change 
** (you may need to change this path in more than one location within the Apache config)
* Don't forget to restart Apache after changing its configuration file

4. Download the new {{File{index.html}}} file from my web server at 192.168.12.25 to your new Apache {{Monospaced{''~DocumentRoot''}}} directory
* The file {{File{index.html}}} is the default web page delivered to a client (eg: your web browser).  This file must exist in the correct location with correct permissions so your web server can provide content.

5. Ensure your web server is providing the correct website.  The new site should be 6 lines long and include ''Welcome to ~NCS205!'' in the body.
<<<

{{Warning{''Warning:'' It's always wise to make a backup of a configuration file before making changes.  The easiest way to do so is to copy the file with a timestamp appended to the new file name, for example:  {{Command{cp httpd.conf httpd.conf.210322-1522.bak}}}.  This captures the date & time in a way that's easily sortable.  The {{Command{diff}}} command can compare the config file to a backup, showing lines which differ between the two.  Example:  {{Command{diff httpd.conf httpd.conf.210322-1522.bak}}}
}}}

!!! Lab Deliverable

* Also complete and submit the [[Lab 53|labs/lab53.pdf]] worksheet.  This submitted worksheet will indicate your ~VMs are ready to be evaluated.
!![[Lab 54 - Set up MediaWiki]]
Assigned [[Week 11, Part 2]]

Complete the steps on this page to install and configure ~MediaWiki

Install [[MediaWiki|http://www.mediawiki.org/wiki/MediaWiki]] and customize it to your tastes.
* Install ~MariaDB
** Add a wiki user and database
* Download the ~MediaWiki source tarball
** Extract its contents to {{File{/opt/work/htdocs/}}}
** Rename the extracted directory to ''wiki''
* Update php and install dependencies
* Set up a tunnel or proxy to access your wiki
** You can access it by IP address until DNS is online:  http://your_IP/wiki/
** Be sure to replace //your_IP// with proper values.
* Configure ~MediaWiki to fully bring it online

* Be sure you can view the wiki after uploading the {{File{~LocalSettings.php}}} file.  It should look something like this:
[img[img/wiki.png]]


* Also complete and submit the [[Lab 54|labs/lab54.pdf]] verification worksheet.  This submitted worksheet will indicate your ~VMs are ready to be evaluated.
!! [[Lab 55 - Bring core VM online]]
Assigned [[Week 12, Part 1]]

<<<
Bring new core VM online:
* Hostname = {{Monospaced{core.//username//.ncs205.net}}}
* Use the third IP address in your range
* Apply outstanding updates and ensure your VM is running the latest available kernel
** A system reboot may be necessary if the kernel was also updated
* Also complete and submit the [[Lab 55|labs/lab55.pdf]] verification worksheet.  This submitted worksheet will indicate your VM is online and ready to be evaluated for this lab.

Install additional software:
* Standard packages, as previously discussed and recorded in your class notes
* DNS server software.  The package is {{Monospaced{bind-chroot}}}
* Time packages:  {{Monospaced{ntpdate ntp}}}
<<<
!! [[Lab 56 - Time]]
Assigned [[Week 12, Part 1]]

!!! Modify Hosts file:

Add a record similar to the following to the {{File{/etc/hosts}}} file on all of your ~VMs.  Do not remove any lines which may already be in the file.
<<<
192.168.12.26    core core.merantn.ncs205.net ntp.merantn.ncs205.net loghost ntp
<<<
* The IP address should be the address for your core VM
* Replace my username with yours
* These steps won't work if that line is missing or incorrect. 
* Read about the {{File{/etc/hosts}}} file on page 356 in the Linux Administration textbook.  This file is necessary because we don't have DNS running yet.

/%
!!! Remove chronyd

{{Warning{The chronyd NTP service is already installed and running.  This service will prevent the steps laid out in this lab from successfully completing.  Stop and disable the {{Monospaced{chronyd}}} service before proceeding with this lab.}}}
%/

!!! Install NTP services and synchronize time:

Install {{Monospaced{ntp}}} and {{Monospaced{ntpdate}}} on all ~VMs

core VM:  Configure {{Monospaced{ntpd}}} ({{File{/etc/ntp.conf}}}) as a server:
* By default the ntp configuration allows global access to the NTP server.  
** This can easily be abused
** Disable the first restrict directive and lock things down further
*** Comment out this line:  {{Monospaced{restrict default nomodify notrap nopeer noquery}}}
** Insert this line after the restrict you just disabled:  {{Monospaced{restrict default ignore}}}
* Allow your block of 8 IP addresses to communicate with the NTP service running on your core VM
** Add the appropriate restrict directives
** Be sure to include {{Monospaced{nopeer}}} & {{Monospaced{noquery}}} options
** See my config below for examples
* Insert: {{Monospaced{disable monitor}}}
* Synchronize time from the lab ntp server instead of the ~CentOS servers
** lab ntp server: {{Monospaced{ntp.ncs205.net}}}
* Add restrict directives to allow the naemon server full access:
** {{Monospaced{restrict 192.168.12.15 nomodify notrap}}}

test & www VM (and future ~VMs):  Configure {{Monospaced{ntpd}}} ({{File{/etc/ntp.conf}}}) as a client:
* Synchronize time from the ntp service on your core VM instead of the ~CentOS servers
** Use the hostname {{Monospaced{ntp.//username//.ncs205.net}}} instead of IP addresses
** This hostname should resolve due to the entry you just added to the {{File{/etc/hosts}}} file.  Test it with ping.

My Configs for reference (click the yellow box to expand them):
* Be sure to change host names and IP addresses appropriately:
* +++[My NTP Server] 
{{Monospaced{core# }}} {{Command{grep -v ^# /etc/ntp.conf | uniq}}}
{{{
driftfile /var/lib/ntp/drift

restrict default ignore

restrict 127.0.0.1 
restrict ::1

# Allow your range of 8 IPs to access the NTP server.  Replace my starting IP (24) with yours.
restrict 192.168.12.24 mask 255.255.255.248 nomodify notrap nopeer noquery
# Allow nagios server to check status of NTP
restrict 192.168.12.10 nomodify notrap
restrict 192.168.12.15 nomodify notrap

server ntp.ncs205.net iburst
restrict ntp.ncs205.net notrap nopeer noquery

disable monitor

includefile /etc/ntp/crypto/pw

keys /etc/ntp/keys
}}}
===

* +++[My NTP Clients]
{{Monospaced{www# }}} {{Command{grep -v ^# /etc/ntp.conf | uniq}}}
{{{
driftfile /var/lib/ntp/drift

restrict default ignore

restrict 127.0.0.1 
restrict -6 ::1

server ntp.merantn.ncs205.net
restrict ntp.merantn.ncs205.net notrap nopeer noquery

includefile /etc/ntp/crypto/pw

keys /etc/ntp/keys
}}}
===


!!! Start & enable ntpd
All ~VMs (current and future):
* Set ntpd and ntpdate to start on boot on all ~VMs
* Start the ntpd service now on all ~VMs


!!!! Verify it is working and time is being synchronized properly:

My core VM, an NTP server:
{{{
[root@core ~]# ntpstat
synchronised to NTP server (192.168.12.15) at stratum 3
   time correct to within 56 ms
   polling server every 1024 s

[root@core ~]# ntpq -p
     remote           refid      st t when poll reach   delay   offset  jitter
==============================================================================
*192.168.12.15    78.46.60.40    3 u   41   64  377    0.477  -18.800  13.181
}}}

My www server, an NTP client:

It took a few minutes after starting the services for the clock to synchronize:
{{{
[root@www ~]# ntpstat
unsynchronised
  time server re-starting
   polling server every 8 s
}}}

Eventually it did and I saw this:
{{{
[root@www ~]# ntpstat
synchronised to NTP server (192.168.12.26) at stratum 4
   time correct to within 232 ms
   polling server every 64 s

[root@www ~]# ntpq
ntpq> peers
     remote           refid      st t when poll reach   delay   offset  jitter
==============================================================================
*core            192.168.12.15     4 u   55   64  377    0.492  -35.066  62.617
}}}


* Also complete and submit the [[Lab 56|labs/lab56.pdf]] verification worksheet.  This submitted worksheet will indicate your ~VMs are ready to be evaluated for this lab.


!!! Troubleshooting

Time synchronization with ntpd doesn't happen immediately.  The service needs some time to build trust in its upstream time provider so that it will use it as a time source.  Be sure to allow at least a 30 minute delay after starting the services for this trust to be established before reporting issues.

If you are having difficulty getting time to synchronize, the following commands may help direct you to the root cause:

* {{Command{systemctl status -l ntpd}}}
* {{Command{ntpstat}}}
* {{Command{ntpq -p}}}
* {{Command{echo associations | ntpq}}}
* {{Command{cat /etc/hosts}}}
* {{Command{ps aux | grep ntp}}}

Any requests for help in the discussion boards should include output from the above commands for both your NTP server and any impacted NTP clients along with their ntp configuration file.  A copy/paste of the text into the discussion boards is easier to work with and highlight issues than simple screenshots.  Be sure to include the command & shell prompt with any output included.  ''Do not'' only include output without the shell prompt and command which obtained that output.
!! [[Lab 57 - Logging]]
Assigned [[Week 12, Part 1]]

!!! Modify Hosts file:

Be sure a record similar to the following exists in the file {{File{/etc/hosts}}} file on all of your ~VMs.  This should have been completed in the previous lab.
<<<
{{Monospaced{192.168.12.26    core core.merantn.ncs205.net ntp.merantn.ncs205.net loghost ntp}}}
<<<
* The IP address should be the address for your core VM
* Replace my username with yours
* These steps won't work if that line is missing or incorrect. 

!!! syslog:

!!!! core VM:  
* configure syslog to receive log information from other hosts

On your core VM, find these lines at the top of the file {{File{/etc/rsyslog.conf}}}:
{{{
# Provides UDP syslog reception
#$ModLoad imudp
#$UDPServerRun 514
}}}

Remove the comments from the bottom 2 lines (the first is actually a comment and should remain so).

!!!! www VM:
* configure syslog to also send log information to the core VM

On your www VM, find this line at the bottom of the file {{File{/etc/rsyslog.conf}}}:

{{{
#*.* @@remote-host:514
}}}
* Remove the comment at the beginning of the line
* Change the double &#064;@ to single @  (A single @ means to use UDP)
* Change ''remote-host'' to ''loghost''  
** ''loghost'' is an alias for our core VM.
** It's handy to use aliases like this in case we need to move our log destination.  We can then easily change the alias to point to a different system.  This isn't so convenient when our systems are defined in the {{File{/etc/hosts}}} file, but easy once DNS is in place.

Experiment with logging.  Investigate the logging commands and the log files within the directory {{File{/var/log/}}}.


!!! Also complete and submit the [[Lab 57|labs/lab57.pdf]] verification worksheet.  This submitted worksheet will indicate your VM is online and ready to be evaluated for this lab.
!! [[Lab 58 - Working with logs]]
Assigned [[Week 12, Part 1]]

This lab provides a brief refresher on working with text files and an introduction to using your syslog logs to investigate issues on your systems.

!!! Complete and submit [[Lab 58|labs/lab58.pdf]].

!! [[Lab 59 - Bind config and zones]]
Assigned [[Week 12, Part 2]]

Our end goal is for everyone to configure their own authoritative name servers for their lab networks.  I own the domain ncs205.net and manage its authoritative name server.  I am delegating control of the subdomain //username//.ncs205.net to each of you.  You will set up the master DNS server for this zone on your core VM.  The DNS server ns5.ncs205.net is running within the lab network.  It is a slave for your //username//.ncs205.net zone.  After you make changes to your DNS server, you will signal to this slave that you have records ready for it.  It will then perform a zone transfer to obtain your DNS records.  Once this zone transfer is complete, the DNS records for your //username//.ncs205.net domain will be globally available.

Begin to configure the DNS server on your core VM 
* Create your forward and reverse zone files.  Be sure to include:
** A default TTL
*** This must be the first line of the zone file.  See below for an example.
** A SOA record
** NS records for your primary and slave DNS servers
** A and PTR records for each of your hosts (so far we have 3 ~VMs: test, web, and core)
*** Be sure to note that A and PTR records do not belong in the same zone file
** CNAME records for host name __directory__, __ntp__, and __loghost__ pointing to your core VM

* Also complete [[Lab 59|labs/lab59.pdf]] and submit this PDF after your DNS server is online.  This submitted worksheet will indicate your ~VMs are ready to be evaluated for this lab.


!!! Configuring Bind

Let's stand up our authoritative name server.

On your core VM: Install the packages:  {{Monospaced{bind-chroot bind-utils}}}
* bind runs [[chrooted|https://www.journaldev.com/38044/chroot-command-in-linux]] to protect the system.  This security measure isolates the service from the rest of the OS, so an attacker cannot access the rest of the system if they compromise the DNS service.

Edit its config file, {{File{/etc/named.conf}}}

{{Warning{''Note:'' You'll see my username and IP addresses in the configurations below.  Be sure to replace my values with yours.}}}


These options should be set within the options section of {{File{named.conf}}}.  Some options already exist, so make sure their values match what's below.  Add any which are not already present.
{{{
	listen-on port 53 { any; };

	allow-query     { any; };
	allow-recursion { ncs205; };
        forwarders { 192.168.12.10; };

}}}

!!! Access control lists:

Add access control lists to the top of the file, after the options section.  These ~ACLs will limit who can query your name server.
{{{
acl "ncs205" {
	127.0.0.1;
	localhost;
        192.168.12.24/29;
};
}}}
This is just an example.  ''24'' is my starting IP address.  Replace ''24'' with your starting IP address.


!!! Forwarders:
The name server will forward any query it can't answer locally (from authoritative zone data or from cache) to the forwarder.
Forwarders are queried in the listed order until an answer is received.  The forwarding IP address above is the name server for our lab ~VMs.
(This was already set above)


!!! Starting bind

It's always a good idea to verify your configuration before restarting the service.  DNS is a critical service.  If you issue a service restart and your configuration is invalid, you'll experience downtime while you sort out the problem.

* Verify the configuration:  {{Command{named-checkconf  /etc/named.conf}}}
** No output will be returned if the configuration file is valid.  Correct any errors which were displayed.

* Start the {{Monospaced{named-chroot}}} service now.
** Be sure to reload the service whenever there's a configuration change.
* Set this service to also start at system boot

{{Warning{''Warning: '' Two services are available - ''named'' and ''named-chroot''.  The two will conflict with each other.  Be sure you're referring to the ''{{Monospaced{named-chroot}}}'' service when you need to enable, start, stop, or restart bind.}}}

Verify the service is working.  You should be able to look up a DNS record by querying your new DNS server.  The +noall +answer options provide an abbreviated output and do not always need to be used.
{{{
[root@core ~]# dig www.google.com @localhost +noall +answer

; <<>> DiG 9.11.4-P2-RedHat-9.11.4-9.P2.el7 <<>> www.google.com @localhost +noall +answer
;; global options: +cmd
www.google.com.         63      IN      A       172.217.3.100
}}}
It might take a few minutes after the service is started/restarted to be able to query external records.  It might not work right away.  And it might return a different record than the one above.


!!! Defining zones:

Let's put our zone definitions at the bottom of  {{File{/etc/named.conf}}}.  Be sure to replace my username with yours:

!!!! Forward zone:
{{{
zone "merantn.ncs205.net" {
        type master;
        file "/etc/named/master/merantn.ncs205.net.fwd";
};
}}}

Here is my complete forward zone file.  Save this to {{File{/etc/named/master///username//.ncs205.net.fwd}}}
 - You may need to create the {{File{master}}} directory.
 - Replace usernames and IP addresses as necessary.

{{{
$TTL 5m
@ IN  SOA ns1.merantn.ncs205.net. hostmaster.merantn.ncs205.net. (
 2022102900    ; serial number
 1d    ; refresh
 5d    ; retry
 2w    ; expire
 30m    ; minimum
)
                IN  NS  ns1.merantn.ncs205.net.
                IN  NS  ns5.ncs205.net.

ns1             IN      A       192.168.12.26

test            IN      A       192.168.12.24
www             IN      A       192.168.12.25
core            IN      A       192.168.12.26

loghost         IN      CNAME   core
ntp             IN      CNAME   core
directory       IN      CNAME   core
}}}


!!! Allow zone transfers to the slave

We need to grant permission for the slave DNS server to perform a zone transfer against your server.

On your core vm, add a new ACL to the top of  {{File{/etc/named.conf}}}, after the options section:
{{{
acl "slaves" {
        127.0.0.1;              // allow transfer from localhost for testing
	192.168.12.10;		// allow the secondary authoritative name server to perform zone transfers
	192.168.12.x;		// allow your core system to perform zone transfers.  Replace x with the IP address of your core VM
};
}}}

and add this statement to the options section:
{{{
	allow-transfer  { slaves; };
}}}

You can verify your zone configuration with {{Command{named-checkzone //zone// //file//}}}
 - eg:  {{Command{named-checkzone merantn.ncs205.net /etc/named/master/merantn.ncs205.net.fwd}}}
 - again, if you make changes that break your zone file and restart the service, you'll have a critical outage.  Verify your configs first!

Restart bind to reload the configuration and test your server:

Be sure to replace my username with yours.
{{{
[root@core ~]# dig www.merantn.ncs205.net @localhost

; <<>> DiG 9.11.4-P2-RedHat-9.11.4-9.P2.el7 <<>> www.merantn.ncs205.net @localhost
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 59402
;; flags: qr aa rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 2, ADDITIONAL: 2

;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 4096
;; QUESTION SECTION:
;www.merantn.ncs205.net.                IN      A

;; ANSWER SECTION:
www.merantn.ncs205.net. 300     IN      A       192.168.12.25

;; AUTHORITY SECTION:
merantn.ncs205.net.     300     IN      NS      ns1.merantn.ncs205.net.
merantn.ncs205.net.     300     IN      NS      ns5.ncs205.net.

;; ADDITIONAL SECTION:
ns1.merantn.ncs205.net. 300     IN      A       192.168.12.26

;; Query time: 0 msec
;; SERVER: ::1#53(::1)
;; WHEN: Sun Apr 12 02:16:37 EDT 2020
;; MSG SIZE  rcvd: 119
}}}


!!! Configure the slave  (I already did this for each of you)

On ns5.ncs205.net (the slave I manage), I need to add a configuration block similar to the following to named.conf for each student network in the class.  This will configure that server to become a slave for your zones:
{{{
zone "merantn.ncs205.net" {
        type slave;
        file "slave/ncs205.net/merantn";
        masters { 192.168.12.26; };
};
}}}


!!! Verification:

1. Once your zone file is published, you can check the directory {{File{/opt/pub/ncs205/zones/}}} on the class shell server.  You should see a file in that directory that matches your username.
2. You should be able to run the dig command on the shell server for one of your hosts, eg:  {{Command{dig www.merantn.ncs205.net}}}
3. You should be able to run a DNS lookup on your home system for one of your hosts, eg:  {{Command{nslookup www.merantn.ncs205.net}}}
4. The system log {{File{/var/log/messages}}} on your core VM may contain useful ~DNS-related messages.

Testing from my home Windows PC:
{{{
Microsoft Windows [Version 6.1.7601]
Copyright (c) 2009 Microsoft Corporation.  All rights reserved.

C:\Users\nick>nslookup www.merantn.ncs205.net
Server:  one.one.one.one
Address:  1.1.1.1

Non-authoritative answer:
Name:    www.merantn.ncs205.net
Address:  192.168.12.25
}}}

If all of these check out, congrats - your DNS zones are now globally accessible!


!!! DNS client configuration

Once your DNS server is working, modify {{File{/etc/resolv.conf}}} on each of your ~VMs to add your new nameserver and expand the search domains
* We want to query our nameserver first and the lab DNS server second
* We want to search our domain first

Here's mine:
{{{
[root@core ~]# cat /etc/resolv.conf
nameserver 192.168.12.26
nameserver 192.168.12.10
search merantn.ncs205.net ncs205.net
}}}

The //search// keyword in the {{File{resolv.conf}}} is a list of domains to add to unqualified host names.  If you don't specify a fully qualified host name, then those domains will be appended in order until a match is found.  This lets us save some typing and refer to our hosts by their unqualified name instead of having to type out the full domain name each time.

Unqualified:  Just the host name, eg: //core//
Fully qualified domain name:  //core.merantn.ncs205.net//

You can access your ~VMs by host name from anywhere within the lab network once you have DNS set up.

The //ncs205.net// domain is also in the search string on the class shell server.  This means you can easily access your ~VMs from the shell server by entering //host//.//username//.

For example:
{{{
[merantn@shell ~]$ cat /etc/resolv.conf
nameserver 192.168.12.10
search ncs205.net

[merantn@shell ~]$ ssh core.merantn -l root
root@core.merantn's password:
Last login: Sun Apr 12 13:15:11 2020 from 192.168.12.10
[root@core ~]#
}}}


!! Misc DNS topics

Don't forget to update your serial number after making changes!
- Notifications are broadcast to slave NS only if the serial number increments
- Records may be stale if you forget to increment the serial

You can use the command {{Command{rndc reload}}} to refresh your DNS zones after updates are made.
!! [[Lab 60 - SSL Certificates]]
Assigned [[Week 14, Part 1]]

Follow the directions above to add SSL encryption support to your web server.

Also complete and submit the [[Lab 60|labs/lab60.pdf]] verification worksheet.  This submitted worksheet will indicate your ~VMs are ready to be evaluated for this lab.
!! [[Lab 60 - Scheduled Tasks]]
Assigned [[Week 13, Part 1]]


!!! SSL Certificate renewal

Our lab web server SSL certificate will expire in 90 days.  This isn't much of an issue for us because class will have ended and this server will be decommissioned by then.

But if we were doing this for real, renewing that SSL certificate would be a task we would need to account for.  SSL certificates created with the {{Command{acme.sh}}} tool can be renewed by running the command with the {{Monospaced{&#45;-cron}}} option.  Any certs in its configuration will be checked for upcoming expiration and automatically renewed if they are about to expire.

We don't want to worry about running this manually and potentially forgetting about it.  We can instead use cron to run this command for us at a set interval.


!!! Cron:

Create the following cron task on your www VM:

* Schedule {{Command{acme.sh &#45;-cron}}} to run every other day at 6pm.
* Save your scheduled job to the file {{File{/etc/cron.d/acme}}}

Also complete and submit the [[Lab 60|labs/lab60.pdf]] verification worksheet.  This submitted worksheet will indicate your ~VMs are ready to be evaluated for this lab.
* ''Note:'' Lab 60 is a late addition to the semester, so it's getting a high number.  
!! [[Lab 61 - Scheduled Tasks]]
Assigned [[Week 14, Part 1]]


!!! SSL Certificate renewal

Our lab web server SSL certificate will expire in 90 days.  This isn't much of an issue for us because class will have ended and this server will be decommissioned by then.

But if we were doing this for real, renewing that SSL certificate would be a task we would need to account for.  SSL certificates created with the {{Command{acme.sh}}} tool can be renewed by running the command with the {{Monospaced{&#45;-cron}}} option.  Any certs in its configuration will be checked for upcoming expiration and automatically renewed if they are about to expire.

We don't want to worry about running this manually and potentially forgetting about it.  We can instead use cron to run this command for us at a set interval.


!!! Cron:

Create the following cron task on your www VM:

* Schedule {{Command{acme.sh &#45;-cron}}} to run every other day at 6pm.
* Save your scheduled job to the file {{File{/etc/cron.d/acme}}}

Also complete and submit the [[Lab 61|labs/lab61.pdf]] verification worksheet.  This submitted worksheet will indicate your ~VMs are ready to be evaluated for this lab.
!! [[Lab 62 - VM Lockdown - Secure your VMs]]
Assigned [[Week 14, Part 2]]

!!! Add user accounts to all ~VMs

Add two local user accounts to your ~VMs
* First account - set the user name to your campus username
** UID = 1000
** GID = 100
** Set a valid password
** Create a home directory within {{File{/home/}}}
** Copy the environment configuration files from {{File{/etc/skel/}}} to the new home directory
* Second account - username = merantn
** UID = 7289
** GID = 100
** Create a home directory within {{File{/home/}}}
** Copy the environment configuration files from {{File{/etc/skel/}}} to the new home directory
** Copy my SSH public key (see below) to the user's {{File{~/.ssh/authorized_keys}}} file
*** You will likely need to create the directory and file
*** Be sure you understand how SSH key-based authentication works.
** Use this password hash (encrypted password):  
{{{
$6$nmUnix22$FCHlRIf.MFckb664yGEMGIC09cxfIk6NO/6fz/ou5EBbLQuo5.J0.szsg7aRswSIvxVjPGYWhiQ2XKD62eg4Y0
}}}
''Note:'' The old password hash: ({{Monospaced{@@$6$hiaEgh6A$cEew6uUV8v5IBrwIMRahAyoOlgnKOaonnFx4sCzW4bu6mr17/2LcSdKknVa0GuytKqby391Z3p03FNelrNGD2.@@}}}) will also be accepted.


* Verify permissions:
** Both user's home directories and all files below them must be owned by the user and GID 100
** The user's home directory must have proper directory permissions - it must not be writable by the group or others for proper SSH function.
* Verify ~SELinux
** ~SELinux must be disabled for SSH public key authentication to function properly
** Edit {{File{/etc/selinux/config}}} and change {{Monospaced{enforcing}}} to {{Monospaced{disabled}}} on line #7 to disable ~SELinux on system startup
** Execute {{Command{setenforce 0}}} to disable ~SELinux for the current boot
** This may already have been completed.

My SSH public key
{{{
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBVLQcFklXcim/xylMML4QnLy4iuzrdgOUWivktOAlNX merantn@shell.ncs205.net
}}}

{{Note{''Note:'' You can test logins to your ~VMs using my user account by creating your own SSH keypair and adding your SSH public key to the {{File{~/.ssh/authorized_keys}}} file in my home directory on your VM.  See the directions in [[Working more efficiently with GNU screen & SSH keys]] for how to create an SSH keypair.  The {{File{authorized_keys}}} file can contain multiple public keys.  Any of the corresponding private keys will be accepted for login.}}}


!!! Disable direct root login via SSH on all ~VMs

# Adjust the sshd configuration to disable direct root logins.  All users must first login as a regular, unprivileged user and then elevate privileges.  
** Look for the {{Monospaced{~PermitRootLogin}}} configuration option in {{File{/etc/ssh/sshd_config}}}
# Adjust PAM to require wheel group membership in order to su to root
** Look in {{File{/etc/pam.d/su}}}
# Don't forget to add both user accounts to the wheel group

{{Warning{''Warning:'' When messing with authentication, it's always wise to verify everything works before logging out.  Open a new putty window, ssh in, and elevate up to a root prompt before disconnecting from your original putty session.  Otherwise, if you log out and something is broken, you may have difficulty accessing the system.}}}

!!! Verification Worksheet

Also complete and submit the [[Lab 62|labs/lab62.pdf]] verification worksheet.  This submitted worksheet will indicate your ~VMs are ready to be evaluated for this lab.
!![[Lab 63 - sudo]]:  apache configuration access
Assigned [[Week 14, Part 2]]

We would like to provide the webmaster the ability to update the apache configuration and restart the service on the web server virtual machine without granting full root level access.  The {{Command{sudo}}} and {{Command{sudoedit}}} utilities can be used to accomplish this.

!!! Create a webmaster user on your web server VM
* username = wes
* uid = 2000
* gid = 100
* Fully configure the environment for this user

!!! Create a new group for the webmasters
* group name = webmaster
* gid = 1000
* add wes to this group

!!! Configure {{Command{sudo}}} / {{Command{sudoedit}}} to:
# Grant the user ''wes'' the ability to edit the primary apache configuration file
# Grant the user ''wes'' the ability to execute the {{Command{apachectl}}} command as root.

Be sure you understand why {{Command{sudoedit}}} is used for modifying root-owned files instead of just {{Command{sudo}}} followed by an editor, eg: {{Command{sudo vi /etc/httpd/conf/httpd.conf}}}.


!!! Verification Worksheet

Also complete and submit the [[Lab 63|labs/lab63.pdf]] verification worksheet.  This submitted worksheet will indicate your VM is ready to be evaluated for this lab.
!! [[Lab 64 - Enable Two-Factor Authentication]]
Assigned [[Week 14, Part 2]]

Passwords are increasingly proven to be insufficient as the sole means of authentication.  Passwords are too easily phished, captured via shoulder surfing or key loggers, or stolen from data breaches.  We also generally do a poor job of selecting passwords.  Password effectiveness is greatly reduced due to reuse across multiple sites and the selection of poor quality, weak passwords.  Strong, secure passwords should be unique and contain a minimum of 12 random characters across the full alphabetic, numeric, and symbol character space.  This then makes them difficult to remember.

These shortcomings can be mitigated with the use of multifactor authentication.  Utilizing a hardware token is ideal.  Google recently [[made the news|https://krebsonsecurity.com/2018/07/google-security-keys-neutralized-employee-phishing/]] for introducing hardware tokens for their employees to access corporate resources with great success.  The Google-made [[Titan Security Key|https://cloud.google.com/titan-security-key/]] is now available for general purchase.  [[YubiKeys|https://www.yubico.com/store/]] are another popular alternative for general use.  Such keys can easily be used to add multi-factor authentication to operating system logins, services, or web sites after these systems are enabled to support hardware tokens.

Soft tokens are available as a free alternative to hardware tokens.  A soft token is a desktop or mobile application which generates a one-time pin which can be entered along with a password to prove identity.  Instead of a token on your keychain, your desktop or phone becomes "something you have".  Multi-factor authentication should be used for any services where a higher level of security is warranted due to an increased exposure to attack.

Google Authenticator ([[Android|https://play.google.com/store/apps/details?id=com.google.android.apps.authenticator2&hl=en_US]] or [[Apple|https://itunes.apple.com/us/app/google-authenticator/id388497605?mt=8]]) is a popular soft token with wide support.  The Google Authenticator can be used as a second factor for ssh authentication to Linux servers.  

If not for our class's virtual lab infrastructure protecting us from the outside world, our class ~VMs would all otherwise be exposed to the internet and open to attack.  Any externally accessible server with such an increased exposure to attack would necessitate the deployment of multi-factor authentication.

!!! We will now implement two-factor authentication using Google Authenticator for access to our core VM.

{{Note{''Note:''  Two-factor authentication with the Google Authenticator will be set up for your regular user, not the root account.}}}

* Ensure your user account exists on your core VM and you are able to authenticate with a password.
** None of this will work if your user account is not fully functional
* Get started by installing the Google Authenticator app on your phone.  
* We must next generate a barcode or key to add to the Google Authenticator App.  
** Log in to your core VM via SSH and elevate to root
*** Ensure the {{Monospaced{epel-release}}} package is installed on your core VM
*** Install the {{Monospaced{google-authenticator}}} package on your core VM
** Exit the root login and log in to your core VM as a regular user
*** Run the command {{Command{google-authenticator}}} to initialize the token
*** Answer ''y'' to the question: ''Do you want authentication tokens to be time-based (y/n)''

You will be presented with a QR code to scan from the Google Authenticator app on your phone along with a secret key and a series of emergency scratch codes.  The secret key can be used to add this account to the Google Authenticator in case you are unable to scan the barcode.  Emergency scratch codes should be stored somewhere safe and are used to authenticate in case you lose your phone.
** Save the secret Key.  We'll need it later.

Next, on your phone, launch the Google Authenticator app and choose the option to scan a barcode or enter a key and provide the appropriate input.

[img[img/googleauth1.png]]

Return to your VM and answer the remaining questions:

{{Monospaced{Do you want me to update your "/home/merantn/.google_authenticator" file? (y/n) ''y''}}}

{{Monospaced{Do you want to disallow multiple uses of the same authentication token? This restricts you to one login about every 30s, but it increases your chances to notice or even prevent man-in-the-middle attacks (y/n) ''y''}}}

{{Monospaced{By default, a new token is generated every 30 seconds by the mobile app. In order to compensate for possible time-skew between the client and the server, we allow an extra token before and after the current time. This allows for a time skew of up to 30 seconds between authentication server and client. If you experience problems with poor time synchronization, you can increase the window from its default size of 3 permitted codes (one previous code, the current code, the next code) to 17 permitted codes (the 8 previous codes, the current
code, and the 8 next codes). This will permit for a time skew of up to 4 minutes between client and server. Do you want to do so? (y/n) ''n''}}}

{{Monospaced{If the computer that you are logging into isn't hardened against brute-force login attempts, you can enable rate-limiting for the authentication module. By default, this limits attackers to no more than 3 login attempts every 30s. Do you want to enable rate-limiting? (y/n) ''n''}}}

{{Warning{''Warning:'' Answering no to the last question is a poor security choice.  If we were implementing this in a production environment we would answer yes to enable rate-limiting.  We are only answering no because we are testing something new and do not want to lock ourselves out in the process.}}}

The file {{File{~/.google_authenticator}}} will contain your 2FA configuration.

You should now have the Google Authenticator app installed on your phone and an account configured for use.  Next we must configure the operating system to require this second form of authentication for SSH logins.  We will not modify the configuration for Console logins, so if things go wrong we can always log in through the Proxmox console to fix it.

!!! Configure the core server to require two-factor authentication

* Escalate to root privileges

* Edit the file {{File{/etc/pam.d/sshd}}}  and add the following line to the bottom:

{{{
auth required pam_google_authenticator.so nullok
}}}

* Edit the file {{File{/etc/ssh/sshd_config}}} and search for //~ChallengeResponseAuthentication// .  Ensure the value is set to ''yes'':

{{{
ChallengeResponseAuthentication yes
}}}

* Save and close the file, then restart the sshd service:  {{Command{systemctl restart sshd}}}


Finally, ''without logging out'', attempt to log in to your core VM from itself.  Launch the Google Authenticator App to generate a new token.  When making changes to remote connection services, we do not want to log out until we can verify those changes are functioning properly.  If we disconnect and something went wrong, we might end up locked out!

[img[img/googleauth2.jpg]]

Logging in with two-factor authentication:
{{{
[merantn@core ~]$ ssh localhost -l merantn
Password:
Verification code:
Last login: Sun Apr 19 00:22:40 2020 from localhost
[merantn@core ~]$
}}}

With the Google Authenticator changes in place, I'm prompted for my password as usual along with the verification code from the Authenticator App.  ''Note:'' Each code is valid only once to prevent replay attacks.  Once you log in, you may need to wait up to 30 seconds for a new code to be generated before you can log in again.


!!! Verification Worksheet

Also complete and submit the [[Lab 64|labs/lab64.pdf]] verification worksheet.  This submitted worksheet will indicate your ~VMs are ready to be evaluated for this lab.
!! [[Lab 65 - SSH Intrusion]]
Assigned [[Week 14, Part 2]]

Details to come.  This will be available for extra credit once it's finished.
!! [[Lab 66 - Host-based Firewalls]]
Assigned [[Week 15, Part 1]]

!!! Implement a host-based firewall on your ~VMs

* Complete the [[Lab 66|labs/lab66.pdf]] worksheet and upload it to the class shell server
* Take note of the state of your services in Naemon.  It's always a good idea to have a known baseline of what things look like before making network changes.  Taking a screenshot may be helpful.
** If something is down after you make changes and you don't know what things looked like before, you won't know if your change was the reason for the outage.
* Enable the firewalld service so it starts on boot and start the service now
* Request a scan of your services on Naemon.  Take note of any changes to the alarms.
* Add the firewall rules you identified in the Lab 66 PDF.
* Recheck your services in Naemon and ensure all new alarms have cleared.
!![[Lab 67 - Bring Files VM online]]
Assigned [[Week 15, Part 2]]

Bring your files VM online:
* A new VM was added for you
* Assign it the 4th IP address in your range
* Add the hostname {{Monospaced{''files.//username//.ncs205.net''}}} to the file {{File{/etc/hostname}}}
* Add an A record to your DNS zone for this new VM
* Reboot the VM to ensure all network settings were properly applied
* Install the standard software packages
* Apply any outstanding updates
* Configure NTP to synchronize time against your core VM and ensure time is fully synchronized
* Apply the steps in [[Lab 62 - VM Lockdown - Secure your VMs]] to harden this VM.


!!! Verification Worksheet

Also complete and submit the [[Lab 67|labs/lab67.pdf]] verification worksheet.  This submitted worksheet will indicate your ~VMs are ready to be evaluated for this lab.
!![[Lab 68 - Storage Expansion]]
Assigned [[Week 15, Part 2]]

Some systems need additional storage beyond what was initially provisioned.  Here, we have a file server VM that was created with an additional disk.  We now need to make that additional disk available to the operating system for storing additional data.

Perform the following steps on your files VM.

!!! Observe available storage devices

The {{Command{lsblk}}} command is a quick way to visualize all storage devices available to a system.  Here, we can see that there are two unallocated drives - {{File{vdb}}} and {{File{vdc}}}.  We'll use {{File{vdb}}} for this lab and leave {{File{vdc}}} alone.
{{{
[root@files ~]# lsblk
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sr0              11:0    1 1024M  0 rom
vda             252:0    0   16G  0 disk
├─vda1          252:1    0    1G  0 part /boot
└─vda2          252:2    0   15G  0 part
  ├─centos-root 253:0    0 13.4G  0 lvm  /
  └─centos-swap 253:1    0  1.6G  0 lvm  [SWAP]
vdb             252:16   0    2G  0 disk
vdc             252:32   0    2G  0 disk
}}}

!!! Create vdb1 Partition

It's generally preferred to create partitions on the drives instead of using the bare device.  Partitions are logical divisions of the physical disk that will then hold the filesystem.  Here, we're going to devote the entire disk to a single partition and a single filesystem.  Creating multiple partitions on a disk allows it to hold separate filesystems.  In some instances, physically dividing groups of files into separate filesystems is preferred.  One example is logs.  If you have a system, such as a webserver, that may generate a lot of logs, it's wise to store those logs on their own filesystem.  If everything is stored on the same filesystem, excessive logs could fill the disk and interfere with the server's ability to store new data.

Refer to the //Storage Layers// diagram.  We'll be following the path on the left from Storage Devices to Partitions to Filesystems.
[img[img/storage-layers.jpg]]

Duplicate this interaction with the {{Command{parted}}} command to create a new disk label and new partition.  The first {{Command{print}}} command shows the disk is currently bare.
{{{
[root@files ~]# parted /dev/vdb
GNU Parted 3.1
Using /dev/vdb
Welcome to GNU Parted! Type 'help' to view a list of commands.
(parted) print
Error: /dev/vdb: unrecognised disk label
Model: Virtio Block Device (virtblk)
Disk /dev/vdb: 2147MB
Sector size (logical/physical): 512B/512B
Partition Table: unknown
Disk Flags:
(parted)
}}}
{{{
(parted) mklabel gpt
(parted) mkpart
Partition name?  []? storage
File system type?  [ext2]? xfs
Start? 1
End? 100%
(parted) quit
Information: You may need to update /etc/fstab.

[root@files ~]#
}}}


Now run {{Command{ lsblk }}} to verify the new partition was created.  It's always wise to add verification steps as you proceed instead of just blindly assuming everything is working as it should.  If you compare this output to the one above, you'll see that the {{File{ vdb1 }}} partition has been created.
{{{
[root@files ~]# lsblk
NAME            MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
sr0              11:0    1 1024M  0 rom
vda             252:0    0   16G  0 disk
├─vda1          252:1    0    1G  0 part /boot
└─vda2          252:2    0   15G  0 part
  ├─centos-root 253:0    0 13.4G  0 lvm  /
  └─centos-swap 253:1    0  1.6G  0 lvm  [SWAP]
vdb             252:16   0    2G  0 disk
└─vdb1          252:17   0    2G  0 part
vdc             252:32   0    2G  0 disk
}}}


!!! Create the filesystem

We can see from the {{Command{ lsblk }}} command that the new partition, {{File{vdb1}}}, has been successfully created.  Now we must put a filesystem on it.  Partitions are the physical divisions of a disk.  Filesystems are the data structures the operating system interacts with in order to store files.
{{{
[root@files ~]# mkfs.xfs /dev/vdb1
meta-data=/dev/vdb1              isize=512    agcount=4, agsize=130944 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=1        finobt=0, sparse=0
data     =                       bsize=4096   blocks=523776, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=1
log      =internal log           bsize=4096   blocks=2560, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
}}}


!!! Create the mount point

A mount point is a representation of the filesystem that we can interact with.  On Windows systems, mount points are generally drive letters, like C:\ or D:\.  In the Unix/Linux world, everything is one big filesystem tree.  Linux mount points are directories on that tree.  We identify a directory to mount our new filesystem to, and then any interaction with that directory and all items within it will be directed to our new disk volume.  Here, we want to make our new disk available to the system at the directory {{File{ /opt/storage/ }}}.

We first need to ensure the new mount point exists:
{{{
[root@files ~]# mkdir /opt/storage
}}}


!!! Edit the filesystem table

The ''f''ile''s''ystem ''tab''le, {{File{/etc/fstab}}}, is the configuration file which specifies which disk volumes are mounted at system startup.  Add your new disk volume to the file so it is mounted on boot.

Here's a copy of my {{File{ /etc/fstab }}} file.  The last line is the one you need to copy to yours.  Each line contains:
* the physical volume, {{File{ /dev/vdb1 }}}
* the mount point, {{File{/opt/storage }}}
* the filesystem type, {{Monospaced{ xfs }}}
* any special mount options.  Here, just the {{Monospaced{ defaults }}}
* a binary value ({{Monospaced{0}}} or {{Monospaced{1}}}) to indicate whether the filesystem should be backed up.  This is largely deprecated.
* the order in which filesystem checks ({{Command{fsck}}} command) should be run.  A value of {{Monospaced{ 0 }}} disables these checks
{{{
[root@files ~]# cat /etc/fstab

#
# /etc/fstab
# Created by anaconda on Fri Mar 13 00:03:20 2020
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/centos-root /                       xfs     defaults        0 0
UUID=f68b9069-7271-48de-b968-00d62e825144 /boot                   xfs     defaults        0 0
/dev/mapper/centos-swap swap                    swap    defaults        0 0

/dev/vdb1               /opt/storage      xfs   defaults        0 0
}}}


!!! Mount the new filesystem

Changes to the {{File{/etc/fstab}}} file should be tested and filesystems mounted with the {{Command{ mount -a }}} command.  This will catch any errors in the file.  If there is an error mounting a filesystem on system startup, the OS will not fully load and your only option will be to fix the problem on console.  This can be a nasty surprise if you don't have easy access to console.

The {{Command{df -h}}} command adds a verification step that the filesystem is fully mounted and accessible.  The old proverb //trust, but verify// must apply to everything you do.
{{{
[root@files ~]# mount -a

[root@files ~]# df -h
Filesystem               Size  Used Avail Use% Mounted on
devtmpfs                 232M     0  232M   0% /dev
tmpfs                    244M  120K  244M   1% /dev/shm
tmpfs                    244M   29M  215M  12% /run
tmpfs                    244M     0  244M   0% /sys/fs/cgroup
/dev/mapper/centos-root   14G  2.4G   12G  18% /
/dev/vda1               1014M  228M  787M  23% /boot
tmpfs                     49M     0   49M   0% /run/user/7289
/dev/vdb1                2.0G   33M  2.0G   2% /opt/storage
}}}


!!! Verification worksheet

You should now have successfully added a new storage volume to your files server VM.  Complete and submit the [[Lab 68|labs/lab68.pdf]] verification worksheet when you are ready for review.
Mastery of this subject material will only come with practice. To that end, this will be a very hands-on and practical course. Expect graded lab assignments regularly to provide ample practice with the assigned material.  Properly completing lab assignments on time is necessary to receive a good grade for this course. Not completing lab assignments at all will likely result in a failing grade.

Any deliverables will be collected for grading on their due date. Late assignments may be accepted, subject to time-dependent grade penalty of up to 50%. Presentation of submitted assignments will also impact grade.

{{Note{''Note:'' It is much better to have correct work submitted late than obviously wrong or incomplete work submitted on time.  If you're having trouble with some of the material and need more time, please let me know and we can discuss adjusting due dates.  Submitting poor quality work to meet a due date is not a wise professional strategy.}}}


!! Submitting Homework Assignments
Homework assignments are to be uploaded to the class shell server using a file transfer program like ~WinSCP and saved to the directory {{File{/opt/pub/ncs205/submit/}}}. I will then grade/annotate your work and return the files to you for review. Most homework assignments will be PDF forms to complete. Download the lab PDF and open it in [[Acrobat Reader|https://get.adobe.com/reader/]].  ''Do not use the PDF viewer in your web browser''.  It will not properly save the file and you will upload a blank document.  Grades will be posted to Blackboard.

After downloading the PDF assignment and opening the file in [[Acrobat Reader|https://get.adobe.com/reader/]], add your name to the top, fill in your responses, then save & close the file.  It would be wise to reopen the PDF in Acrobat Reader to make sure everything saved correctly before uploading to the server.  You should be in the habit of verifying your work before submitting it.

Files must be named appropriately so we don't have filename collisions among everyone's uploaded files. Rename your PDF document following this naming convention: ''ncs205-lab#-username.pdf''
* replace # with the lab number
* replace username with your campus username

Uploaded labs ''must'' contain your name at the top of the document and their file names ''must'' follow this file name format __exactly__ in order to be graded. This includes case - all letters must be lowercase. The Unix operating systems are case sensitive, so {{File{~NCS205-lab1-jdoe12.pdf}}} is a different file than {{File{ncs205-lab1-jdoe12.pdf}}}.  The former would not be accepted for review.

{{Note{''Note:'' The Microsoft Windows operating system hides file extensions by default.  This is a terrible setting for a security practitioner and should be disabled.  A common mistake is to fail to take this into account and upload files with a double extension, such as {{File{ncs205-lab1-jdoe12.pdf.pdf}}}.  This file would not be named correctly and thus not accepted for review.}}}

!! How to upload your lab assignments:
--A video will be posted here demonstrating the process in the coming days.--  Please let me know if you have trouble figuring this out.

!! Late Penalties
Point penalties for late lab assignments will be assessed as follows:

|!Penalty|!Condition|
| 0 |Sneak it in past the due date but before I grade the labs|
| 10% |Submitted after the batch has been graded|
| 20% |Submitted after graded labs have been returned|
| 30% |Submitted after we've reviewed a lab|
| 40% |Submitted after I've posted a review video or we've held an online meeting to discuss a lab.|

{{Warning{''Note:'' Labs 1 through 25 will not be accepted after the last date to Withdraw from the course unless prior approval is obtained.}}}

!! The workflow
# You upload a completed lab PDF to {{File{/opt/pub/ncs205/submit/}}}
# Every hour a script will collect new submissions which are properly named and copy them to the grading queue, {{File{/opt/pub/ncs205/queue/}}}.
# The queue will be synchronized to my tablet for review. Only new files will be copied.
# Any annotations will be recorded and synchronized back to the shell server, saved to the directory {{File{/opt/pub/ncs205/graded/}}}.
** You can download this file and preview your graded lab before grades are finalized and entered
# Grades are entered to Blackboard.
# After grades are entered, the script will move graded labs ready to be returned to {{File{/opt/pub/ncs205/returned/}}}. You may download them from this directory to see entered grades and my annotations.
The directories {{File{/opt/pub/ncs205/queue/}}} and {{File{/opt/pub/ncs205/graded/}}} are staging directories in the workflow pipeline.  You can view the contents of these directories but cannot write to them.  Your access is only so you can have full visibility on where your labs reside in the workflow.

tl;dr: You upload new labs to {{File{/opt/pub/ncs205/submit/}}} and retrieve graded copies from {{File{/opt/pub/ncs205/returned/}}}.
!![[Lab E - Logical Volume Manager]]
Assigned [[Week 15, Part 1]]

Complete the steps in the [[Lab E Instructions|labs/labE-instructions.pdf]] PDF on your files VM to become familiar with the Linux logical volume manager.

Add additional filesystems to your core VM server
* See the last page in the [[Lab E Instructions|labs/labE-instructions.pdf]]
* Complete the [[Lab E Deliverable|labs/labE.pdf]] and submit this PDF to {{File{/opt/pub/ncs205/submit/}}} on the class shell server

This lab will involve restarting your file server VM.  Be sure the necessary services are configured to start on boot and ~SELinux and firewalld are properly configured.
!! [[Lab G - Backups & Cron]] - @@EXTRA CREDIT@@
Assigned [[Week G]]

!!! Backup script:

Write a bash shell script which will use the dump utility to back up your file server VM's /home filesystem 
Save your script on the files VM as {{Command{/root/scripts/backup.sh}}}
Submit it to the {{File{/opt/pub/ncs205/submit/}}} directory following the normal shell script submission process.
 - No need to capture a demo with the script command

You have two script options:
* Create a basic script: sufficient but will be more strictly graded since it is simple 
* Create a more advanced version: for those who are up to the challenge of writing a more real-world script. +5 points of extra credit towards lab grade for pulling it off.

Your script must either:

Basic version:
* Mount the /opt/backups/ filesystem if it is not already mounted. 
* Create the directory /opt/backups&#047;//hostname//&#047; if it does not already exist.
* Use the dump utility to back up the filesystem and save a level 0 dump file to the directory /opt/backups&#047;//hostname//&#047; with an appropriate file name
* The dump file is to be bzip2 compressed
* Generate an appropriate syslog message noting success or failure of the backups
** Direct this message to the user facility and info severity level
** Tag = Backups
** The {{Command{logger}}} command can be used to record a message to syslog.  Check its man page for usage details.

Advanced version:
* Mount the /opt/backups/ filesystem from your file server if it is not already mounted.  ({{Command{man mountpoint}}}.  Evaluate exit status:  {{Command{mountpoint -q /opt/backups || mount /opt/backups}}})
* Create the directory /opt/backups&#047;//hostname//&#047; if it does not already exist.
* Use the dump utility to back up the filesystem and save the dump file to the directory /opt/backups&#047;//hostname//&#047; with an appropriate file name, such as home.$date.$level.dump.bz2
** where $date is a 6 digit YYMMDD date  (see date manpage)
** and $level is the dump level
* The dump file is to be bzip2 compressed
* Accept the dump level as an optional command line argument to your script
* If no argument is specified, use a dump level on the following schedule:
** Level 0 on the first Sunday of the month
** Level 1 on each subsequent Sunday
** Level 2 on each remaining day of the week
* Generate an appropriate syslog message noting success or failure of the backups
** Direct this message to the user facility and info severity level
** Tag = Backups
** The {{Command{logger}}} command can be used to record a message to syslog.  Check its man page for usage details.

Also check the date command man page for date formatting codes


This should get you started:
''Note:''  This is just a sample of some control structure logic.  The commands may need to be adjusted.
{{{
#!/bin/sh
# File name:
# Author:
# Date Written:
# Assignment:
# Purpose:
# Description:
#

# Test for a command line argument and ensure it is a single digit
if [ $(expr "$1" : '^[0-9]\{1,\}$') -eq 1 ]
then
        level=$1

# Else, see if it is currently the first day of the month
elif [ $(date +%d) = "01" ]
then
        level=0

# If a valid level cannot be determined from our argument or if statement above
# display usage and error out with positive exit status
else
        echo Error: Invalid level
        echo Usage: $0 [0-9]
	# Execute logger here to record this 
        exit 1
fi

echo Level: $level
}}}


!!! Cron:

Create the following cron task on the files VM:

* Execute the backup script every Sunday morning at 4am

| !Character | !Shortcut | !Most Useful |
| ~CTRL-C |Send interrupt signal to a running command (abort)| * |
|~|Clear entered command line text|
| ~CTRL-A |Move cursor to beginning of command line| * |
| ~CTRL-E |Move cursor to end of command line| * |
| ~CTRL-L |Clear Screen; move cursor to top of screen| * |
| ~ALT-B |Move one word backward on command line|
| ~ALT-F |Move one word forward on command line|
| ~CTRL-U |Erase line to left|
| ~CTRL-K |Erase line to the right|
| ~CTRL-W |Erase a word to left on command line| * |
| ~ALT-D |Erase a word to right on command line|
| ~CTRL-Y |Paste previously erased text|
| ~CTRL-D |Send EOF signal, ending input| * |
|~|Erase character under cursor| * |
|~|Log out (when no other text is on the command line)| * |
| ~Shift-INS |Paste clipboard at cursor| * |
| ~Shift-PgUp |Scroll window up|
| ~Shift-PgDn |Scroll window down|
| Tab |Auto-complete command or file name| * |
| Up Arrow |Previous Command| * |
| Down Arrow |Next command| * |
| Page Up |Previous command search| * |
| Page Down |Next command search| * |

{{Note{''Note:'' The above key sequences were listed with uppercase letters for clarity.  It is not necessary to also press the shift key.}}}

!! Tab Completion

The tab key will auto-complete commands or file names, pausing when it reaches a decision point.  

If I type the letters ''ad'' on the command line and press tab, the shell will autocomplete it to the string ''add'' before it reaches a decision point and cannot proceed without input.  If I press tab twice it will then show me the options I have to complete the command:
<<<
[root@shell data]# add
addgnupghome  addpart       addr2line     adduser
<<<

If I press the letter p and then tab again, the shell will know which command I'm looking for and auto-complete the command ''addpart''

The same auto-completion can be used for files.  The path to the networking configuration file on Linux systems is rather long.  Try this scenario on the class shell server:
* Type {{Command{cat /etc/sysco}}} and press ''tab''.  The shell should autocomplete that to {{Command{cat /etc/sysconfig/}}}.
* We're at a decision point since there are many different ways we could proceed.  Type: {{Command{netw}}} and press tab.  The shell will autocomplete that to {{Command{cat /etc/sysconfig/network}}}.
* Press the {{Command{-}}} key and press tab again.  The shell will autocomplete that to {{Command{cat /etc/sysconfig/network-scripts/}}}.
* Type {{Command{ifcfg-eth}}} and press tab twice.  We are presented with the available options.
* Type {{Command{0}}} and hit enter to view the network configuration file.

Using tab helped me identify the available files and reduced the amount of letters I needed to type to view the file.  It's slow at first, but once you get used to it, it greatly improves your speed and efficiency when using the shell and reduces the amount of information you have to remember.


!! Command recall

The page up and page down keys can be used to scroll through the recently used commands.  This isn't universal; the shell needs to be configured to support it, but it's supported by most systems out of the box.

If you have a long command string that wasn't used very recently, rather than pressing the up arrow several times to find it, you can enter the first few letters of that command and then ~Page-Up.  The shell will cycle through your recent commands which began with those letters.

For example, a few days ago I ran the command {{Command{fail2ban-client status sshd-root}}} to see how many systems were trying to break into the class shell server.  Rather than type out that entire command (or have to remember it), if I enter the first few letters {{Command{fai}}} and then press ~Page-Up, the shell will search backward in my command history and bring me right to it.  If I used the up arrow, I'd first have to scroll through the hundreds of commands I may have entered since then.


!! Copy/Paste

In putty and most other terminal emulators, highlighting text with the mouse will copy it to the clipboard.  Clicking the right mouse button will paste text from the clipboard into the terminal at the position of the cursor.  If you are connecting from a Linux host like Kali instead of Windows, clicking the middle mouse button or scroll wheel will paste text to the terminal.  Shift-Insert will also paste text from the clipboard into the terminal.

// //''Name:'' Calendar plugin
// //''Version:'' 0.1.0
// //''Author:'' SteveRumsby

// //''Syntax:''
// //<< {{{listTags tag //sort// //prefix//}}} >>

// //''Description:''
// //Generate a list of tiddlers tagged with the given tag.
// //If both //sort// and //prefix// are omitted the list is sorted in increasing order of title, with one tiddler per line.
// //If //sort// is specified the list is sorted in increasing order of the given tiddler property. Possible properties are: title, modified, modifier.
// //If //prefix// is specified the given string is inserted before the tiddler title. The insertion happens before the text is wikified. This can be used to generate bulleted or numbered lists.

// //''Examples:''
// //<< {{{listTags usage}}} >> - generate a plain list of all tiddlers tagged with tag //usage//, sorted by title
// //<< {{{listTags usage modified}}} >> - the same list, with most recently modified tiddlers last
// //<< {{{listTags usage title #}}} >> - generate a numbered list of tiddlers tagged with //usage//, sorted by title

// //''Code section:''
// Register this plugin's version metadata in TiddlyWiki's extension table.
version.extensions.listTags = {major: 0, minor: 1, revision: 0, date: new Date(2005, 6,16)};

// Macro namespace object for <<listTags>>; the handler is attached below.
config.macros.listTags = {
text: "Hello"
};

// <<listTags tag sort prefix>> — render a wikified list of tiddler links for
// every tiddler carrying the given tag.
// params[0] = tag to match, params[1] = sort property, params[2] = optional
// string prepended to each line before wikification.
config.macros.listTags.handler = function(place,macroName,params)
{
 // Fetch matching tiddlers (sorted by params[1] when supplied).
 var matches = store.getTaggedTiddlers(params[0], params[1]);
 var lines = [];
 for(var i = 0; i < matches.length; i++)
 {
  var prefix = params[2] ? params[2] + " " : "";
  lines.push(prefix + "[[" + matches[i].title + "]]\n");
 }
 // Wikify the accumulated markup into the output element.
 wikify(lines.join(""), place, null, null);
}
&nbsp; <<defaultHome>>  [[Notebook]]  [[Virtual Machines]]  [[Outline]]  [[Calendar]]

<html>
<center>
  <video id="my-video" class="video-js" controls preload="auto" width="1572" height="724" poster="" data-setup="{}">
    <source src="video/naemon.mp4" type='video/mp4'>
    <p class="vjs-no-js">
      To view this video please enable JavaScript, and consider upgrading to a web browser that
      <a href="http://videojs.com/html5-video-support/" target="_blank">supports HTML5 video</a>
    </p>
  </video>

  <script src="https://vjs.zencdn.net/7.8.2/video.min.js"></script>
</center>
</html>
/***
''NestedSlidersPlugin for TiddlyWiki version 1.2.x and 2.0''
^^author: Eric Shulman
source: http://www.TiddlyTools.com/#NestedSlidersPlugin
license: [[Creative Commons Attribution-ShareAlike 2.5 License|http://creativecommons.org/licenses/by-sa/2.5/]]^^

Quickly make any tiddler content into an expandable 'slider' panel, without needing to create a separate tiddler to contain the slider content.  Optional syntax allows ''default to open'', ''custom button label/tooltip'' and ''automatic blockquote formatting.''

You can also 'nest' these sliders as deep as you like (see complex nesting example below), so that expandable 'tree-like' hierarchical displays can be created.  This is most useful when converting existing in-line text content to create in-line annotations, footnotes, context-sensitive help, or other subordinate information displays.

For more details, please click on a section headline below:
++++!!!!![Configuration]>
Debugging messages for 'lazy sliders' deferred rendering:
<<option chkDebugLazySliderDefer>> show debugging alert when deferring slider rendering
<<option chkDebugLazySliderRender>> show debugging alert when deferred slider is actually rendered
===
++++!!!!![Usage]>
When installed, this plugin adds new wiki syntax for embedding 'slider' panels directly into tiddler content.  Use {{{+++}}} and {{{===}}} to delimit the slider content.  Additional optional syntax elements let you specify
*default to open
*cookiename
*heading level
*floater (with optional CSS width value)
*mouse auto rollover
*custom label/tooltip/accesskey
*automatic blockquote
*deferred rendering
The complete syntax, using all options, is:
//{{{
++++(cookiename)!!!!!^width^*[label=key|tooltip]>...
content goes here
===
//}}}
where:
* {{{+++}}} (or {{{++++}}}) and {{{===}}}^^
marks the start and end of the slider definition, respectively.  When the extra {{{+}}} is used, the slider will be open when initially displayed.^^
* {{{(cookiename)}}}^^
saves the slider opened/closed state, and restores this state whenever the slider is re-rendered.^^
* {{{!}}} through {{{!!!!!}}}^^
displays the slider label using a formatted headline (Hn) style instead of a button/link style^^
* {{{^width^}}} (or just {{{^}}})^^
makes the slider 'float' on top of other content rather than shifting that content downward.  'width' must be a valid CSS value (e.g., "30em", "180px", "50%", etc.).  If omitted, the default width is "auto" (i.e., fit to content)^^
* {{{*}}}^^
automatically opens/closes slider on "rollover" as well as when clicked^^
* {{{[label=key|tooltip]}}}^^
uses custom label/tooltip/accesskey.  {{{=key}}} and {{{|tooltip}}} are optional.  'key' must be a ''single letter only''.  Default labels/tooltips are: ">" (more) and "<" (less), with no default access key assignment.^^
* {{{">"}}} //(without the quotes)//^^
automatically adds blockquote formatting to slider content^^
* {{{"..."}}} //(without the quotes)//^^
defers rendering of closed sliders until the first time they are opened.  //Note: deferred rendering may produce unexpected results in some cases.  Use with care.//^^

//Note: to make slider definitions easier to read and recognize when editing a tiddler, newlines immediately following the {{{+++}}} 'start slider' or preceding the {{{===}}} 'end slider' sequence are automatically suppressed so that excess whitespace is eliminated from the output.//
===
++++!!!!![Examples]>
simple in-line slider: 
{{{
+++
   content
===
}}}
+++
   content
===
----
use a custom label and tooltip: 
{{{
+++[label|tooltip]
   content
===
}}}
+++[label|tooltip]
   content
===
----
content automatically blockquoted: 
{{{
+++>
   content
===
}}}
+++>
   content
===
----
all options combined //(default open, cookie, heading, sized floater, rollover, label/tooltip/key, blockquoted, deferred)//
{{{
++++(testcookie)!!!^30em^*[label=Z|click or press Alt-Z to open]>...
   content
===
}}}
++++(testcookie)!!!^30em^*[label=Z|click or press Alt-Z to open]>...
   content
===
----
complex nesting example:
{{{
+++^[get info...=I|click for information or press Alt-I]
   put some general information here, plus a floating slider with more specific info:
   +++^10em^[view details...|click for details]
      put some detail here, which could include a rollover with a +++^25em^*[glossary definition]explaining technical terms===
   ===
===
}}}
+++^[get info...=I|click for information or press Alt-I]
   put some general information here, plus a floating slider with more specific info:
   +++^10em^[view details...|click for details]
      put some detail here, which could include a rollover with a +++^25em^*[glossary definition]explaining technical terms===
   ===
===
----
nested floaters
>menu: <<tiddler NestedSlidersExample>>
(see [[NestedSlidersExample]] for definition)
----
===
!!!!!Installation
<<<
import (or copy/paste) the following tiddlers into your document:
''NestedSlidersPlugin'' (tagged with <<tag systemConfig>>)
<<<
!!!!!Revision History
<<<
''2006.05.11 - 1.9.0'' added optional '^width^' syntax for floating sliders and '=key' syntax for setting an access key on a slider label
''2006.05.09 - 1.8.0'' in onClickNestedSlider(), when showing panel, set focus to first child input/textarea/select element
''2006.04.24 - 1.7.8'' in adjustSliderPos(), if floating panel is contained inside another floating panel, subtract offset of containing panel to find correct position
''2006.02.16 - 1.7.7'' corrected deferred rendering to account for use-case where show/hide state is tracked in a cookie
''2006.02.15 - 1.7.6'' in adjustSliderPos(), ensure that floating panel is positioned completely within the browser window (i.e., does not go beyond the right edge of the browser window)
''2006.02.04 - 1.7.5'' add 'var' to unintended global variable declarations to avoid FireFox 1.5.0.1 crash bug when assigning to globals
''2006.01.18 - 1.7.4'' only define adjustSliderPos() function if it has not already been provided by another plugin.  This lets other plugins 'hijack' the function even when they are loaded first.
''2006.01.16 - 1.7.3'' added adjustSliderPos(place,btn,panel,panelClass) function to permit specialized logic for placement of floating panels.  While it provides improved placement for many uses of floating panels, it exhibits a relative offset positioning error when used within *nested* floating panels.  Short-term workaround is to only adjust the position for 'top-level' floaters.
''2006.01.16 - 1.7.2'' added button property to slider panel elements so that slider panel can tell which button it belongs to.  Also, re-activated and corrected animation handling so that nested sliders aren't clipped by hijacking Slider.prototype.stop so that "overflow:hidden" can be reset to "overflow:visible" after animation ends
''2006.01.14 - 1.7.1'' added optional "^" syntax for floating panels.  Defines new CSS class, ".floatingPanel", as an alternative for standard in-line ".sliderPanel" styles.
''2006.01.14 - 1.7.0'' added optional "*" syntax for rollover handling to show/hide slider without requiring a click (Based on a suggestion by tw4efl)
''2006.01.03 - 1.6.2'' When using optional "!" heading style, instead of creating a clickable "Hn" element, create an "A" element inside the "Hn" element.  (allows click-through in SlideShowPlugin, which captures nearly all click events, except for hyperlinks)
''2005.12.15 - 1.6.1'' added optional "..." syntax to invoke deferred ('lazy') rendering for initially hidden sliders
removed checkbox option for 'global' application of lazy sliders
''2005.11.25 - 1.6.0'' added optional handling for 'lazy sliders' (deferred rendering for initially hidden sliders)
''2005.11.21 - 1.5.1'' revised regular expressions: if present, a single newline //preceding// and/or //following// a slider definition will be suppressed so start/end syntax can be placed on separate lines in the tiddler 'source' for improved readability.  Similarly, any whitespace (newlines, tabs, spaces, etc.) trailing the 'start slider' syntax or preceding the 'end slider' syntax is also suppressed.
''2005.11.20 - 1.5.0'' added (cookiename) syntax for optional tracking and restoring of slider open/close state
''2005.11.11 - 1.4.0'' added !!!!! syntax to render slider label as a header (Hn) style instead of a button/link style
''2005.11.07 - 1.3.0'' removed alternative syntax {{{(((}}} and {{{)))}}} (so they can be used by other
formatting extensions) and simplified/improved regular expressions to trim multiple excess newlines
''2005.11.05 - 1.2.1'' changed name to NestedSlidersPlugin
more documentation
''2005.11.04 - 1.2.0'' added alternative character-mode syntax {{{(((}}} and {{{)))}}}
tweaked "eat newlines" logic for line-mode {{{+++}}} and {{{===}}} syntax
''2005.11.03 - 1.1.1'' fixed toggling of default tooltips ("more..." and "less...") when a non-default button label is used
code cleanup, added documentation
''2005.11.03 - 1.1.0'' changed delimiter syntax from {{{(((}}} and {{{)))}}} to {{{+++}}} and {{{===}}}
changed name to EasySlidersPlugin
''2005.11.03 - 1.0.0'' initial public release
<<<
!!!!!Credits
<<<
This feature was implemented by EricShulman from [[ELS Design Studios|http://www.elsdesign.com]] with initial research and suggestions from RodneyGomes, GeoffSlocock, and PaulPetterson.
<<<
!!!!!Code
***/
//{{{
// Register NestedSlidersPlugin version metadata.
version.extensions.nestedSliders = {major: 1, minor: 9, revision: 0, date: new Date(2006,5,11)};
//}}}

//{{{
// options for deferred rendering of sliders that are not initially displayed
// (both default to false; set via the plugin's Configuration section)
if (config.options.chkDebugLazySliderDefer==undefined) config.options.chkDebugLazySliderDefer=false;
if (config.options.chkDebugLazySliderRender==undefined) config.options.chkDebugLazySliderRender=false;

// default styles for 'floating' class
// (panels using the '^' floater syntax are absolutely positioned on top of other content)
setStylesheet(".floatingPanel { position:absolute; z-index:10; padding:0.5em; margin:0em; \
	background-color:#eee; color:#000; border:1px solid #000; text-align:left; }","floatingPanelStylesheet");
//}}}

//{{{
// Wiki formatter for the nested-slider syntax: matches '+++' ... '===' blocks,
// parses the optional modifiers, creates the toggle button and its panel, and
// either renders the panel content immediately or stores it raw for deferred
// ('lazy') rendering on first open.
// lookahead capture groups: [1]='+' (default open), [2]='(cookiename)',
// [3]='!'..'!!!!!' heading style, [4]='^width^' floating panel, [5]='*'
// rollover, [6]='[label=key|tooltip]', [7]='>' blockquote, [8]='...' deferred.
config.formatters.push( {
	name: "nestedSliders",
	match: "\\n?\\+{3}",
	terminator: "\\s*\\={3}\\n?",
	lookahead: "\\n?\\+{3}(\\+)?(\\([^\\)]*\\))?(\\!*)?(\\^(?:[^\\^\\*\\[\\>]*\\^)?)?(\\*)?(\\[[^\\]]*\\])?(\\>)?(\\.\\.\\.)?\\s*",
	handler: function(w)
		{
			// re-run the lookahead at the match position to pick up the option groups
			var lookaheadRegExp = new RegExp(this.lookahead,"mg");
			lookaheadRegExp.lastIndex = w.matchStart;
			var lookaheadMatch = lookaheadRegExp.exec(w.source)
			if(lookaheadMatch && lookaheadMatch.index == w.matchStart)
			{
				// location for rendering button and panel
				var place=w.output;

				// default to closed, no cookie, no accesskey
				var show="none"; var title=">"; var tooltip="show"; var cookie=""; var key="";

				// extra "+", default to open
				if (lookaheadMatch[1])
					{ show="block"; title="<"; tooltip="hide"; }

				// cookie, use saved open/closed state
				if (lookaheadMatch[2]) {
					cookie=lookaheadMatch[2].trim().slice(1,-1);
					cookie="chkSlider"+cookie;
					if (config.options[cookie]==undefined)
						{ config.options[cookie] = (show=="block") }
					if (config.options[cookie])
						{ show="block"; title="<"; tooltip="hide"; }
					else
						{ show="none"; title=">"; tooltip="show"; }
				}

				// parse custom label/tooltip/accesskey: [label=X|tooltip]
				if (lookaheadMatch[6]) {
					title = lookaheadMatch[6].trim().slice(1,-1);
					var pos=title.indexOf("|");
					if (pos!=-1) { tooltip = title.substr(pos+1,title.length); title=title.substr(0,pos); }
					if (title.substr(title.length-2,1)=="=") { key=title.substr(title.length-1,1); title=title.slice(0,-2); }
					if (pos==-1) tooltip += " "+title; // default tooltip: "show/hide <title>"
				}

				// create the button
				if (lookaheadMatch[3]) { // use "Hn" header format instead of button/link
					var lvl=(lookaheadMatch[3].length>6)?6:lookaheadMatch[3].length;
					var btn = createTiddlyElement(createTiddlyElement(place,"h"+lvl,null,null,null),"a",null,null,title);
					btn.onclick=onClickNestedSlider;
					btn.setAttribute("href","javascript:;");
					btn.setAttribute("title",tooltip);
				}
				else
					var btn = createTiddlyButton(place,title,tooltip,onClickNestedSlider);
				btn.sliderCookie = cookie; // save the cookiename (if any) in the button object
				btn.keyparam=key; // save the access key letter ("" if none)
				if (key.length) {
					btn.setAttribute("accessKey",key); // init access key
					btn.onfocus=function(){this.setAttribute("accessKey",this.keyparam);}; // **reclaim** access key on focus
				}

				// "non-click" MouseOver open/close slider
				if (lookaheadMatch[5]) btn.onmouseover=onClickNestedSlider;

				// create slider panel
				var panelClass=lookaheadMatch[4]?"floatingPanel":"sliderPanel";
				var panel=createTiddlyElement(place,"div",null,panelClass,null);
				panel.style.display = show;
				if (lookaheadMatch[4] && lookaheadMatch[4].length>2) panel.style.width=lookaheadMatch[4].slice(1,-1); // custom width
				panel.button = btn; // so the slider panel know which button it belongs to
				btn.sliderPanel=panel;

				// render slider (or defer until shown) 
				w.nextMatch = lookaheadMatch.index + lookaheadMatch[0].length;
				if ((show=="block")||!lookaheadMatch[8]) {
					// render now if panel is supposed to be shown or NOT deferred rendering
					w.subWikify(lookaheadMatch[7]?createTiddlyElement(panel,"blockquote"):panel,this.terminator);
					// align slider/floater position with button
					adjustSliderPos(place,btn,panel,panelClass);
				}
				else {
					// deferred: stash the raw source on the panel; onClickNestedSlider
					// wikifies it the first time the panel is opened
					var src = w.source.substr(w.nextMatch);
					var endpos=findMatchingDelimiter(src,"+++","===");
					panel.setAttribute("raw",src.substr(0,endpos));
					panel.setAttribute("blockquote",lookaheadMatch[7]?"true":"false");
					panel.setAttribute("rendered","false");
					w.nextMatch += endpos+3;
					if (w.source.substr(w.nextMatch,1)=="\n") w.nextMatch++;
					if (config.options.chkDebugLazySliderDefer) alert("deferred '"+title+"':\n\n"+panel.getAttribute("raw"));
				}
			}
		}
	}
)

// TBD: ignore 'quoted' delimiters (e.g., "{{{+++foo===}}}" isn't really a slider)
// Find the 'end' delimiter that balances the implicit 'start' at the head of src,
// skipping over nested start/end pairs.  Returns src.length when no balanced
// 'end' delimiter exists.
function findMatchingDelimiter(src,starttext,endtext) {
	var scanFrom = 0;
	var endpos = src.indexOf(endtext);
	// While the text ahead of the candidate 'end' still contains nested 'starts',
	// advance the candidate 'end' by one matching 'end' per nested 'start'.
	while (src.substring(scanFrom,endpos-1).indexOf(starttext) != -1) {
		// tally nested 'starts' appearing before the current candidate 'end'
		var region = src.substring(scanFrom,endpos-1);
		var nested = 0;
		for (var p = region.indexOf(starttext); p != -1; p = region.indexOf(starttext,p+starttext.length))
			nested++;
		// on the next pass, resume scanning just past the current candidate 'end'
		scanFrom = endpos + endtext.length;
		// skip one 'end' delimiter for each nested 'start' counted above
		while (nested && endpos != -1) {
			endpos = src.indexOf(endtext,endpos+endtext.length);
			nested--;
		}
	}
	return (endpos == -1) ? src.length : endpos;
}
//}}}

//{{{
// Click/rollover handler shared by every slider button: toggles the default
// label and tooltip, performs deferred rendering on first open, shows/hides
// the panel (animated if chkAnimate), focuses the first input in an opened
// panel, persists open/closed state when a cookie name was given, and
// realigns floating panels with their button.
window.onClickNestedSlider=function(e)
{
	if (!e) var e = window.event;
	var theTarget = resolveTarget(e);
	var theLabel = theTarget.firstChild.data;
	var theSlider = theTarget.sliderPanel
	var isOpen = theSlider.style.display!="none";
	// if using default button labels, toggle labels
	if (theLabel==">") theTarget.firstChild.data = "<";
	else if (theLabel=="<") theTarget.firstChild.data = ">";
	// if using default tooltips, toggle tooltips
	if (theTarget.getAttribute("title")=="show")
		theTarget.setAttribute("title","hide");
	else if (theTarget.getAttribute("title")=="hide")
		theTarget.setAttribute("title","show");
	// custom-label case: tooltip is "show <label>"/"hide <label>"
	if (theTarget.getAttribute("title")=="show "+theLabel)
		theTarget.setAttribute("title","hide "+theLabel);
	else if (theTarget.getAttribute("title")=="hide "+theLabel)
		theTarget.setAttribute("title","show "+theLabel);
	// deferred rendering (if needed)
	if (theSlider.getAttribute("rendered")=="false") {
		if (config.options.chkDebugLazySliderRender)
			alert("rendering '"+theLabel+"':\n\n"+theSlider.getAttribute("raw"));
		// wikify the stashed raw source into the panel (inside a blockquote
		// when the '>' modifier was used at definition time)
		var place=theSlider;
		if (theSlider.getAttribute("blockquote")=="true")
			place=createTiddlyElement(place,"blockquote");
		wikify(theSlider.getAttribute("raw"),place);
		theSlider.setAttribute("rendered","true");
	}
	// show/hide the slider
	if(config.options.chkAnimate)
		anim.startAnimating(new Slider(theSlider,!isOpen,e.shiftKey || e.altKey,"none"));
	else
		theSlider.style.display = isOpen ? "none" : "block";
	// if showing panel, set focus to first 'focus-able' element in panel
	if (theSlider.style.display!="none") {
		var ctrls=theSlider.getElementsByTagName("*");
		for (var c=0; c<ctrls.length; c++) {
			var t=ctrls[c].tagName.toLowerCase();
			if (t=="input" || t=="textarea" || t=="select")
				{ ctrls[c].focus(); break; }
		}
	}
	// persist open/closed state when this slider was defined with a (cookiename)
	if (this.sliderCookie && this.sliderCookie.length)
		{ config.options[this.sliderCookie]=!isOpen; saveOptionCookie(this.sliderCookie); }
	// align slider/floater position with target button
	adjustSliderPos(theSlider.parentNode,theTarget,theSlider,theSlider.className);
	return false;
}

// hijack animation handler 'stop' handler so overflow is visible after animation has completed
// (prevents nested sliders from staying clipped by the "overflow:hidden" set during animation)
Slider.prototype.coreStop = Slider.prototype.stop;
Slider.prototype.stop = function() { this.coreStop(); this.element.style.overflow = "visible"; }

// adjust panel position based on button position
// Only defined when no other plugin has already provided it, so other plugins
// loaded earlier can 'hijack' this function (see 1.7.4 revision note).
if (window.adjustSliderPos==undefined) window.adjustSliderPos=function(place,btn,panel,panelClass) {
	if (panelClass=="floatingPanel") {
		// default: position directly below the button, relative to 'place'
		var left=0;
		var top=btn.offsetHeight; 
		if (place.style.position!="relative") {
			// otherwise use absolute page coordinates of the button...
			var left=findPosX(btn);
			var top=findPosY(btn)+btn.offsetHeight;
			// ...minus the offset of a containing floating panel, if any
			var p=place; while (p && p.className!='floatingPanel') p=p.parentNode;
			if (p) { left-=findPosX(p); top-=findPosY(p); }
		}
		// keep the panel from extending past the right edge of the window
		if (left+panel.offsetWidth > getWindowWidth()) left=getWindowWidth()-panel.offsetWidth-10;
		panel.style.left=left+"px"; panel.style.top=top+"px";
	}
}

// Best-effort browser viewport width, probing browser generations in order:
// old Mozilla/Firefox, IE6 standards mode, IE4/quirks mode, then the generic
// window.innerWidth.  Returns 0 when no measurement is available.
function getWindowWidth() {
	// moz (FF): rendered document width is exposed directly
	if (document.width != undefined)
		return document.width;
	// IE6: standards mode reports the viewport on the root element
	var root = document.documentElement;
	if (root && (root.clientWidth || root.clientHeight))
		return root.clientWidth;
	// IE4: quirks mode reports it on the body instead
	var body = document.body;
	if (body && (body.clientWidth || body.clientHeight))
		return body.clientWidth;
	// IE - general / other browsers
	if (window.innerWidth != undefined)
		return window.innerWidth;
	return 0; // unknown
}
//}}}
* [[Class Syllabus|syllabus/NCS205Syllabus2209.pdf]]
* [[General SOPs]]
* [[Lab Assignments]]
* [[Class Participation]]
** [[Using Discord]]
** [[Using Blackboard]]
* [[Shell script submission requirements]]
/% * [[Shell scripting best practices]] %/

* [[Material Sections]]

Other helpful material to make things easier:
* [[Working more efficiently with GNU screen & SSH keys]]
* [[Tunnels & Proxies with SSH]]


!!Handouts
[[Command line summary handout|handouts/UnixCommandSummary.pdf]]
[[Substitution Handout|handouts/SubstitutionHandout.pdf]] (from tcsh man page)
[[ASCII Chart|handouts/ascii-chart.gif]]
[[Shell Metacharacter Table|handouts/ShellMetacharacterTable.pdf]]
[[Regular expression metacharacters]]
* [[Metacharacter Handout|handouts/Metacharacters.pdf]] - Metacharacters and how they differ in the shell & regular expression contexts.
[[vi diagram handout|handouts/viDiagram.pdf]]
[[awk handout|handouts/awkHandout.pdf]]


!!Reference Material
[[Class technology stack]] - Mostly my notes for setting up our class servers
[[UNIX in a Nutshell|http://books.google.com/books?id=YkNiiLupct4C&dq=unix+in+a+nutshell&printsec=frontcover&source=bn&hl=en&ei=aKlWS43lJJCOlQeW3rSCBA&sa=X&oi=book_result&ct=result&resnum=5&ved=0CCIQ6AEwBA#v=onepage&q=&f=false]] - Google books
[[The Linux Command Line (No Starch Press)|http://www.merantn.net/reference/TLCL-19.01.pdf]]
[[UNIX Toolbox|http://www.cs.sunyit.edu/~merantn/docs/unixtoolbox.dognet.xhtml]]
[[Shell scripting notes]]
[[Table of Commands]]
[[Linux Shortcuts]]
/***
|Name|OpenTopPlugin|
|Created by|SaqImtiaz|
|Location|http://lewcid.googlepages.com/lewcid.html#OpenTopPlugin|
|Version|0.1|
|Requires|~TW2.x|
!!!Description:
Open new tiddlers at the top of the screen.

!!!Code
***/
//{{{
// Preserve the core displayTiddler so the wrapper below can delegate to it.
Story.prototype.coreLewcidDisplayTiddler=Story.prototype.displayTiddler ;
// Hijacked displayTiddler: close any already-open copy of the tiddler, render
// it via the core routine, then scroll to the top of the window so newly
// opened tiddlers always appear at the top of the screen.
Story.prototype.displayTiddler =
function(srcElement,title,template,unused1,unused2,animate,slowly)
{
       // NOTE(review): the srcElement parameter is deliberately discarded here,
       // so the core renderer receives null as the source element — presumably
       // to suppress the open-from-source animation; confirm intended.
       var srcElement=null;
       if (document.getElementById(this.idPrefix + title))
          {story.closeTiddler(title);}
       this.coreLewcidDisplayTiddler(srcElement,title,template,unused1,unused2,animate,slowly);
       window.scrollTo(0,0);
}
//}}}
<<option chkSaveBackups>> SaveBackups
<<option chkAutoSave>> AutoSave
<<option chkRegExpSearch>> RegExpSearch
<<option chkCaseSensitiveSearch>> CaseSensitiveSearch
<<option chkAnimate>> EnableAnimations
----
Also see AdvancedOptions
[[Week 1, Part 2]] - The Filesystem
* Read Chapter 2 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] - Navigation
* Complete [[Lab 2|labs/Lab2.pdf]]

[[Week 1, Part 1]] - Unix Intro
* Read Chapter 1 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] - What is the Shell?
* Complete [[Lab 1|labs/Lab1.pdf]]
<!--{{{-->
<div class='header' macro='gradient vert #000 #069'>
<div id='topTitle' class='headerShadow'>
<span class='siteTitle' refresh='content' tiddler='SiteTitle'></span>&nbsp;
<span class='siteSubtitle' refresh='content' tiddler='SiteSubtitle'></span>
</div>
<div id='topTitle' class='headerForeground'>
<span class='siteTitle' refresh='content' tiddler='SiteTitle'></span>&nbsp;&nbsp;&nbsp;
<span class='siteSubtitle' refresh='content' tiddler='SiteSubtitle'></span>
</div>
<div id='topMenu' refresh='content' tiddler='MainMenu'></div>
<div id='rightMenu' refresh='content' tiddler='RightMenu'></div>
</div>
<div id='sidebar'>
<div id='sidebarOptions' refresh='content' tiddler='SideBarOptions'></div>
<div id='sidebarTabs' refresh='content' force='true' tiddler='SideBarTabs'></div>
</div>
<div id='displayArea'>
<div id='messageArea'></div>
<div id='tiddlerDisplay'></div>
</div>
<!--}}}-->
// Handler for the <<defaultHome>> button: closes every open tiddler, clears
// the persisted startup list, then re-opens the tiddlers named in the
// DefaultTiddlers tiddler.
function onClickDefaultHome(e) {
	story.closeAllTiddlers();
	// reset the saved session so the default view is restored on reload too
	config.options.txtDefaultTiddlers = "";
	saveOptionCookie('txtDefaultTiddlers');
	var defaults = store.getTiddlerText("DefaultTiddlers");
	if (defaults)
		story.displayTiddlers(null, defaults.readBracketedList());
}

// <<defaultHome>> macro: renders a "Home" button that restores the default
// tiddler view (see onClickDefaultHome).
config.macros["defaultHome"] = {
	label: "Home",
	prompt: "Show the default tiddlers",
	title: "Home",
	handler: function(place) {
		// label and tooltip come from the macro definition itself
		createTiddlyButton(place, this.label, this.prompt, onClickDefaultHome);
	}
};
// <<listTags tag>> — emit an unordered list of links, one per tiddler
// carrying the given tag.  (This definition replaces any earlier listTags
// macro registration.)
config.macros.listTags = { text: "Hello" };
config.macros.listTags.handler = function(place,macroName,params)
{
	var tiddlers = store.getTaggedTiddlers(params[0]);
	var list = createTiddlyElement(place,"ul",null,null,"");
	for(var i = 0; i < tiddlers.length; i++)
	{
		// one <li> per tagged tiddler, containing a live tiddler link
		var item = createTiddlyElement(list,"li",null,null,"");
		createTiddlyLink(item, tiddlers[i].title, true);
	}
}
Type the text for 'Plugins'
<html>
<center>
  <video id="my-video" class="video-js" controls preload="auto" width="466" height="448" poster="" data-setup="{}">
    <source src="video/PuttyProxy.mp4" type='video/mp4'>
    <p class="vjs-no-js">
      To view this video please enable JavaScript, and consider upgrading to a web browser that
      <a href="http://videojs.com/html5-video-support/" target="_blank">supports HTML5 video</a>
    </p>
  </video>

  <script src="https://vjs.zencdn.net/7.8.2/video.min.js"></script>
</center>
</html>
/***
|Name:|QuickOpenTagPlugin|
|Description:|Changes tag links to make it easier to open tags as tiddlers|
|Version:|3.0.1 ($Rev: 3861 $)|
|Date:|$Date: 2008-03-08 10:53:09 +1000 (Sat, 08 Mar 2008) $|
|Source:|http://mptw.tiddlyspot.com/#QuickOpenTagPlugin|
|Author:|Simon Baird <simon.baird@gmail.com>|
|License:|http://mptw.tiddlyspot.com/#TheBSDLicense|
***/
//{{{
config.quickOpenTag = {

	// Glyph for the tag dropdown button; the smaller U+25BE reportedly
	// doesn't render in IE, so use U+25BC there (document.all sniffs IE).
	dropdownChar: (document.all ? "\u25bc" : "\u25be"), // the little one doesn't work in IE?

	// Replaces the core createTagButton: renders the tag as a normal tiddler
	// link (clicking it opens the tag as a tiddler) followed by a small
	// dropdown button that opens the standard tag popup.
	createTagButton: function(place,tag,excludeTiddler) {
		// little hack so we can do this: <<tag PrettyTagName|RealTagName>>
		var splitTag = tag.split("|");
		var pretty = tag;
		if (splitTag.length == 2) {
			tag = splitTag[1];
			pretty = splitTag[0];
		}

		var sp = createTiddlyElement(place,"span",null,"quickopentag");
		createTiddlyText(createTiddlyLink(sp,tag,false),pretty);

		var theTag = createTiddlyButton(sp,config.quickOpenTag.dropdownChar,
                        config.views.wikified.tag.tooltip.format([tag]),onClickTag);
		theTag.setAttribute("tag",tag);
		if (excludeTiddler)
			theTag.setAttribute("tiddler",excludeTiddler);
		return(theTag);
	},

	// <<miniTag>> macro handler: emits a dropdown button only when the
	// current tiddler is itself used as a tag on at least one other tiddler.
	miniTagHandler: function(place,macroName,params,wikifier,paramString,tiddler) {
		var tagged = store.getTaggedTiddlers(tiddler.title);
		if (tagged.length > 0) {
			var theTag = createTiddlyButton(place,config.quickOpenTag.dropdownChar,
                        	config.views.wikified.tag.tooltip.format([tiddler.title]),onClickTag);
			theTag.setAttribute("tag",tiddler.title);
			theTag.className = "miniTag";
		}
	},

	// Replaces the core <<allTags>> macro handler: lists each tag with its
	// usage count and a dropdown button. params[1] optionally supplies a
	// regex anchored to the start of the tag name to filter the list.
	allTagsHandler: function(place,macroName,params) {
		var tags = store.getTags(params[0]);
		var filter = params[1]; // new feature
		var ul = createTiddlyElement(place,"ul");
		if(tags.length == 0)
			createTiddlyElement(ul,"li",null,"listTitle",this.noTags);
		for(var t=0; t<tags.length; t++) {
			var title = tags[t][0];
			if (!filter || (title.match(new RegExp('^'+filter)))) {
				var info = getTiddlyLinkInfo(title);
				var theListItem =createTiddlyElement(ul,"li");
				var theLink = createTiddlyLink(theListItem,tags[t][0],true);
				var theCount = " (" + tags[t][1] + ")";
				theLink.appendChild(document.createTextNode(theCount));
				var theDropDownBtn = createTiddlyButton(theListItem," " +
					config.quickOpenTag.dropdownChar,this.tooltip.format([tags[t][0]]),onClickTag);
				theDropDownBtn.setAttribute("tag",tags[t][0]);
			}
		}
	},

	// todo fix these up a bit
	// Stylesheet installed as the QuickOpenTagStyles shadow tiddler.
	// Fix: the original selectors here misspelled the core link class as
	// ".tiddyLink"; the correct class ".tiddlyLink" (used elsewhere in this
	// same sheet) is required for the border-reset rules to actually match.
	styles: [
"/*{{{*/",
"/* created by QuickOpenTagPlugin */",
".tagglyTagged .quickopentag, .tagged .quickopentag ",
"	{ margin-right:1.2em; border:1px solid #eee; padding:2px; padding-right:0px; padding-left:1px; }",
".quickopentag .tiddlyLink { padding:2px; padding-left:3px; }",
".quickopentag a.button { padding:1px; padding-left:2px; padding-right:2px;}",
"/* extra specificity to make it work right */",
"#displayArea .viewer .quickopentag a.button, ",
"#displayArea .viewer .quickopentag a.tiddlyLink, ",
"#mainMenu .quickopentag a.tiddlyLink, ",
"#mainMenu .quickopentag a.tiddlyLink ",
"	{ border:0px solid black; }",
"#displayArea .viewer .quickopentag a.button, ",
"#mainMenu .quickopentag a.button ",
"	{ margin-left:0px; padding-left:2px; }",
"#displayArea .viewer .quickopentag a.tiddlyLink, ",
"#mainMenu .quickopentag a.tiddlyLink ",
"	{ margin-right:0px; padding-right:0px; padding-left:0px; margin-left:0px; }",
"a.miniTag {font-size:150%;} ",
"#mainMenu .quickopentag a.button ",
"	/* looks better in right justified main menus */",
"	{ margin-left:0px; padding-left:2px; margin-right:0px; padding-right:0px; }",
"#topMenu .quickopentag { padding:0px; margin:0px; border:0px; }",
"#topMenu .quickopentag .tiddlyLink { padding-right:1px; margin-right:0px; }",
"#topMenu .quickopentag .button { padding-left:1px; margin-left:0px; border:0px; }",
"/*}}}*/",
		""].join("\n"),

	init: function() {
		// we fully replace these builtins. can't hijack them easily
		window.createTagButton = this.createTagButton;
		config.macros.allTags.handler = this.allTagsHandler;
		config.macros.miniTag = { handler: this.miniTagHandler };
		config.shadowTiddlers["QuickOpenTagStyles"] = this.styles;
		store.addNotification("QuickOpenTagStyles",refreshStyles);
	}
}

config.quickOpenTag.init();

//}}}

| !Symbol | !Meaning | !Escape | !Not supported by |
| ^ |Start of line| | |
| $ |End of line| | |
| [ ] |Character Classes (match any one character listed) | | |
|~|Characters may be specified singly or in ranges| | |
| [^ ] |Negated character class (match any one character not listed)| | |
| ? |Optional item.  Match 0 or 1. | | sed |
| ( ) |Alternation (match any one of the sub-expressions)| | |
|~|Grouping| | |
|~|Capture backreference Access with \//n//| * | |
| {{{|}}} |Or.  Match either expression it separates.  Use with ( )| | |
| . |Any single character| | |
| + |Repetition:  1 or more. | | sed |
| * |Repetition: 0 or more| | |
| { } |Defined range of matches (bounds) {//min//,//max//} or {//min//,} or {//exactly//}| * | |
| \ |Suppress normal behavior of a metacharacter| | |
|~|Access a backreference:  \//n//| | |
| \< |Match start of word.| * | bsd sed |
| \> |Match end of word.| * | bsd sed |


| !Symbol | !File Globbing   | !Regex | !Regex Equivalent |
| ? |Exactly 1|0 or 1| . |
| { } |Sets|# of matches| ( ) |
/***
|Name:|RenameTagsPlugin|
|Description:|Allows you to easily rename or delete tags across multiple tiddlers|
|Version:|3.0 ($Rev: 5501 $)|
|Date:|$Date: 2008-06-10 23:11:55 +1000 (Tue, 10 Jun 2008) $|
|Source:|http://mptw.tiddlyspot.com/#RenameTagsPlugin|
|Author:|Simon Baird <simon.baird@gmail.com>|
|License|http://mptw.tiddlyspot.com/#TheBSDLicense|
Rename a tag and you will be prompted to rename it in all its tagged tiddlers.
***/
//{{{
config.renameTags = {

	// Confirmation prompts; %0..%3 placeholders are filled in via
	// String.prototype.format. Fix: the original messages misspelled
	// "tiddler" as "tidder".
	prompts: {
		rename: "Rename the tag '%0' to '%1' in %2 tiddler%3?",
		remove: "Remove the tag '%0' from %1 tiddler%2?"
	},

	// Remove `tag` from every tiddler in `tiddlers`, suspending store
	// notifications so the UI refreshes only once at the end.
	removeTag: function(tag,tiddlers) {
		store.suspendNotifications();
		for (var i=0;i<tiddlers.length;i++) {
			store.setTiddlerTag(tiddlers[i].title,false,tag);
		}
		store.resumeNotifications();
		store.notifyAll();
	},

	// Replace `oldTag` with `newTag` on every tiddler in `tiddlers`,
	// batching notifications as above.
	renameTag: function(oldTag,newTag,tiddlers) {
		store.suspendNotifications();
		for (var i=0;i<tiddlers.length;i++) {
			store.setTiddlerTag(tiddlers[i].title,false,oldTag); // remove old
			store.setTiddlerTag(tiddlers[i].title,true,newTag);  // add new
		}
		store.resumeNotifications();
		store.notifyAll();
	},

	// Methods merged into TiddlyWiki.prototype by init(); they wrap the
	// original saveTiddler/removeTiddler to offer renaming/removing the
	// corresponding tag when a tiddler used as a tag is renamed or deleted.
	storeMethods: {

		// Keep a reference to the original so the wrapper can delegate.
		saveTiddler_orig_renameTags: TiddlyWiki.prototype.saveTiddler,

		saveTiddler: function(title,newTitle,newBody,modifier,modified,tags,fields,clearChangeCount,created) {
			if (title != newTitle) {
				var tagged = this.getTaggedTiddlers(title);
				if (tagged.length > 0) {
					// then we are renaming a tag
					if (confirm(config.renameTags.prompts.rename.format([title,newTitle,tagged.length,tagged.length>1?"s":""])))
						config.renameTags.renameTag(title,newTitle,tagged);

					if (!this.tiddlerExists(title) && newBody == "")
						// dont create unwanted tiddler
						return null;
				}
			}
			return this.saveTiddler_orig_renameTags(title,newTitle,newBody,modifier,modified,tags,fields,clearChangeCount,created);
		},

		removeTiddler_orig_renameTags: TiddlyWiki.prototype.removeTiddler,

		removeTiddler: function(title) {
			var tagged = this.getTaggedTiddlers(title);
			if (tagged.length > 0)
				if (confirm(config.renameTags.prompts.remove.format([title,tagged.length,tagged.length>1?"s":""])))
					config.renameTags.removeTag(title,tagged);
			return this.removeTiddler_orig_renameTags(title);
		}

	},

	init: function() {
		merge(TiddlyWiki.prototype,this.storeMethods);
	}
}

config.renameTags.init();

//}}}

Type the text for 'Resources'
<<toggleSideBar "" "Toggle Sidebar" hide>>
<html>
<center>
  <video id="my-video" class="video-js" controls preload="auto" width="858" height="480" poster="" data-setup="{}">
    <source src="video/ssh.mp4" type='video/mp4'>
    <p class="vjs-no-js">
      To view this video please enable JavaScript, and consider upgrading to a web browser that
      <a href="http://videojs.com/html5-video-support/" target="_blank">supports HTML5 video</a>
    </p>
  </video>

  <script src="https://vjs.zencdn.net/7.8.2/video.min.js"></script>
</center>
</html>
!! Grading

Shell scripting labs will follow a more traditional grading approach where only meeting the objectives of the script will receive a B grade, or 8.5 / 10.  A grade beyond that will require exceeding the minimum expectations.

15% of shell scripting lab grades will be reserved for style, efficiency, and completeness.  /% For example, if the  %/

| !Grade | !Quality |
| A |Exceptional - Exceeds expectations|
| B |Average - Meets expectations|
| C |Satisfactory - meets some expectations|
| D |Poor - minimally meets expectations|
| F |Does not meet minimal expectations|


!! Requirements

The following procedure must be followed for submitting shell scripting labs. Improperly submitted scripting labs will not be accepted.  

The end goal of this process is to submit a single PDF containing these three components:
&nbsp;&nbsp;''a.'' Lab assignment cover page
&nbsp;&nbsp;''b.'' Your shell scripts
&nbsp;&nbsp;''c.'' A demonstration of your scripts


''1.'' Create the directory ~/bin/. Save all lab shell scripts in this directory with the naming convention ''ncs205-lab//xx//-q//yy//.sh'' where ''//xx//'' is the lab number and ''//yy//'' is the question number. It would make things easier for you if you always use two digits for //xx// and //yy//.
I may refer to the script files if I need to execute/test any of your scripts.

''2.'' The following header must be placed at the top of each script file, immediately after the shebang:
{{{
# File name: 
# Author:
# Date Written:
# Assignment:
# Purpose:
# Description:
#
#
#
}}}

The //Purpose// field should contain a brief, one-line summary of what your script is accomplishing.  The Description field should contain more detailed information regarding how it is accomplishing that goal or any additional information helpful to understand the function of your script.


''3.'' Make use of comments throughout your script to document and convey what you're doing.
Long lines should be nicely wrapped with carriage returns. Cut long lines at about column 60. (makes it easier to read and print)
* You can escape the newline with a {{Command{''\''}}} to continue a long line of commands on the next line.  For example:
{{{
dig axfr ${user}.ncs205.net @ns1.${user}.ncs205.net | \
	grep -v ^\;  | sort | md5sum | cut -d " " -f 1
}}}

{{Note{''Note:'' The remaining two steps are for labs which are //only// scripts and do not contain input boxes}}}

''4.'' Use the script command to launch a recording shell, saving the output to {{File{~/bin/labxx.raw}}} where //xx// is the lab number. Demonstrate execution of your scripts within this recording shell.
* Execute {{Command{script ~/bin/labxx.raw}}} to start the recording shell, saving output to the filename specified as the first command line argument
* Run your scripts. Everything you type and all output will be recorded in the file {{File{~/bin/labxx.raw}}}.
* Be sure you do not have excessive errors in the recording.  Pressing the backspace key will be recorded as a separate keystroke and make your demonstration harder to read.
* Type exit to terminate the recording shell.
* If you examine the {{File{~/bin/labxx.raw}}}, you will see it contains a lot of control characters.  The {{Command{ ansifilter }}} command will remove them.
** {{Command{ ansifilter -o ~/bin/labxx.out ~/bin/labxx.raw }}}

''5.'' Create a PDF of your scripts to save to the {{File{/opt/pub/ncs205/submit/}}} directory:
* The comments below explain what's going on.  
* The first paragraph only explains the {{Command{a2ps}}} command.  The second paragraph contains the two commands you'll need to execute to submit your lab.

{{{
# a2ps is a great tool for formatting documents about to be printed or saved as a PDF.
# The following command string will gather your labs and the output from the demo of your scripts, apply some syntax 
# highlighting and document formatting, and display PostScript on STDOUT.
# The -o - option for a2ps instructs a2ps to send its output to STDOUT instead of saving it to a file
a2ps -A fill -C -E -g -o - ~/bin/ncs205-labxx-q??.sh ~/bin/labxx.out

# PostScript is mostly the language of printers and isn't as useful on modern PCs. Instead of working with 
# native PostScript or displaying STDOUT to the screen, lets convert it to PDF and save to a file.
# Caution! Only run this command when you are ready to submit your scripts. 
# *** These are the commands you will execute to submit your scripting labs ***
a2ps -A fill -C -E -g -o - ~/bin/ncs205-labxx-q??.sh ~/bin/labxx.out | ps2pdf - ~/bin/ncs205-labxx-username.pdf
# Note: The - in the above ps2pdf command instructs the command to obtain its input from STDIN.
# The next command will combine the lab assignment PDF and the PDF you just created containing your scripts, 
# saving the output to the class submit directory.  This is the PDF you are submitting for my review.
cpdf /opt/pub/ncs205/labs/labxx.pdf ~/bin/ncs205-labxx-username.pdf -o /opt/pub/ncs205/submit/ncs205-labxx-username.pdf

# Be sure to follow the standard lab naming scheme and change the xx and username to proper values
# The nice thing about using standard naming conventions is it makes everything easy to script. 
# Rather than have to search for these commands for every scripting lab you need to submit, you might as well make a dynamic script out of it.
# (Hint: This will be a future assignment)
}}}


<html>
<center>
  <video id="my-video" class="video-js" controls preload="auto" width="1000" height="662" poster="" data-setup="{}">
    <source src="video/scripts.mp4" type='video/mp4'>
    <p class="vjs-no-js">
      To view this video please enable JavaScript, and consider upgrading to a web browser that
      <a href="http://videojs.com/html5-video-support/" target="_blank">supports HTML5 video</a>
    </p>
  </video>

  <script src="https://vjs.zencdn.net/7.8.2/video.min.js"></script>
</center>
</html>
Following some general best practices will make it much easier for you to write your scripts and help ensure they're correct.  Good practices will also help others understand what your scripts are doing.  


1. Provide clarity

<<<
You may not be the only one using your script.  Others may have to look at the code to troubleshoot or make modifications later.  Or you may have to come back years later to decipher what you did and what your past self was thinking.  Good usability should be built into everything you do.  Be sure your code is well laid out and clear to follow.  If you make it so I have a hard time understanding your logic and workflow with these simple scripts when I know the objective, then others will surely have difficulty understanding more complex ones.
<<<

2. Add comments to explain what you are doing

<<<
Comments should be utilized to explain what you are doing or your methodology if the command itself does not make it clear.  Simple and obvious commands and processes may be self-documenting.  Others should have comments to explain them.  It's also helpful to cut your comments at about 60 characters so they don't wrap to the next line.

Comments should be concise and professional.  Unnecessary verbosity can cause your meaning to be lost.
<<<

{{{
# Combine lab sheet as a cover page with PDF containing shell 
# scripts, saving output to submit directory.
cpdf /opt/pub/ncs205/labs/labxx.pdf ~/bin/ncs205-labxx-username.pdf -o /opt/pub/ncs205/submit/ncs205-labxx-username.pdf
}}}

3. Cut long lines

<<<
Wrap long command lines by escaping the newline with a {{Command{''\''}}} and continuing on the next line, cutting at about column 60 so the code remains easy to read and print.  For example:
<<<
{{{
# Combine lab sheet as a cover page with PDF containing shell 
# scripts, saving output to submit directory.
cpdf /opt/pub/ncs205/labs/labxx.pdf ~/bin/ncs205-labxx-username.pdf \
	-o /opt/pub/ncs205/submit/ncs205-labxx-username.pdf
}}}



4. Use meaningful variable names


5. Properly indent your code


6. Avoid code duplication


7. Add debugging statements

bash -x scriptname.sh

echo $mdate : $mtime : $oneday : $threeday
*Shell scripting quick reference:  http://www.math.uga.edu/~caner/08reu/shell.html
*Awk one liners:  http://www.catonmat.net/blog/wp-content/uploads/2008/09/awk1line.txt
*Sed one liners:  http://www.catonmat.net/blog/wp-content/uploads/2008/09/sed1line.txt
<<search>><<closeAll>><<collapseAll>><<expandAll>><<permaview>><<newTiddler>><<saveChanges>><<slider chkSliderOptionsPanel OptionsPanel "options »" "Change TiddlyWiki advanced options">><<slider chkSliderContents [[TabContents]] 'contents »' 'contents'>>
/*{{{*/

#sidebar {
 color: #000;
 background: transparent;
}

#sidebarOptions {
 background: #fff;
}

#sidebarOptions .button {
 color: #999;
}

#sidebarOptions .button:hover {
 color: #000;
 background: #fff;
 border-color:white;
}

#sidebarOptions .button:active {
 color: #000;
 background: #fff;
}

#sidebarOptions .sliderPanel {
 background: transparent;
}

#sidebarOptions .sliderPanel A:hover {
 color: #000;
 background: #fff;
}

#sidebarOptions .sliderPanel A:active {
 color: #000;
 background: #fff;
}

.sidebarSubHeading {
 color: #000;
}

#sidebarOptions .sliderPanel .tabSelected{
  border: 1px solid #ccc;
  background-color: #fff;
  margin: 0px;
  padding-top: 5px;
  padding-bottom: 0px;
  padding-left: 2px;
  padding-right: 2px;
  -moz-border-radius-topleft: 1em;
  -moz-border-radius-topright: 1em;}

#sidebarOptions .sliderPanel .tabUnselected{
  border:    1px solid #ccc;
  background-color: #eee;
  margin: 0px;
  padding-top: 5px;
  padding-bottom: 0px;
  padding-left: 2px;
  padding-right: 2px;
  -moz-border-radius-topleft: 1em;
  -moz-border-radius-topright: 1em;}

#sidebarTabs .tabContents .tiddlyLink:hover {
 background: #fff;
 color: #000;
}

#sidebarTabs .tabContents {
 color: #000;
}

#sidebarTabs .button {
 color: #666;
  border-top:    1px solid #ccc;
  border-left:   1px solid #ccc;
  border-bottom: 2px solid #ccc;
  border-right:  2px solid #ccc;
}

#sidebarTabs .tabContents .button:hover {
 color: #000;
 background: #fff;
}

.tagging, .tagged {
  padding: 0.5em;
  background-color: #eee;
  border-top:    1px solid #ccc;
  border-left:   1px solid #ccc;
  border-bottom: 3px solid #ccc;
  border-right:  3px solid #ccc;
  -moz-border-radius: 1em; }


/*}}}*/

Fall 2022 Course Notes
Introduction to Linux - SUNYIT NCS 205
[[HorizontalMainMenuStyles]]
[[SideBarStyles]]
[[TagglyTaggingStyles]]

/*{{{*/

body {
  background: #eee; }

h1 {font-size:2.0em; }
h2 { color: #000; background: transparent; text-decoration: underline; }
h3 { margin: 0.0em; color: #000; background: transparent; }
h4,h5 { color: #000; background: transparent; }

h1 {
        margin: 4px 0 4px 0;
	padding: 5px;
	color: [[ColorPalette::PrimaryDark]];
	background: [[ColorPalette::PrimaryPale]];
}

ul {
	margin-top: 0;
	margin-bottom: 0;
}

.headerShadow {
  padding: 1.0em; }

.headerForeground {
  padding: 1.0em; }

.selected .tagging, .selected .tagged {
  padding: 0.5em;
  background-color: #eee;
  border-top:    1px solid #ccc;
  border-left:   1px solid #ccc;
  border-bottom: 3px solid #ccc;
  border-right:  3px solid #ccc;
  -moz-border-radius: 1em; }

.shadow .title {
  color: #999; }

.siteTitle {
  font-size: 2.5em; }

.siteSubtitle {
  font-size: 1.0em; }

.subtitle {
	font-size: 0.8em;
}

.tagging, .tagged {
  padding: 0.5em;
  background-color: #eee;
  border-top:    1px solid #ccc;
  border-left:   1px solid #ccc;
  border-bottom: 3px solid #ccc;
  border-right:  3px solid #ccc;
  -moz-border-radius: 1em; }

.tiddler {
  border-top:    1px solid #ccc;
  border-left:   1px solid #ccc;
  border-bottom: 3px solid #ccc;
  border-right:  3px solid #ccc;
  margin: 0.5em;
  background:#fff;
  padding: 0.5em;
  -moz-border-radius: 1em; }

.title {
  color:black;
  font-size: 1.5em; }


.tabSelected{
  padding-top: 0.0em;
  padding-left: 0.5em;
  padding-right: 0.5em;
  -moz-border-radius-topleft: 0.5em;
  -moz-border-radius-topright: 0.5em;}

.tabUnselected {
  padding-top: 0.0em;
  padding-left: 0.5em;
  padding-right: 0.5em;
  -moz-border-radius-topleft: 0.5em;
  -moz-border-radius-topright: 0.5em;}

.tabContents {
  margin: 0px;
  padding-top: 0px;
  padding-bottom: 0px;
  padding-left: 2px;
  padding-right: 2px;
  -moz-border-radius: 1em; }

.viewer .listTitle {
  list-style-type: none;
}

.viewer pre {
  background-color: #f8f8ff;
  border-color: #ddf; }

#messageArea { background-color:#bde; border-color:#8ab; border-width:4px; border-style:dotted; font-size:90%; }
#messageArea .button { text-decoration:none; font-weight:bold; background:transparent; border:0px; }
#messageArea .button:hover {background: #acd;}
/*}}}*/

/*{{{*/
.Command{color: fuchsia;font-size: 10pt;font-family: Courier, monospace;margin-left: 2px;margin-right: 2px;}
.Commandi{color: fuchsia;font-size: 10pt;font-family: Courier, monospace;margin-left: 20px;margin-right: 2px;}
.File{color: #4c7fbc;font-size: 10pt;font-family: Courier, monospace;margin-left: 2px;margin-right: 2px; font-weight:bold;}
.Remove{background-color: orange}
.Host{color: #0f9791;font-size: 10pt;font-family: Courier, monospace;margin-left: 2px;margin-right: 2px; font-weight:bold;}
.Note{display:block;background-color:#e9ffdb;border:1px solid darkgreen;margin: 0 2em 0 2em;padding:5px 5px 5px 5px;}
.Warning{display:block;background-color:#ffee88; border:2px solid darkorange;margin: 0 2em 0 2em;padding:5px 5px 5px 5px;}
.Monospaced{font-size: 10pt;font-family: Courier, monospace;margin-left: 2px;margin-right: 2px;}
.Commands{background-color:#F0F0FF; font-size: 10pt;font-family: Courier, monospace;margin-left: 2px;margin-right: 2px;padding:5px 5px 5px 5px;}


/*}}}*/

 .HideSideBarButton {margin-left: 3em;}

.viewer div.centeredTable {
	text-align: center;
}

.viewer div.centeredTable table {
	margin: 0 auto;
	text-align: left;
}

.viewer table.borderless,
.viewer table.borderless * {
	border: 0;
}
/*{{{*/
body {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}

a {color:[[ColorPalette::PrimaryMid]];}
a:hover {background-color:[[ColorPalette::PrimaryMid]]; color:[[ColorPalette::Background]];}
a img {border:0;}

h1,h2,h3,h4,h5,h6 {color:[[ColorPalette::SecondaryDark]]; background:transparent;}
h1 {border-bottom:2px solid [[ColorPalette::PrimaryLight]];}
h2,h3 {border-bottom:1px solid [[ColorPalette::TertiaryLight]];}

.button {color:[[ColorPalette::PrimaryDark]]; border:1px solid [[ColorPalette::Background]];}
.button:hover {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::SecondaryLight]]; border-color:[[ColorPalette::SecondaryMid]];}
.button:active {color:[[ColorPalette::Background]]; background:[[ColorPalette::SecondaryMid]]; border:1px solid [[ColorPalette::SecondaryDark]];}

.header {background:[[ColorPalette::PrimaryMid]];}
.headerShadow {color:[[ColorPalette::Foreground]];}
.headerShadow a {font-weight:normal; color:[[ColorPalette::Foreground]];}
.headerForeground {color:[[ColorPalette::Background]];}
.headerForeground a {font-weight:normal; color:[[ColorPalette::PrimaryPale]];}

.tabSelected{color:[[ColorPalette::PrimaryDark]];
	background:[[ColorPalette::TertiaryPale]];
	border-left:1px solid [[ColorPalette::TertiaryLight]];
	border-top:1px solid [[ColorPalette::TertiaryLight]];
	border-right:1px solid [[ColorPalette::TertiaryLight]];
}
.tabUnselected {color:[[ColorPalette::Background]]; background:[[ColorPalette::TertiaryMid]];}
.tabContents {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::TertiaryPale]]; border:1px solid [[ColorPalette::TertiaryLight]];}
.tabContents .button {border:0;}

#sidebar {}
#sidebarOptions input {border:1px solid [[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel {background:[[ColorPalette::PrimaryPale]];}
#sidebarOptions .sliderPanel a {border:none;color:[[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel a:hover {color:[[ColorPalette::Background]]; background:[[ColorPalette::PrimaryMid]];}
#sidebarOptions .sliderPanel a:active {color:[[ColorPalette::PrimaryMid]]; background:[[ColorPalette::Background]];}

.wizard {background:[[ColorPalette::PrimaryPale]]; border:1px solid [[ColorPalette::PrimaryMid]];}
.wizard h1 {color:[[ColorPalette::PrimaryDark]]; border:none;}
.wizard h2 {color:[[ColorPalette::Foreground]]; border:none;}
.wizardStep {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];
	border:1px solid [[ColorPalette::PrimaryMid]];}
.wizardStep.wizardStepDone {background:[[ColorPalette::TertiaryLight]];}
.wizardFooter {background:[[ColorPalette::PrimaryPale]];}
.wizardFooter .status {background:[[ColorPalette::PrimaryDark]]; color:[[ColorPalette::Background]];}
.wizard .button {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::SecondaryLight]]; border: 1px solid;
	border-color:[[ColorPalette::SecondaryPale]] [[ColorPalette::SecondaryDark]] [[ColorPalette::SecondaryDark]] [[ColorPalette::SecondaryPale]];}
.wizard .button:hover {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::Background]];}
.wizard .button:active {color:[[ColorPalette::Background]]; background:[[ColorPalette::Foreground]]; border: 1px solid;
	border-color:[[ColorPalette::PrimaryDark]] [[ColorPalette::PrimaryPale]] [[ColorPalette::PrimaryPale]] [[ColorPalette::PrimaryDark]];}

.wizard .notChanged {background:transparent;}
.wizard .changedLocally {background:#80ff80;}
.wizard .changedServer {background:#8080ff;}
.wizard .changedBoth {background:#ff8080;}
.wizard .notFound {background:#ffff80;}
.wizard .putToServer {background:#ff80ff;}
.wizard .gotFromServer {background:#80ffff;}

#messageArea {border:1px solid [[ColorPalette::SecondaryMid]]; background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]];}
#messageArea .button {color:[[ColorPalette::PrimaryMid]]; background:[[ColorPalette::SecondaryPale]]; border:none;}

.popupTiddler {background:[[ColorPalette::TertiaryPale]]; border:2px solid [[ColorPalette::TertiaryMid]];}

.popup {background:[[ColorPalette::TertiaryPale]]; color:[[ColorPalette::TertiaryDark]]; border-left:1px solid [[ColorPalette::TertiaryMid]]; border-top:1px solid [[ColorPalette::TertiaryMid]]; border-right:2px solid [[ColorPalette::TertiaryDark]]; border-bottom:2px solid [[ColorPalette::TertiaryDark]];}
.popup hr {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::PrimaryDark]]; border-bottom:1px;}
.popup li.disabled {color:[[ColorPalette::TertiaryMid]];}
.popup li a, .popup li a:visited {color:[[ColorPalette::Foreground]]; border: none;}
.popup li a:hover {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; border: none;}
.popup li a:active {background:[[ColorPalette::SecondaryPale]]; color:[[ColorPalette::Foreground]]; border: none;}
.popupHighlight {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}
.listBreak div {border-bottom:1px solid [[ColorPalette::TertiaryDark]];}

.tiddler .defaultCommand {font-weight:bold;}

.shadow .title {color:[[ColorPalette::TertiaryDark]];}

.title {color:[[ColorPalette::SecondaryDark]];}
.subtitle {color:[[ColorPalette::TertiaryDark]];}

.toolbar {color:[[ColorPalette::PrimaryMid]];}
.toolbar a {color:[[ColorPalette::TertiaryLight]];}
.selected .toolbar a {color:[[ColorPalette::TertiaryMid]];}
.selected .toolbar a:hover {color:[[ColorPalette::Foreground]];}

.tagging, .tagged {border:1px solid [[ColorPalette::TertiaryPale]]; background-color:[[ColorPalette::TertiaryPale]];}
.selected .tagging, .selected .tagged {background-color:[[ColorPalette::TertiaryLight]]; border:1px solid [[ColorPalette::TertiaryMid]];}
.tagging .listTitle, .tagged .listTitle {color:[[ColorPalette::PrimaryDark]];}
.tagging .button, .tagged .button {border:none;}

.footer {color:[[ColorPalette::TertiaryLight]];}
.selected .footer {color:[[ColorPalette::TertiaryMid]];}

.sparkline {background:[[ColorPalette::PrimaryPale]]; border:0;}
.sparktick {background:[[ColorPalette::PrimaryDark]];}

.error, .errorButton {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::Error]];}
.warning {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::SecondaryPale]];}
.lowlight {background:[[ColorPalette::TertiaryLight]];}

.zoomer {background:none; color:[[ColorPalette::TertiaryMid]]; border:3px solid [[ColorPalette::TertiaryMid]];}

.imageLink, #displayArea .imageLink {background:transparent;}

.annotation {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; border:2px solid [[ColorPalette::SecondaryMid]];}

.viewer .listTitle {list-style-type:none; margin-left:-2em;}
.viewer .button {border:1px solid [[ColorPalette::SecondaryMid]];}
.viewer blockquote {border-left:3px solid [[ColorPalette::TertiaryDark]];}

.viewer table, table.twtable {border:2px solid [[ColorPalette::TertiaryDark]];}
.viewer th, .viewer thead td, .twtable th, .twtable thead td {background:[[ColorPalette::SecondaryMid]]; border:1px solid [[ColorPalette::TertiaryDark]]; color:[[ColorPalette::Background]];}
.viewer td, .viewer tr, .twtable td, .twtable tr {border:1px solid [[ColorPalette::TertiaryDark]];}

.viewer pre {border:1px solid [[ColorPalette::SecondaryLight]]; background:[[ColorPalette::SecondaryPale]];}
.viewer code {color:[[ColorPalette::SecondaryDark]];}
.viewer hr {border:0; border-top:dashed 1px [[ColorPalette::TertiaryDark]]; color:[[ColorPalette::TertiaryDark]];}

.highlight, .marked {background:[[ColorPalette::SecondaryLight]];}

.editor input {border:1px solid [[ColorPalette::PrimaryMid]];}
.editor textarea {border:1px solid [[ColorPalette::PrimaryMid]]; width:100%;}
.editorFooter {color:[[ColorPalette::TertiaryMid]];}

#backstageArea {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::TertiaryMid]];}
#backstageArea a {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::Background]]; border:none;}
#backstageArea a:hover {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; }
#backstageArea a.backstageSelTab {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}
#backstageButton a {background:none; color:[[ColorPalette::Background]]; border:none;}
#backstageButton a:hover {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::Background]]; border:none;}
#backstagePanel {background:[[ColorPalette::Background]]; border-color: [[ColorPalette::Background]] [[ColorPalette::TertiaryDark]] [[ColorPalette::TertiaryDark]] [[ColorPalette::TertiaryDark]];}
.backstagePanelFooter .button {border:none; color:[[ColorPalette::Background]];}
.backstagePanelFooter .button:hover {color:[[ColorPalette::Foreground]];}
#backstageCloak {background:[[ColorPalette::Foreground]]; opacity:0.6; filter:'alpha(opacity:60)';}
/*}}}*/
/*{{{*/
/* StyleSheetColors: colour-only rules for the default theme.  Every value
   below is a ColorPalette placeholder that TiddlyWiki substitutes from the
   ColorPalette tiddler before this CSS reaches the browser; layout and
   sizing live in the sections that follow. */
body {
	background: [[ColorPalette::Background]];
	color: [[ColorPalette::Foreground]];
}

a{
	color: [[ColorPalette::PrimaryMid]];
}

a:hover{
	background: [[ColorPalette::PrimaryMid]];
	color: [[ColorPalette::Background]];
}

a img{
	border: 0;
}

h1,h2,h3,h4,h5 {
	color: [[ColorPalette::SecondaryDark]];
	background: [[ColorPalette::PrimaryPale]];
}

.button {
	color: [[ColorPalette::PrimaryDark]];
	border: 1px solid [[ColorPalette::Background]];
}

.button:hover {
	color: [[ColorPalette::PrimaryDark]];
	background: [[ColorPalette::SecondaryLight]];
	border-color: [[ColorPalette::SecondaryMid]];
}

.button:active {
	color: [[ColorPalette::Background]];
	background: [[ColorPalette::SecondaryMid]];
	border: 1px solid [[ColorPalette::SecondaryDark]];
}

/* Header band: headerShadow and headerForeground are two stacked copies of
   the title text (see the layout rules later in this file). */
.header {
	background: [[ColorPalette::PrimaryMid]];
}

.headerShadow {
	color: [[ColorPalette::Foreground]];
}

.headerShadow a {
	font-weight: normal;
	color: [[ColorPalette::Foreground]];
}

.headerForeground {
	color: [[ColorPalette::Background]];
}

.headerForeground a {
	font-weight: normal;
	color: [[ColorPalette::PrimaryPale]];
}

.tabSelected{
	color: [[ColorPalette::PrimaryDark]];
	background: [[ColorPalette::TertiaryPale]];
	border-left: 1px solid [[ColorPalette::TertiaryLight]];
	border-top: 1px solid [[ColorPalette::TertiaryLight]];
	border-right: 1px solid [[ColorPalette::TertiaryLight]];
}

.tabUnselected {
	color: [[ColorPalette::Background]];
	background: [[ColorPalette::TertiaryMid]];
}

.tabContents {
	color: [[ColorPalette::PrimaryDark]];
	background: [[ColorPalette::TertiaryPale]];
	border: 1px solid [[ColorPalette::TertiaryLight]];
}

.tabContents .button {
	 border: 0;}

#sidebar {
}

#sidebarOptions input {
	border: 1px solid [[ColorPalette::PrimaryMid]];
}

#sidebarOptions .sliderPanel {
	background: [[ColorPalette::PrimaryPale]];
}

#sidebarOptions .sliderPanel a {
	border: none;
	color: [[ColorPalette::PrimaryMid]];
}

#sidebarOptions .sliderPanel a:hover {
	color: [[ColorPalette::Background]];
	background: [[ColorPalette::PrimaryMid]];
}

#sidebarOptions .sliderPanel a:active {
	color: [[ColorPalette::PrimaryMid]];
	background: [[ColorPalette::Background]];
}

/* Import/upgrade wizard panels */
.wizard {
	background: [[ColorPalette::SecondaryLight]];
	border-top: 1px solid [[ColorPalette::SecondaryMid]];
	border-left: 1px solid [[ColorPalette::SecondaryMid]];
}

.wizard h1 {
	color: [[ColorPalette::SecondaryDark]];
}

.wizard h2 {
	color: [[ColorPalette::Foreground]];
}

.wizardStep {
	background: [[ColorPalette::Background]];
	border-top: 1px solid [[ColorPalette::SecondaryMid]];
	border-bottom: 1px solid [[ColorPalette::SecondaryMid]];
	border-left: 1px solid [[ColorPalette::SecondaryMid]];
}

.wizard .button {
	color: [[ColorPalette::Background]];
	background: [[ColorPalette::PrimaryMid]];
	border-top: 1px solid [[ColorPalette::PrimaryLight]];
	border-right: 1px solid [[ColorPalette::PrimaryDark]];
	border-bottom: 1px solid [[ColorPalette::PrimaryDark]];
	border-left: 1px solid [[ColorPalette::PrimaryLight]];
}

.wizard .button:hover {
	color: [[ColorPalette::PrimaryLight]];
	background: [[ColorPalette::PrimaryDark]];
	border-color: [[ColorPalette::PrimaryLight]];
}

.wizard .button:active {
	color: [[ColorPalette::Background]];
	background: [[ColorPalette::PrimaryMid]];
	border-top: 1px solid [[ColorPalette::PrimaryLight]];
	border-right: 1px solid [[ColorPalette::PrimaryDark]];
	border-bottom: 1px solid [[ColorPalette::PrimaryDark]];
	border-left: 1px solid [[ColorPalette::PrimaryLight]];
}

#messageArea {
	border: 1px solid [[ColorPalette::SecondaryDark]];
	background: [[ColorPalette::SecondaryMid]];
	color: [[ColorPalette::PrimaryDark]];
}

#messageArea .button {
	padding: 0.2em 0.2em 0.2em 0.2em;
	color: [[ColorPalette::PrimaryDark]];
	background: [[ColorPalette::Background]];
}

.popup {
	background: [[ColorPalette::PrimaryLight]];
	border: 1px solid [[ColorPalette::PrimaryMid]];
}

.popup hr {
	color: [[ColorPalette::PrimaryDark]];
	background: [[ColorPalette::PrimaryDark]];
	border-bottom: 1px;
}

.popup li.disabled {
	color: [[ColorPalette::PrimaryMid]];
}

.popup li a, .popup li a:visited {
	color: [[ColorPalette::TertiaryPale]];
	border: none;
}

.popup li a:hover {
	background: [[ColorPalette::PrimaryDark]];
	color: [[ColorPalette::Background]];
	border: none;
}

.tiddler .defaultCommand {
 font-weight: bold;
}

/* Shadow tiddlers (built-in defaults) get a dimmed title */
.shadow .title {
	color: [[ColorPalette::TertiaryDark]];
}

.title {
	color: [[ColorPalette::SecondaryDark]];
}

.subtitle {
	color: [[ColorPalette::TertiaryDark]];
}

.toolbar {
	color: [[ColorPalette::PrimaryMid]];
}

.tagging, .tagged {
	border: 1px solid [[ColorPalette::TertiaryPale]];
	background-color: [[ColorPalette::TertiaryPale]];
}

.selected .tagging, .selected .tagged {
	background-color: [[ColorPalette::TertiaryLight]];
	border: 1px solid [[ColorPalette::TertiaryMid]];
}

.tagging .listTitle, .tagged .listTitle {
	color: [[ColorPalette::PrimaryDark]];
}

.tagging .button, .tagged .button {
		border: none;
}

.footer {
	color: [[ColorPalette::TertiaryLight]];
}

.selected .footer {
	color: [[ColorPalette::TertiaryMid]];
}

.sparkline {
	background: [[ColorPalette::PrimaryPale]];
	border: 0;
}

.sparktick {
	background: [[ColorPalette::PrimaryDark]];
}

.error, .errorButton {
	color: [[ColorPalette::Foreground]];
	background: [[ColorPalette::Error]];
}

.warning {
	color: [[ColorPalette::Foreground]];
	background: [[ColorPalette::SecondaryPale]];
}

.cascade {
	background: [[ColorPalette::TertiaryPale]];
	color: [[ColorPalette::TertiaryMid]];
	border: 1px solid [[ColorPalette::TertiaryMid]];
}

.imageLink, #displayArea .imageLink {
	background: transparent;
}

.viewer .listTitle {list-style-type: none; margin-left: -2em;}

.viewer .button {
	border: 1px solid [[ColorPalette::SecondaryMid]];
}

.viewer blockquote {
	border-left: 3px solid [[ColorPalette::TertiaryDark]];
}

.viewer table {
	border: 2px solid [[ColorPalette::TertiaryDark]];
}

.viewer th, thead td {
	background: [[ColorPalette::SecondaryMid]];
	border: 1px solid [[ColorPalette::TertiaryDark]];
	color: [[ColorPalette::Background]];
}

.viewer td, .viewer tr {
	border: 1px solid [[ColorPalette::TertiaryDark]];
}

.viewer pre {
	border: 1px solid [[ColorPalette::SecondaryLight]];
	background: [[ColorPalette::SecondaryPale]];
}

.viewer code {
	color: [[ColorPalette::SecondaryDark]];
}

.viewer hr {
	border: 0;
	border-top: dashed 1px [[ColorPalette::TertiaryDark]];
	color: [[ColorPalette::TertiaryDark]];
}

.highlight, .marked {
	background: [[ColorPalette::SecondaryLight]];
}

.editor input {
	border: 1px solid [[ColorPalette::PrimaryMid]];
}

.editor textarea {
	border: 1px solid [[ColorPalette::PrimaryMid]];
	width: 100%;
}

.editorFooter {
	color: [[ColorPalette::TertiaryMid]];
}

/*}}}*/
/*{{{*/
/* Compact layout stylesheet: sizing, spacing and positioning only; the
   colours for these selectors come from the palette section above.
   Rules prefixed with `* html` use the star-html hack and therefore apply
   only in old Internet Explorer. */
* html .tiddler {height:1%;}

body {font-size:.75em; font-family:arial,helvetica; margin:0; padding:0;}

h1,h2,h3,h4,h5,h6 {font-weight:bold; text-decoration:none;}
h1,h2,h3 {padding-bottom:1px; margin-top:1.2em;margin-bottom:0.3em;}
h4,h5,h6 {margin-top:1em;}
h1 {font-size:1.35em;}
h2 {font-size:1.25em;}
h3 {font-size:1.1em;}
h4 {font-size:1em;}
h5 {font-size:.9em;}

hr {height:1px;}

a {text-decoration:none;}

dt {font-weight:bold;}

/* Ordered-list numbering cycles decimal -> alpha -> roman with nesting depth */
ol {list-style-type:decimal;}
ol ol {list-style-type:lower-alpha;}
ol ol ol {list-style-type:lower-roman;}
ol ol ol ol {list-style-type:decimal;}
ol ol ol ol ol {list-style-type:lower-alpha;}
ol ol ol ol ol ol {list-style-type:lower-roman;}
ol ol ol ol ol ol ol {list-style-type:decimal;}

.txtOptionInput {width:11em;}

#contentWrapper .chkOptionInput {border:0;}

.externalLink {text-decoration:underline;}

.indent {margin-left:3em;}
.outdent {margin-left:3em; text-indent:-3em;}
code.escaped {white-space:nowrap;}

.tiddlyLinkExisting {font-weight:bold;}
.tiddlyLinkNonExisting {font-style:italic;}

/* the 'a' is required for IE, otherwise it renders the whole tiddler in bold */
a.tiddlyLinkNonExisting.shadow {font-weight:bold;}

#mainMenu .tiddlyLinkExisting,
	#mainMenu .tiddlyLinkNonExisting,
	#sidebarTabs .tiddlyLinkNonExisting {font-weight:normal; font-style:normal;}
#sidebarTabs .tiddlyLinkExisting {font-weight:bold; font-style:normal;}

/* Header: headerForeground is positioned exactly over headerShadow, offset
   by 1px, to produce the drop-shadow effect on the site title */
.header {position:relative;}
.header a:hover {background:transparent;}
.headerShadow {position:relative; padding:4.5em 0em 1em 1em; left:-1px; top:-1px;}
.headerForeground {position:absolute; padding:4.5em 0em 1em 1em; left:0px; top:0px;}

.siteTitle {font-size:3em;}
.siteSubtitle {font-size:1.2em;}

#mainMenu {position:absolute; left:0; width:10em; text-align:right; line-height:1.6em; padding:1.5em 0.5em 0.5em 0.5em; font-size:1.1em;}

#sidebar {position:absolute; right:3px; width:16em; font-size:.9em;}
#sidebarOptions {padding-top:0.3em;}
#sidebarOptions a {margin:0em 0.2em; padding:0.2em 0.3em; display:block;}
#sidebarOptions input {margin:0.4em 0.5em;}
#sidebarOptions .sliderPanel {margin-left:1em; padding:0.5em; font-size:.85em;}
#sidebarOptions .sliderPanel a {font-weight:bold; display:inline; padding:0;}
#sidebarOptions .sliderPanel input {margin:0 0 .3em 0;}
#sidebarTabs .tabContents {width:15em; overflow:hidden;}

.wizard {padding:0.1em 1em 0em 2em;}
.wizard h1 {font-size:2em; font-weight:bold; background:none; padding:0em 0em 0em 0em; margin:0.4em 0em 0.2em 0em;}
.wizard h2 {font-size:1.2em; font-weight:bold; background:none; padding:0em 0em 0em 0em; margin:0.4em 0em 0.2em 0em;}
.wizardStep {padding:1em 1em 1em 1em;}
.wizard .button {margin:0.5em 0em 0em 0em; font-size:1.2em;}
.wizardFooter {padding:0.8em 0.4em 0.8em 0em;}
.wizardFooter .status {padding:0em 0.4em 0em 0.4em; margin-left:1em;}
.wizard .button {padding:0.1em 0.2em 0.1em 0.2em;}

/* _position:absolute is an IE6-only fallback for position:fixed */
#messageArea {position:fixed; top:2em; right:0em; margin:0.5em; padding:0.5em; z-index:2000; _position:absolute;}
.messageToolbar {display:block; text-align:right; padding:0.2em 0.2em 0.2em 0.2em;}
#messageArea a {text-decoration:underline;}

.tiddlerPopupButton {padding:0.2em 0.2em 0.2em 0.2em;}
.popupTiddler {position: absolute; z-index:300; padding:1em 1em 1em 1em; margin:0;}

.popup {position:absolute; z-index:300; font-size:.9em; padding:0; list-style:none; margin:0;}
.popup .popupMessage {padding:0.4em;}
.popup hr {display:block; height:1px; width:auto; padding:0; margin:0.2em 0em;}
.popup li.disabled {padding:0.4em;}
.popup li a {display:block; padding:0.4em; font-weight:normal; cursor:pointer;}
.listBreak {font-size:1px; line-height:1px;}
.listBreak div {margin:2px 0;}

.tabset {padding:1em 0em 0em 0.5em;}
.tab {margin:0em 0em 0em 0.25em; padding:2px;}
.tabContents {padding:0.5em;}
.tabContents ul, .tabContents ol {margin:0; padding:0;}
.txtMainTab .tabContents li {list-style:none;}
.tabContents li.listLink { margin-left:.75em;}

#contentWrapper {display:block;}
#splashScreen {display:none;}

/* Story column: margins clear the absolutely-positioned menu and sidebar */
#displayArea {margin:1em 17em 0em 14em;}

.toolbar {text-align:right; font-size:.9em;}

.tiddler {padding:1em 1em 0em 1em;}

.missing .viewer,.missing .title {font-style:italic;}

.title {font-size:1.6em; font-weight:bold;}

.missing .subtitle {display:none;}
.subtitle {font-size:1.1em;}

.tiddler .button {padding:0.2em 0.4em;}

/* Tagging panel is hidden except on tiddlers that are themselves tags */
.tagging {margin:0.5em 0.5em 0.5em 0; float:left; display:none;}
.isTag .tagging {display:block;}
.tagged {margin:0.5em; float:right;}
.tagging, .tagged {font-size:0.9em; padding:0.25em;}
.tagging ul, .tagged ul {list-style:none; margin:0.25em; padding:0;}
.tagClear {clear:both;}

.footer {font-size:.9em;}
.footer li {display:inline;}

.annotation {padding:0.5em; margin:0.5em;}

* html .viewer pre {width:99%; padding:0 0 1em 0;}
.viewer {line-height:1.4em; padding-top:0.5em;}
.viewer .button {margin:0em 0.25em; padding:0em 0.25em;}
.viewer blockquote {line-height:1.5em; padding-left:0.8em;margin-left:2.5em;}
.viewer ul, .viewer ol {margin-left:0.5em; padding-left:1.5em;}

.viewer table, table.twtable {border-collapse:collapse; margin:0.8em 1.0em;}
.viewer th, .viewer td, .viewer tr,.viewer caption,.twtable th, .twtable td, .twtable tr,.twtable caption {padding:3px;}
table.listView {font-size:0.85em; margin:0.8em 1.0em;}
table.listView th, table.listView td, table.listView tr {padding:0px 3px 0px 3px;}

.viewer pre {padding:0.5em; margin-left:0.5em; font-size:1.2em; line-height:1.4em; overflow:auto;}
.viewer code {font-size:1.2em; line-height:1.4em;}

.editor {font-size:1.1em;}
.editor input, .editor textarea {display:block; width:100%; font:inherit;}
.editorFooter {padding:0.25em 0em; font-size:.9em;}
.editorFooter .button {padding-top:0px; padding-bottom:0px;}

.fieldsetFix {border:0; padding:0; margin:1px 0px 1px 0px;}

.sparkline {line-height:1em;}
.sparktick {outline:0;}

.zoomer {font-size:1.1em; position:absolute; overflow:hidden;}
.zoomer div {padding:1em;}

/* Backstage area: hidden by default, shown when .backstageVisible is set */
* html #backstage {width:99%;}
* html #backstageArea {width:99%;}
#backstageArea {display:none; position:relative; overflow: hidden; z-index:150; padding:0.3em 0.5em 0.3em 0.5em;}
#backstageToolbar {position:relative;}
#backstageArea a {font-weight:bold; margin-left:0.5em; padding:0.3em 0.5em 0.3em 0.5em;}
#backstageButton {display:none; position:absolute; z-index:175; top:0em; right:0em;}
#backstageButton a {padding:0.1em 0.4em 0.1em 0.4em; margin:0.1em 0.1em 0.1em 0.1em;}
#backstage {position:relative; width:100%; z-index:50;}
#backstagePanel {display:none; z-index:100; position:absolute; width:90%; margin:0em 3em 0em 3em; padding:1em 1em 1em 1em;}
.backstagePanelFooter {padding-top:0.2em; float:right;}
.backstagePanelFooter a {padding:0.2em 0.4em 0.2em 0.4em;}
#backstageCloak {display:none; z-index:20; position:absolute; width:100%; height:100px;}

.whenBackstage {display:none;}
.backstageVisible .whenBackstage {display:block;}
/*}}}*/
/***
!Sections in this Tiddler:
*Generic rules
**Links styles
**Link Exceptions
*Header
*Main menu
*Sidebar
**Sidebar options
**Sidebar tabs
*Message area
*Popup
*Tabs
*Tiddler display
**Viewer
**Editor
*Misc. rules
!Generic Rules /%==============================================%/
***/
/*{{{*/
/* Base typography for the whole page; later sections override per-element */
body {
	font-size: .75em;
	font-family: arial,helvetica;
	position: relative;
	margin: 0;
	padding: 0;
}

h1,h2,h3,h4,h5 {
	font-weight: bold;
	text-decoration: none;
	padding-left: 0.4em;
}

h1 {font-size: 1.5em;}
h2 {font-size: 1.25em;}
h3 {font-size: 1.1em;}
h4 {font-size: 1em;}
h5 {font-size: .9em;}

hr {
	height: 1px;
}

a{
	text-decoration: none;
}

/* Ordered-list numbering cycles decimal -> alpha -> roman with nesting depth */
ol { list-style-type: decimal }
ol ol { list-style-type: lower-alpha }
ol ol ol { list-style-type: lower-roman }
ol ol ol ol { list-style-type: decimal }
ol ol ol ol ol { list-style-type: lower-alpha }
ol ol ol ol ol ol { list-style-type: lower-roman }
ol ol ol ol ol ol ol { list-style-type: decimal }
/*}}}*/
/***
''General Link Styles'' /%-----------------------------------------------------------------------------%/
***/
/*{{{*/
.externalLink {
	text-decoration: underline;
}

/* the 'a' is required for IE, otherwise it renders the whole tiddler in bold */
a.tiddlyLinkNonExisting.shadow {
	font-weight: bold;
}
/*}}}*/
/***
''Exceptions to common link styles'' /%------------------------------------------------------------------%/
***/
/*{{{*/

/* Menu and sidebar-tab links render in plain weight and style whether or
   not the target tiddler exists */
#mainMenu .tiddlyLinkExisting,
#mainMenu .tiddlyLinkNonExisting,
#sidebarTabs .tiddlyLinkExisting,
#sidebarTabs .tiddlyLinkNonExisting{
 font-weight: normal;
 font-style: normal;
}

/*}}}*/
/***
!Header /%==================================================%/
***/
/*{{{*/

.header {
		position: relative;
}

.header a:hover {
	background: transparent;
}

/* headerForeground sits exactly over headerShadow, offset by 1px,
   producing the drop-shadow effect on the site title */
.headerShadow {
	position: relative;
	padding: 4.5em 0em 1em 1em;
	left: -1px;
	top: -1px;
}

.headerForeground {
	position: absolute;
	padding: 4.5em 0em 1em 1em;
	left: 0px;
	top: 0px;
}

.siteTitle {
	font-size: 3em;
}

.siteSubtitle {
	font-size: 1.2em;
	padding: 0em 0em 0em 2em;
}

/*}}}*/
/***
!Main menu /%==================================================%/
***/
/*{{{*/
/* Left-hand navigation column; #displayArea's left margin clears it */
#mainMenu {
	position: absolute;
	left: 0;
	width: 10em;
	text-align: right;
	line-height: 1.6em;
	padding: 1.5em 0.5em 0.5em 0.5em;
	font-size: 1.1em;
}

/*}}}*/
/***
!Sidebar rules /%==================================================%/
***/
/*{{{*/
/* Right-hand column; #displayArea's right margin clears it */
#sidebar {
	position: absolute;
	right: 3px;
	width: 16em;
	font-size: .9em;
}
/*}}}*/
/***
''Sidebar options'' /%----------------------------------------------------------------------------------%/
***/
/*{{{*/
#sidebarOptions {
	padding-top: 0.3em;
}

#sidebarOptions a {
	margin: 0em 0.2em;
	padding: 0.2em 0.3em;
	display: block;
}

#sidebarOptions input {
	margin: 0.4em 0.5em;
}

/* sliderPanel is the collapsible "options" drawer */
#sidebarOptions .sliderPanel {
	margin-left: 1em;
	padding: 0.5em;
	font-size: .85em;
}

#sidebarOptions .sliderPanel a {
	font-weight: bold;
	display: inline;
	padding: 0;
}

#sidebarOptions .sliderPanel input {
	margin: 0 0 .3em 0;
}
/*}}}*/
/***
''Sidebar tabs'' /%-------------------------------------------------------------------------------------%/
***/
/*{{{*/

/* Fixed width with overflow clipping so long tiddler titles don't widen
   the sidebar */
#sidebarTabs .tabContents {
	width: 15em;
	overflow: hidden;
}

/*}}}*/
/***
!Message area /%==================================================%/
***/
/*{{{*/
#messageArea {
position:absolute; top:0; right:0; margin: 0.5em; padding: 0.5em;
}

/* NOTE(review): the attribute-selector form repeats the id so that only
   browsers which parse [id=...] get position:fixed, while others keep the
   absolute fallback above — confirm intent before consolidating. */
*[id='messageArea'] {
position:fixed !important; z-index:99;}

.messageToolbar {
display: block;
text-align: right;
}

#messageArea a{
	text-decoration: underline;
}
/*}}}*/
/***
!Popup /%==================================================%/
***/
/*{{{*/
/* Popup menus (tag dropdowns, toolbar menus); colours come from the
   palette section */
.popup {
	font-size: .9em;
	padding: 0.2em;
	list-style: none;
	margin: 0;
}

.popup hr {
	display: block;
	height: 1px;
	width: auto;
	padding: 0;
	margin: 0.2em 0em;
}

.popup li.disabled {
	padding: 0.2em;
}

.popup li a{
	display: block;
	padding: 0.2em;
}
/*}}}*/
/***
!Tabs /%==================================================%/
***/
/*{{{*/
.tabset {
	padding: 1em 0em 0em 0.5em;
}

.tab {
	margin: 0em 0em 0em 0.25em;
	padding: 2px;
}

.tabContents {
	padding: 0.5em;
}

.tabContents ul, .tabContents ol {
	margin: 0;
	padding: 0;
}

/* Main story tabs suppress list bullets */
.txtMainTab .tabContents li {
	list-style: none;
}

.tabContents li.listLink {
	 margin-left: .75em;
}
/*}}}*/
/***
!Tiddler display rules /%==================================================%/
***/
/*{{{*/
/* Story column: margins clear the absolutely-positioned menu and sidebar */
#displayArea {
	margin: 1em 17em 0em 14em;
}


/* Unlike the compact sheet earlier in this file, this sheet hides each
   tiddler's toolbar until the tiddler is selected (hovered) */
.toolbar {
	text-align: right;
	font-size: .9em;
	visibility: hidden;
}

.selected .toolbar {
	visibility: visible;
}

.tiddler {
	padding: 1em 1em 0em 1em;
}

.missing .viewer,.missing .title {
	font-style: italic;
}

.title {
	font-size: 1.6em;
	font-weight: bold;
}

.missing .subtitle {
 display: none;
}

.subtitle {
	font-size: 0.8em;
}

/* I'm not a fan of how button looks in tiddlers... */
.tiddler .button {
	padding: 0.2em 0.4em;
}

/* Tagging panel is hidden except on tiddlers that are themselves tags */
.tagging {
margin: 0.5em 0.5em 0.5em 0;
float: left;
display: none;
}

.isTag .tagging {
display: block;
}

.tagged {
margin: 0.5em;
float: right;
}

.tagging, .tagged {
font-size: 0.9em;
padding: 0.25em;
}

.tagging ul, .tagged ul {
list-style: none;margin: 0.25em;
padding: 0;
}

.tagClear {
clear: both;
}

.footer {
	font-size: .9em;
}

.footer li {
display: inline;
}
/*}}}*/
/***
''The viewer is where the tiddler content is displayed'' /%------------------------------------------------%/
***/
/*{{{*/
/* star-html hack: this rule applies only in old Internet Explorer */
* html .viewer pre {
	width: 99%;
	padding: 0 0 1em 0;
}

.viewer {
	line-height: 1.4em;
	padding-top: 0.5em;
}

.viewer .button {
	margin: 0em 0.25em;
	padding: 0em 0.25em;
}

.viewer blockquote {
	line-height: 1.5em;
	padding-left: 0.8em;
	margin-left: 2.5em;
}

.viewer ul, .viewer ol{
	margin-left: 0.5em;
	padding-left: 1.5em;
}

.viewer table {
	border-collapse: collapse;
	margin: 0.8em 1.0em;
}

.viewer th, .viewer td, .viewer tr,.viewer caption{
	padding: 3px;
}

.viewer pre {
	padding: 0.5em;
	margin-left: 0.5em;
	font-size: 1.2em;
	line-height: 1.4em;
	overflow: auto;
}

.viewer code {
	font-size: 1.2em;
	line-height: 1.4em;
}
/*}}}*/
/***
''The editor replaces the viewer in the tiddler'' /%------------------------------------------------%/
***/
/*{{{*/
.editor {
font-size: 1.1em;
}

.editor input, .editor textarea {
	display: block;
	width: 100%;
	font: inherit;
}

.editorFooter {
	padding: 0.25em 0em;
	font-size: .9em;
}

.editorFooter .button {
padding-top: 0px; padding-bottom: 0px;}

/* Neutralises default fieldset chrome around the edit form */
.fieldsetFix {border: 0;
padding: 0;
margin: 1px 0px 1px 0px;
}
/*}}}*/
/***
!Misc rules /%==================================================%/
***/
/*{{{*/
.sparkline {
	line-height: 1em;
}

.sparktick {
	outline: 0;
}

/* Animated zoom rectangle shown when opening/closing tiddlers */
.zoomer {
	font-size: 1.1em;
	position: absolute;
	padding: 1em;
}

.cascade {
	font-size: 1.1em;
	position: absolute;
	overflow: hidden;
}
/*}}}*/
/*{{{*/
/* Print stylesheet: hide all interactive chrome and let the story column
   use the full page width */
@media print {
#mainMenu, #sidebar, #messageArea, .toolbar, #backstageButton, #backstageArea, #toolbar, #topMenu, #rightMenu {display: none !important;}
#header, #headerShadow {display: none !important;}
.siteSubtitle {display: none !important;}

.siteTitle { font-size: 1.5em; }


#displayArea {margin: 1em 1em 0em;}
noscript {display:none;} /* Fixes a feature in Firefox 1.5.0.2 where print preview displays the noscript content */
}
/*}}}*/
Type the text for 'Styles'
<<tabs txtMainTab Timeline Timeline TabTimeline All 'All tiddlers' TabAll Tags 'All tags' TabTags More 'More lists' TabMore>>
The following command table should prove useful for this course.  This is not an exhaustive list of the commands you will need to know and become familiar with.

| !&nbsp;Cover&nbsp; | !&nbsp;Command&nbsp; | !Description |
|>|>|bgcolor(#a0ffa0): ''Basic Commands'' |
| x | echo |output command arguments to the terminal|
| x | cd |change directories|
| x | pwd |display current working directory|
| x | ls |list files|
| x | cp |copy files|
| x | rm |remove files|
| x | mv |move files|
| x | mkdir |create directory|
| x | rmdir |remove directory|
| x | touch |create an empty file with default permissions|
| x | ln |create link|
| x | man |view man pages|
| x | chmod |set permissions for a file|
|  | chgrp |set group for a file|
|>|>|bgcolor(#a0ffa0): ''Display Text / Editors'' |
| x | less |display text output one page at a time|
|  | pico |easy to use text editor|
|  | nano |GNU clone of pico|
| x | vi |advanced unix text editor|
|  | ex |line oriented version of vi|
|  | vim |vi improved|
| x | vimtutor |learn how to use the vim editor|
|>|>|bgcolor(#a0ffa0): ''Filters'' |
| x | cat |concatenate and print files|
| x | grep |pattern matching filter|
| x | egrep |extended regular expression pattern matching filter|
| x | head |display first lines of a file|
| x | tail |display the last part of a file|
| x | cut |cut out selected portions of each line of a file|
| x | fold |fold long lines for finite width output device|
| x | sort |sort lines of text files|
| x | uniq |report or filter out repeated lines in a file|
| x | wc |word, line, character, and byte count|
| x | tr |translate characters|
|  | paste |merge lines of input|
|  | nl |line numbering filter|
| x | sed |stream editor|
| x | awk |pattern-directed scanning and processing language|
| x | tee |duplicate standard input to a file|
|  | strings |print the strings of printable characters in (binary) files|
|  | cmp |compare two files|
|  | diff |compare files line by line|
|  | comm |select or reject lines common to two files|
|>|>|bgcolor(#a0ffa0): ''System Commands'' |
| x | script |save copy of terminal session|
|  | source |read a .file|
| x | rehash |recompute hash table of where commands are located|
| x | which |scan path for a program and return its location (or definition of an alias)|
| x | df |display free disk space|
| x | du |disk usage (-s display only a total for each argument, -k use 1K blocks, -h human readable)|
| x | find |walk a file hierarchy in search of files|
|  | locate |find filenames quickly based on pre-generated file database|
|  | hostname |print name of current host system|
| x | uptime |show how long system has been running|
| x | uname |display information about the system|
|  | xargs |construct argument list(s) and execute utility|
|  | quota |display disk usage and limits|
|  | crontab |schedule commands for automated execution on regular intervals|
|  | at |schedule a job for later execution|
|>|>|bgcolor(#a0ffa0): ''Process Management / Job Control'' |
| x | ps |process status|
| x | top |display and update information about the top cpu processes|
| x | kill |terminate or signal a process|
| x | jobs |display all jobs|
| x | fg |continue background jobs in the foreground|
| x | bg |continue suspended job in the background|
| x | stop |suspend job running in the background|
|  | suspend |suspend the current running shell|
|>|>|bgcolor(#a0ffa0): ''User Information'' |
| x | w |display who is logged in and what they are doing|
| x | id |return user identity|
| x | groups |show group memberships|
|  | users |list usernames of current logged in users|
|  | who |display who is on the system|
|  | whoami |display effective user id|
| x | finger |user information lookup program|
| x | last |indicate last logins of users and ttys|
|>|>|bgcolor(#a0ffa0): ''Misc commands useful for shell scripting'' |
| x | clear |clear the screen|
| x | read //var// |prompt the user to enter information, saving to //var//|
| x | date |display the current date and time with optional formatting.  see strftime manpage|
| x | test |condition evaluation utility; [ is a link to it.  See the test manpage.|
| x | expr |evaluate an expression|
| x | jot |print sequential or random numbers|
|  | sleep //n// |pause execution for //n// seconds|
|  | stat |display extended file status/information|
|  | stty |set the options for a terminal device interface|
|  | basename |return the file name portion of a path|
|  | dirname |return the directory name portion of a path|
|  | fstat |List open files or determine whether specified file is open|
| x | exit [//n//] |log out or quit a script with the option exit status of //n//|
|>|>|bgcolor(#a0ffa0): ''Networking / Communication'' |
| x | ssh |~OpenSSH SSH client|
| x | scp |secure copy (remote file copy program)|
|  | rsync |a fast, versatile, remote (and local) file-copying tool|
|  | telnet |user interface to the TELNET protocol.  also useful for testing connectivity to arbitrary ports|
|  | talk / ytalk |talk to another user|
|  | write |send a message to another user|
|  | mesg |display (do not display) messages from other users|
|  | host |DNS lookup utility|
|  | nslookup |query Internet name servers interactively|
|  | traceroute |print the route packets take to network host|
|  | ping |send ICMP ~ECHO_REQUEST packets to network hosts|
|  | lynx / links |character mode WWW browser|
|>|>|bgcolor(#a0ffa0): ''Text Formatting & Printing'' |
| x | lpr |command line print utility|
| x | lpq |print spool queue examination program|
| x | lprm |remove jobs from the line printer spooling queue|
| x | pdf2ps |Ghostscript PDF to ~PostScript translator|
| x | a2ps |format files for printing on a ~PostScript printer|
|>|>|bgcolor(#a0ffa0): ''Working with files'' |
| x | file |display file type|
| x | tar |manipulate file archive files|
| x | gzip |compression tool using ~Lempel-Ziv coding|
| x | gunzip |decompression tool using ~Lempel-Ziv coding|
| x | bzip2 |a block-sorting file compressor|
| x | bunzip2 |a block-sorting file decompressor|
|  | split |split a file into pieces|
| x | md5 / md5sum |calculate a message-digest fingerprint (checksum) for a file (freebsd / linux)|
|  | srm |securely remove files or directories|
|  | rsync |a fast, versatile, remote (and local) file-copying tool|
/***
|Name|TagglyListPlugin|
|Created by|SimonBaird|
|Location|http://simonbaird.com/mptw/#TagglyListPlugin|
|Version|1.1.2 25-Apr-06|
|Requires|See TagglyTagging|

!History
* 1.1.2 (25-Apr-2006) embedded TagglyTaggingStyles. No longer needs a separate tiddler for styles.
* 1.1.1 (6-Mar-2006) fixed bug with refreshAllVisible closing tiddlers being edited. Thanks Luke Blanshard.

***/

/***
!Setup and config
***/
//{{{

// Plugin version metadata, read by TiddlyWiki's plugin manager.
version.extensions.TagglyListPlugin = {
	major: 1, minor: 1, revision: 2,
	// JavaScript Date months are 0-based, so month 3 is April — matching
	// the declared release date of 25-Apr-2006.  (The previous value 4
	// silently meant 25-May-2006.)
	date: new Date(2006,3,25),
	source: "http://simonbaird.com/mptw/#TagglyListPlugin"
};

// Macro namespaces: each gets a handler and/or settings attached below.
config.macros.tagglyList = {};
config.macros.tagglyListByTag = {};
config.macros.tagglyListControl = {};
config.macros.tagglyListWithSort = {};
config.macros.hideSomeTags = {};

// change this to your preference
config.macros.tagglyListWithSort.maxCols = 6;

config.macros.tagglyList.label = "Tagged as %0:";

// the default sort options. set these to your preference
config.macros.tagglyListWithSort.defaults = {
 sortBy:"title", // title|created|modified
 sortOrder: "asc", // asc|desc
 hideState: "show", // show|hide
 groupState: "nogroup", // nogroup|group
 numCols: 1
};

// these tags will be ignored by the grouped view
config.macros.tagglyListByTag.excludeTheseTags = [
 "systemConfig",
 "TiddlerTemplates"
];

// Maps each control state to the tiddler tag that selects it; the
// getSortBy/getSortOrder/getHideState/getGroupState helpers below read
// these tags off a tiddler to decide how to render its taggly list.
config.macros.tagglyListControl.tags = {
 title:"sortByTitle",
 modified: "sortByModified",
 created: "sortByCreated",
 asc:"sortAsc",
 desc:"sortDesc",
 hide:"hideTagged",
 show:"showTagged",
 nogroup:"noGroupByTag",
 group:"groupByTag",
 cols1:"list1Cols",
 cols2:"list2Cols",
 cols3:"list3Cols",
 cols4:"list4Cols",
 cols5:"list5Cols",
 cols6:"list6Cols",
 cols7:"list7Cols",
 cols8:"list8Cols",
 cols9:"list9Cols"
}

// note: should match config.macros.tagglyListControl.tags
config.macros.hideSomeTags.tagsToHide = [
 "sortByTitle",
 "sortByCreated",
 "sortByModified",
 "sortDesc",
 "sortAsc",
 "hideTagged",
 "showTagged",
 "noGroupByTag",
 "groupByTag",
 "list1Cols",
 "list2Cols",
 "list3Cols",
 "list4Cols",
 "list5Cols",
 "list6Cols",
 "list7Cols",
 "list8Cols",
 "list9Cols"
];


//}}}
/***

!Utils
***/
//{{{
// from Eric
// Returns true when the tiddler named `title` exists and carries `tag`.
// NOTE(review): relies on TiddlyWiki's Array.prototype.find extension,
// which returns a matching index or null — not the ES2015 Array#find.
function isTagged(title,tag) {
 var t=store.getTiddler(title); if (!t) return false;
 return (t.tags.find(tag)!=null);
}

// from Eric
// Adds `tag` to the tiddler named `title` when absent, removes it when
// present.  No-op when the tiddler is missing or has no tags array.
function toggleTag(title,tag) {
 var t=store.getTiddler(title); if (!t || !t.tags) return;
 if (t.tags.find(tag)==null) t.tags.push(tag);
 else t.tags.splice(t.tags.find(tag),1);
}

// Appends `tag` to the tiddler named `title`.  Silently does nothing when
// the tiddler is missing or has no tags array; duplicates are not checked.
function addTag(title,tag) {
	var tiddler = store.getTiddler(title);
	if (!tiddler || !tiddler.tags)
		return;
	tiddler.tags.push(tag);
}

// Removes `tag` from the tiddler named `title` when present.  Silently does
// nothing when the tiddler is missing, has no tags array, or lacks the tag.
function removeTag(title,tag) {
	var tiddler = store.getTiddler(title);
	if (!tiddler || !tiddler.tags)
		return;
	var pos = tiddler.tags.find(tag); // TiddlyWiki's find: index or null
	if (pos != null)
		tiddler.tags.splice(pos,1);
}

// from Udo
// Fallback Array#indexOf for pre-ES5 browsers.  Guarded so that a native
// implementation — which also supports the optional fromIndex argument —
// is never clobbered; the original unconditionally replaced it and
// silently ignored fromIndex.
if (!Array.prototype.indexOf) {
	Array.prototype.indexOf = function(item) {
		for (var i = 0; i < this.length; i++) {
			if (this[i] == item) {
				return i;
			}
		}
		return -1;
	};
}
// True when `item` is present in the array (indexOf equality semantics).
Array.prototype.contains = function(item) {
	return this.indexOf(item) != -1;
};
//}}}
/***

!tagglyList
displays a list of tagged tiddlers.
parameters are sortField and sortOrder
***/
//{{{

// not used at the moment...
// Builds a sorted wiki-markup list ("[[a]] [[b]] ...") of `tiddler`'s tags,
// minus the hidden control tags, the grouped-view exclusions, and
// `thisTag` itself.  Works on a copy; the tiddler's tags are not mutated.
// NOTE(review): if `thisTag` is not actually among the remaining tags,
// find() returns null and splice(null,1) removes element 0 — confirm
// callers always pass a tag the tiddler carries.
function sortedListOfOtherTags(tiddler,thisTag) {
 var list = tiddler.tags.concat(); // so we are working on a clone..
 for (var i=0;i<config.macros.hideSomeTags.tagsToHide.length;i++) {
 if (list.find(config.macros.hideSomeTags.tagsToHide[i]) != null)
 list.splice(list.find(config.macros.hideSomeTags.tagsToHide[i]),1); // remove hidden ones
 }
 for (var i=0;i<config.macros.tagglyListByTag.excludeTheseTags.length;i++) {
 if (list.find(config.macros.tagglyListByTag.excludeTheseTags[i]) != null)
 list.splice(list.find(config.macros.tagglyListByTag.excludeTheseTags[i]),1); // remove excluded ones
 }
 list.splice(list.find(thisTag),1); // remove thisTag
 return '[[' + list.sort().join("]] [[") + ']]';
}

// Three-way comparator for Array.sort: 0 when equal, -1 when `a` sorts
// before `b`, +1 otherwise.
function sortHelper(a,b) {
	return (a == b) ? 0 : ((a < b) ? -1 : +1);
}

// Renders the grouped ("by tag") view: a bulleted list of every tiddler
// tagged with the current tiddler's title, grouped under each of their
// other tags.  params[0] = sort field (default "title"),
// params[1] = sort order "asc"|"desc" (default "asc").
// NOTE(review): uses TiddlyWiki's Array.prototype.find (index or null),
// not the ES2015 Array#find.
config.macros.tagglyListByTag.handler = function (place,macroName,params,wikifier,paramString,tiddler) {

 var sortBy = params[0] ? params[0] : "title";
 var sortOrder = params[1] ? params[1] : "asc";

 var result = store.getTaggedTiddlers(tiddler.title,sortBy);

 if (sortOrder == "desc")
 result = result.reverse();

 // Start with every tagged tiddler as a "leftover"; any tiddler that ends
 // up under at least one group is removed, so what remains is ungrouped.
 var leftOvers = []
 for (var i=0;i<result.length;i++) {
 leftOvers.push(result[i].title);
 }

 // Map of group-tag -> wiki markup of second-level "**[[title]]" entries.
 var allTagsHolder = {};
 for (var i=0;i<result.length;i++) {
 for (var j=0;j<result[i].tags.length;j++) {

 if (
 result[i].tags[j] != tiddler.title // not this tiddler
 && config.macros.hideSomeTags.tagsToHide.find(result[i].tags[j]) == null // not a hidden one
 && config.macros.tagglyListByTag.excludeTheseTags.find(result[i].tags[j]) == null // not excluded
 ) {
 if (!allTagsHolder[result[i].tags[j]])
 allTagsHolder[result[i].tags[j]] = "";
 allTagsHolder[result[i].tags[j]] += "**[["+result[i].title+"]]\n";

 if (leftOvers.find(result[i].title) != null)
 leftOvers.splice(leftOvers.find(result[i].title),1); // remove from leftovers. at the end it will contain the leftovers...
 }
 }
 }


 // Sort the group tags themselves: alphabetically for "title", otherwise
 // by the group tag's own tiddler field (missing tiddlers sort first).
 var allTags = [];
 for (var t in allTagsHolder)
 allTags.push(t);

 allTags.sort(function(a,b) {
 var tidA = store.getTiddler(a);
 var tidB = store.getTiddler(b);
 if (sortBy == "title") return sortHelper(a,b);
 else if (!tidA && !tidB) return 0;
 else if (!tidA) return -1;
 else if (!tidB) return +1;
 else return sortHelper(tidA[sortBy],tidB[sortBy]);
 });

 var markup = "";

 // Ungrouped tiddlers go first for ascending order, last for descending.
 if (sortOrder == "desc") {
 allTags.reverse();
 }
 else {
 // leftovers first...
 for (var i=0;i<leftOvers.length;i++)
 markup += "*[["+leftOvers[i]+"]]\n";
 }

 for (var i=0;i<allTags.length;i++)
 markup += "*[["+allTags[i]+"]]\n" + allTagsHolder[allTags[i]];

 if (sortOrder == "desc") {
 // leftovers last...
 for (var i=0;i<leftOvers.length;i++)
 markup += "*[["+leftOvers[i]+"]]\n";
 }

 wikify(markup,place);
}

// Renders the flat (ungrouped) view: tiddlers tagged with the current
// tiddler's title, laid out as a table of numCols balanced columns.
// params[0] = sort field (default "title"), params[1] = "asc"|"desc"
// (default "asc"), params[2] = number of columns (default 1).
config.macros.tagglyList.handler = function (place,macroName,params,wikifier,paramString,tiddler) {
 var sortBy = params[0] ? params[0] : "title";
 var sortOrder = params[1] ? params[1] : "asc";
 var numCols = params[2] ? params[2] : 1;

 var result = store.getTaggedTiddlers(tiddler.title,sortBy);
 if (sortOrder == "desc")
 result = result.reverse();

 // Balance the columns: the first `remainder` columns get one extra row.
 var listSize = result.length;
 var colSize = listSize/numCols;
 var remainder = listSize % numCols;

 var upperColsize;
 var lowerColsize;
 if (colSize != Math.floor(colSize)) {
 // it's not an exact fit so..
 lowerColsize = Math.floor(colSize);
 upperColsize = Math.floor(colSize) + 1;
 }
 else {
 lowerColsize = colSize;
 upperColsize = colSize;
 }

 var markup = "";
 var c=0;

 // Build a one-row table; each column is a <td> holding a wikified list.
 var newTaggedTable = createTiddlyElement(place,"table");
 var newTaggedBody = createTiddlyElement(newTaggedTable,"tbody");
 var newTaggedTr = createTiddlyElement(newTaggedBody,"tr");

 for (var j=0;j<numCols;j++) {
 var foo = "";
 var thisSize;

 if (j<remainder)
 thisSize = upperColsize;
 else
 thisSize = lowerColsize;

 for (var i=0;i<thisSize;i++)
 foo += ( "*[[" + result[c++].title + "]]\n"); // was using splitList.shift() but didn't work in IE;

 var newTd = createTiddlyElement(newTaggedTr,"td",null,"tagglyTagging");
 wikify(foo,newTd);

 }

};

/* snip for later.....
 //var groupBy = params[3] ? params[3] : "t.title.substr(0,1)";
 //var groupBy = params[3] ? params[3] : "sortedListOfOtherTags(t,tiddler.title)";
 //var groupBy = params[3] ? params[3] : "t.modified";
 var groupBy = null; // for now. groupBy here is working but disabled for now.

 var prevGroup = "";
 var thisGroup = "";

 if (groupBy) {
 result.sort(function(a,b) {
 var t = a; var aSortVal = eval(groupBy); var aSortVal2 = eval("t".sortBy);
 var t = b; var bSortVal = eval(groupBy); var bSortVal2 = eval("t".sortBy);
 var t = b; var bSortVal2 = eval(groupBy);
 return (aSortVal == bSortVal ?
 (aSortVal2 == bSortVal2 ? 0 : (aSortVal2 < bSortVal2 ? -1 : +1)) // yuck
 : (aSortVal < bSortVal ? -1 : +1));
 });
 }

 if (groupBy) {
 thisGroup = eval(groupBy);
 if (thisGroup != prevGroup)
 markup += "*[["+thisGroup+']]\n';
 markup += "**[["+t.title+']]\n';
 prevGroup = thisGroup;
 }



*/


//}}}

/***

!tagglyListControl
Use to make the sort control buttons
***/
//{{{

// Determine the sort field for a tiddler's tagged list by looking for
// one of the sort-by marker tags on the tiddler itself. Falls back to
// the configured default when the tiddler is missing or unmarked.
function getSortBy(title) {
 var fallback = config.macros.tagglyListWithSort.defaults.sortBy;
 var t = store.getTiddler(title);
 if (!t) return fallback;
 var markers = config.macros.tagglyListControl.tags;
 var candidates = ["title","modified","created"];
 for (var i=0;i<candidates.length;i++)
  if (t.tags.contains(markers[candidates[i]]))
   return candidates[i];
 return fallback;
}

// Determine the sort direction ("asc" or "desc") for a tiddler's tagged
// list from its marker tags, falling back to the configured default.
function getSortOrder(title) {
 var fallback = config.macros.tagglyListWithSort.defaults.sortOrder;
 var t = store.getTiddler(title);
 if (!t) return fallback;
 var markers = config.macros.tagglyListControl.tags;
 var candidates = ["asc","desc"];
 for (var i=0;i<candidates.length;i++)
  if (t.tags.contains(markers[candidates[i]]))
   return candidates[i];
 return fallback;
}

// Determine whether a tiddler's tagged list is hidden ("hide") or shown
// ("show") from its marker tags, falling back to the configured default.
function getHideState(title) {
 var fallback = config.macros.tagglyListWithSort.defaults.hideState;
 var t = store.getTiddler(title);
 if (!t) return fallback;
 var markers = config.macros.tagglyListControl.tags;
 var candidates = ["hide","show"];
 for (var i=0;i<candidates.length;i++)
  if (t.tags.contains(markers[candidates[i]]))
   return candidates[i];
 return fallback;
}

// Determine whether a tiddler's tagged list is grouped ("group") or flat
// ("nogroup") from its marker tags, falling back to the configured default.
function getGroupState(title) {
 var fallback = config.macros.tagglyListWithSort.defaults.groupState;
 var t = store.getTiddler(title);
 if (!t) return fallback;
 var markers = config.macros.tagglyListControl.tags;
 var candidates = ["group","nogroup"];
 for (var i=0;i<candidates.length;i++)
  if (t.tags.contains(markers[candidates[i]]))
   return candidates[i];
 return fallback;
}

// Determine how many columns a tiddler's tagged list uses from a "colsN"
// marker tag (N from 1 up to maxCols), falling back to the configured
// default (an int) when the tiddler is missing or unmarked.
function getNumCols(title) {
 var fallback = config.macros.tagglyListWithSort.defaults.numCols; // an int
 var t = store.getTiddler(title);
 if (!t) return fallback;
 var markers = config.macros.tagglyListControl.tags;
 var max = config.macros.tagglyListWithSort.maxCols;
 for (var n=1;n<=max;n++)
  if (t.tags.contains(markers["cols"+n]))
   return n;
 return fallback;
}


// Build the label for one of the taggly control buttons. `which` is
// "hide", "group", "cols" or a sort field name; the active sort field
// gets an arrow indicating the current direction.
// TODO. the strings here should be definable in config
function getSortLabel(title,which) {
 switch (which) {
  case "hide":
   return getHideState(title) == "show" ? "−" : "+"; // 0x25b8;
  case "group":
   return getGroupState(title) == "group" ? "normal" : "grouped";
  case "cols":
   return "cols±"; // &plusmn;
  default:
   // a sort field: mark the active one with its direction arrow
   if (getSortBy(title) == which)
    return which + (getSortOrder(title) == "asc" ? "↓" : "↑"); // &uarr; &darr;
   return which;
 }
}

// Handle a click on one of the taggly sort control buttons for tiddler
// `title`. `which` is "hide", "group", "cols", or a sort field name
// ("title", "modified" or "created"). The chosen state is persisted as
// marker tags on the tiddler itself, which is created on demand; the
// default value of each option is represented by the ABSENCE of a tag.
function handleSortClick(title,which) {
 var currentSortBy = getSortBy(title);
 var currentSortOrder = getSortOrder(title);
 var currentHideState = getHideState(title);
 var currentGroupState = getGroupState(title);
 var currentNumCols = getNumCols(title);

 var tags = config.macros.tagglyListControl.tags;

 // if it doesn't exist, lets create it..
 if (!store.getTiddler(title))
 store.saveTiddler(title,title,"",config.options.txtUserName,new Date(),null);

 if (which == "hide") {
 // toggle hide state
 var newHideState = (currentHideState == "hide" ? "show" : "hide");
 removeTag(title,tags[currentHideState]);
 if (newHideState != config.macros.tagglyListWithSort.defaults.hideState)
 toggleTag(title,tags[newHideState]);
 }
 else if (which == "group") {
 // toggle group state
 var newGroupState = (currentGroupState == "group" ? "nogroup" : "group");
 removeTag(title,tags[currentGroupState]);
 if (newGroupState != config.macros.tagglyListWithSort.defaults.groupState)
 toggleTag(title,tags[newGroupState]);
 }
 else if (which == "cols") {
 // cycle the column count, wrapping back to 1 past maxCols or past the
 // number of tiddlers in the list
 var newNumCols = currentNumCols + 1; // confusing. currentNumCols is an int
 if (newNumCols > config.macros.tagglyListWithSort.maxCols || newNumCols > store.getTaggedTiddlers(title).length)
 newNumCols = 1;
 removeTag(title,tags["cols"+currentNumCols]);
 // bug fix: this previously compared "colsN" against defaults.groupState
 // (a copy/paste slip that never matches), so the default column count
 // was tagged too instead of being left untagged like the other options
 if (newNumCols != config.macros.tagglyListWithSort.defaults.numCols)
 toggleTag(title,tags["cols"+newNumCols]);
 }
 else if (currentSortBy == which) {
 // same field clicked again: toggle sort order
 var newSortOrder = (currentSortOrder == "asc" ? "desc" : "asc");
 removeTag(title,tags[currentSortOrder]);
 if (newSortOrder != config.macros.tagglyListWithSort.defaults.sortOrder)
 toggleTag(title,tags[newSortOrder]);
 }
 else {
 // change sortBy only
 removeTag(title,tags["title"]);
 removeTag(title,tags["created"]);
 removeTag(title,tags["modified"]);

 if (which != config.macros.tagglyListWithSort.defaults.sortBy)
 toggleTag(title,tags[which]);
 }

 store.setDirty(true); // save is required now.
 story.refreshTiddler(title,false,true); // force=true
}

// Macro handler: renders one sort-control button for the current tiddler.
// params[0] names the control ("hide", "group", "cols" or a sort field).
config.macros.tagglyListControl.handler = function (place,macroName,params,wikifier,paramString,tiddler) {
 var which = params[0];
 var clickHandler = function(ev) {
  if (!ev) var ev = window.event;
  handleSortClick(tiddler.title,which);
  // swallow the click so it doesn't bubble up to the tiddler
  ev.cancelBubble = true;
  if (ev.stopPropagation) ev.stopPropagation();
  return false;
 };
 var cssClass = (which == "hide" ? "hidebutton" : "button");
 createTiddlyButton(place,getSortLabel(tiddler.title,which),"Click to change sort options",clickHandler,cssClass);
}
//}}}
/***

!tagglyListWithSort
put it all together..
***/
//{{{
// Macro handler: renders the whole tagging UI for the current tiddler —
// a hide/show toggle plus (when shown) a label, the sort/group/cols
// control buttons and the tagged list itself, built as wikitext and
// wikified into place. Renders nothing unless something tags this tiddler.
config.macros.tagglyListWithSort.handler = function (place,macroName,params,wikifier,paramString,tiddler) {
 if (tiddler && store.getTaggedTiddlers(tiddler.title).length > 0)
  // todo make this readable
 wikify(
 "<<tagglyListControl hide>>"+
 (getHideState(tiddler.title) != "hide" ?
 '<html><span class="tagglyLabel">'+config.macros.tagglyList.label.format([tiddler.title])+' </span></html>'+
 "<<tagglyListControl title>><<tagglyListControl modified>><<tagglyListControl created>><<tagglyListControl group>>"+(getGroupState(tiddler.title)=="group"?"":"<<tagglyListControl cols>>")+"\n" +
 "<<tagglyList" + (getGroupState(tiddler.title)=="group"?"ByTag ":" ") + getSortBy(tiddler.title)+" "+getSortOrder(tiddler.title)+" "+getNumCols(tiddler.title)+">>" // hacky
 // + \n----\n" +
 //"<<tagglyList "+getSortBy(tiddler.title)+" "+getSortOrder(tiddler.title)+">>"
 : ""),
 place,null,tiddler);
}

// tagglyTagging is an alias for tagglyListWithSort, so ViewTemplate can use it
config.macros.tagglyTagging = { handler: config.macros.tagglyListWithSort.handler };


//}}}
/***

!hideSomeTags
So we don't see the sort tags.
(note: they are still there when you edit. Will that be too annoying?)
***/
//{{{

// based on tags.handler
// based on tags.handler
// Macro handler: renders the tiddler's tag list, skipping any tags listed
// in this.tagsToHide so taggly's internal marker tags stay out of sight.
// An optional first param names a different tiddler to list tags for.
config.macros.hideSomeTags.handler = function(place,macroName,params,wikifier,paramString,tiddler) {
 var theList = createTiddlyElement(place,"ul");
 // bug fix: tiddlerExists is a method; the original indexed it like an
 // object (store.tiddlerExists[params[0]]) which is always undefined,
 // so the optional tiddler parameter never took effect
 if(params[0] && store.tiddlerExists(params[0]))
 tiddler = store.getTiddler(params[0]);
 var lingo = config.views.wikified.tag;
 var prompt = tiddler.tags.length == 0 ? lingo.labelNoTags : lingo.labelTags;
 createTiddlyElement(theList,"li",null,"listTitle",prompt.format([tiddler.title]));
 for(var t=0; t<tiddler.tags.length; t++)
 if (!this.tagsToHide.contains(tiddler.tags[t])) // this is the only difference from tags.handler...
 createTagButton(createTiddlyElement(theList,"li"),tiddler.tags[t],tiddler.title);

}

//}}}
/***

!Refresh everything when we save a tiddler. So the tagged lists never get stale. Is this too slow???
***/
//{{{

// Redraw every open tiddler that is not being edited (dirty), so the
// tagged lists on screen never go stale.
function refreshAllVisible() {
 story.forEachTiddler(function(title,element) {
  var beingEdited = (element.getAttribute("dirty") == "true");
  if (!beingEdited)
   story.refreshTiddler(title,false,true); // force redraw
 });
}

// Hook story.saveTiddler so tagged lists could be refreshed after every
// save (the refresh call is currently commented out).
story.saveTiddler_orig_mptw = story.saveTiddler;
story.saveTiddler = function(title,minorUpdate) {
 var result = this.saveTiddler_orig_mptw(title,minorUpdate);
// refreshAllVisible();
 return result;
}

// Hook store.removeTiddler for the same reason (refresh currently disabled).
store.removeTiddler_orig_mptw = store.removeTiddler;
store.removeTiddler = function(title) {
 this.removeTiddler_orig_mptw(title);
// refreshAllVisible();
}

// Install the plugin's stylesheet as the TagglyTaggingStyles shadow
// tiddler and apply it immediately. Users opt in by adding
// [[TagglyTaggingStyles]] to their StyleSheet tiddler (or pasting the CSS).
config.shadowTiddlers.TagglyTaggingStyles = "/***\nTo use, add {{{[[TagglyTaggingStyles]]}}} to your StyleSheet tiddler, or you can just paste the CSS in directly. See also ViewTemplate, EditTemplate and TagglyTagging.\n***/\n/*{{{*/\n.tagglyTagged li.listTitle { display:none;}\n.tagglyTagged li { display: inline; font-size:90%; }\n.tagglyTagged ul { margin:0px; padding:0px; }\n.tagglyTagging { padding-top:0.5em; }\n.tagglyTagging li.listTitle { display:none;}\n.tagglyTagging ul { margin-top:0px; padding-top:0.5em; padding-left:2em; margin-bottom:0px; padding-bottom:0px; }\n\n/* .tagglyTagging .tghide { display:inline; } */\n\n.tagglyTagging { vertical-align: top; margin:0px; padding:0px; }\n.tagglyTagging table { margin:0px; padding:0px; }\n\n\n.tagglyTagging .button { display:none; margin-left:3px; margin-right:3px; }\n.tagglyTagging .button, .tagglyTagging .hidebutton { color:#aaa; font-size:90%; border:0px; padding-left:0.3em;padding-right:0.3em;}\n.tagglyTagging .button:hover, .hidebutton:hover { background:#eee; color:#888; }\n.selected .tagglyTagging .button { display:inline; }\n\n.tagglyTagging .hidebutton { color:white; } /* has to be there so it takes up space. tweak if you're not using a white tiddler bg */\n.selected .tagglyTagging .hidebutton { color:#aaa }\n\n.tagglyLabel { color:#aaa; font-size:90%; }\n\n.tagglyTagging ul {padding-top:0px; padding-bottom:0.5em; margin-left:1em; }\n.tagglyTagging ul ul {list-style-type:disc; margin-left:-1em;}\n.tagglyTagging ul ul li {margin-left:0.5em; }\n\n.editLabel { font-size:90%; padding-top:0.5em; }\n/*}}}*/\n";

refreshStyles("TagglyTaggingStyles");


//}}}

// // <html>&#x25b8;&#x25be;&minus;&plusmn;</html>
Type the text for 'TagglyTagging'
/***
|Name:|TagglyTaggingPlugin|
|Description:|tagglyTagging macro is a replacement for the builtin tagging macro in your ViewTemplate|
|Version:|3.3.1 ($Rev: 6100 $)|
|Date:|$Date: 2008-07-27 01:42:07 +1000 (Sun, 27 Jul 2008) $|
|Source:|http://mptw.tiddlyspot.com/#TagglyTaggingPlugin|
|Author:|Simon Baird <simon.baird@gmail.com>|
|License:|http://mptw.tiddlyspot.com/#TheBSDLicense|
!Notes
See http://mptw.tiddlyspot.com/#TagglyTagging
***/
//{{{

merge(String.prototype,{

	// Compile a boolean tag expression (e.g. "work && !done") into a
	// JavaScript expression string over `tiddler.tags`, suitable for
	// eval'ing inside a loop where a variable named `tiddler` is in scope.
	// Tags containing spaces may be written [(Like This)] because square
	// brackets are awkward inside templates. An empty expression compiles
	// to "(true)". Pass debug=true to alert() the compiled expression.
	parseTagExpr: function(debug) {

		if (this.trim() == "")
			return "(true)";

		var anyLogicOp = /(!|&&|\|\||\(|\))/g;
		var singleLogicOp = /^(!|&&|\|\||\(|\))$/;

		var spaced = this.
			// because square brackets in templates are no good
			// this means you can use [(With Spaces)] instead of [[With Spaces]]
			replace(/\[\(/g," [[").
			replace(/\)\]/g,"]] ").
			// space things out so we can use readBracketedList. tricky eh?
			replace(anyLogicOp," $1 ");

		var expr = "";

		var tokens = spaced.readBracketedList(false); // false means don't uniq the list. nice one JR!

		for (var i=0;i<tokens.length;i++)
			if (tokens[i].match(singleLogicOp))
				expr += tokens[i];
			else
				// bug fix: escape ALL single quotes in a tag name, not just
				// the first one — the original regexp was missing the /g
				// flag. (round brackets inside tag names remain a problem)
				expr += "tiddler.tags.contains('%0')".format([tokens[i].replace(/'/g,"\\'")]);

		if (debug)
			alert(expr);

		return '('+expr+')';
	}

});

merge(TiddlyWiki.prototype,{
	// Return every tiddler whose tags satisfy the boolean tag expression
	// `tagExpr`, sorted ascending by `sortField` (default "title").
	getTiddlersByTagExpr: function(tagExpr,sortField) {

		var result = [];

		// compile the expression once; it references a variable named
		// `tiddler`, so the callback parameter below must keep that name
		var expr = tagExpr.parseTagExpr();

		store.forEachTiddler(function(title,tiddler) {
			if (eval(expr))
				result.push(tiddler);
		});

		if(!sortField)
			sortField = "title";

		result.sort(function(a,b) {return a[sortField] < b[sortField] ? -1 : (a[sortField] == b[sortField] ? 0 : +1);});

		return result;
	}
});

config.taggly = {

	// for translations
	lingo: {
		labels: {
			asc:        "\u2191", // up arrow
			desc:       "\u2193", // down arrow
			title:      "title",
			modified:   "modified",
			created:    "created",
			show:       "+",
			hide:       "-",
			normal:     "normal",
			group:      "group",
			commas:     "commas",
			sitemap:    "sitemap",
			numCols:    "cols\u00b1", // plus minus sign
			label:      "Tagged as '%0':",
			exprLabel:  "Matching tag expression '%0':",
			excerpts:   "excerpts",
			descr:      "descr",
			slices:     "slices",
			contents:   "contents",
			sliders:    "sliders",
			noexcerpts: "title only",
			noneFound:  "(none)"
		},

		tooltips: {
			title:      "Click to sort by title",
			modified:   "Click to sort by modified date",
			created:    "Click to sort by created date",
			show:       "Click to show tagging list",
			hide:       "Click to hide tagging list",
			normal:     "Click to show a normal ungrouped list",
			group:      "Click to show list grouped by tag",
			sitemap:    "Click to show a sitemap style list",
			commas:     "Click to show a comma separated list",
			numCols:    "Click to change number of columns",
			excerpts:   "Click to show excerpts",
			descr:      "Click to show the description slice",
			slices:     "Click to show all slices",
			contents:   "Click to show entire tiddler contents",
			sliders:    "Click to show tiddler contents in sliders",
			noexcerpts: "Click to show entire title only"
		},

		tooDeepMessage: "* //sitemap too deep...//"
	},

	config: {
		showTaggingCounts: true, // append " (N)" counts to list entries
		listOpts: {
			// the first one will be the default
			sortBy:     ["title","modified","created"],
			sortOrder:  ["asc","desc"],
			hideState:  ["show","hide"],
			listMode:   ["normal","group","sitemap","commas"],
			numCols:    ["1","2","3","4","5","6"],
			excerpts:   ["noexcerpts","excerpts","descr","slices","contents","sliders"]
		},
		valuePrefix: "taggly.", // prefix for the per-tiddler option slices
		excludeTags: ["excludeLists","excludeTagging"], // tags that hide a tiddler from lists
		excerptSize: 50, // max characters shown in "excerpts" mode
		excerptMarker: "/%"+"%/", // explicit excerpt cut-off marker in tiddler text
		siteMapDepthLimit: 25 // recursion guard for sitemap mode
	},

	getTagglyOpt: function(title,opt) {
		var val = store.getValue(title,this.config.valuePrefix+opt);
		return val ? val : this.config.listOpts[opt][0];
	},

	setTagglyOpt: function(title,opt,value) {
		if (!store.tiddlerExists(title))
			// create it silently
			store.saveTiddler(title,title,config.views.editor.defaultText.format([title]),config.options.txtUserName,new Date(),"");
		// if value is default then remove it to save space
		return store.setValue(title,
			this.config.valuePrefix+opt,
			value == this.config.listOpts[opt][0] ? null : value);
	},

	getNextValue: function(title,opt) {
		var current = this.getTagglyOpt(title,opt);
		var pos = this.config.listOpts[opt].indexOf(current);
		// a little usability enhancement. actually it doesn't work right for grouped or sitemap
		var limit = (opt == "numCols" ? store.getTiddlersByTagExpr(title).length : this.config.listOpts[opt].length);
		var newPos = (pos + 1) % limit;
		return this.config.listOpts[opt][newPos];
	},

	toggleTagglyOpt: function(title,opt) {
		var newVal = this.getNextValue(title,opt);
		this.setTagglyOpt(title,opt,newVal);
	},

	// Draw one list-control button into `place` for tiddler `title`.
	// `type` is an option name ("hideState", "listMode", "excerpts",
	// "numCols") or a sort field ("title"/"modified"/"created"), which
	// gets special toggle behaviour tied to sortBy/sortOrder.
	createListControl: function(place,title,type) {
		var lingo = config.taggly.lingo;
		var label;
		var tooltip;
		var onclick;

		if ((type == "title" || type == "modified" || type == "created")) {
			// "special" controls. a little tricky. derived from sortOrder and sortBy
			label = lingo.labels[type];
			tooltip = lingo.tooltips[type];

			if (this.getTagglyOpt(title,"sortBy") == type) {
				// already sorting by this field: clicking flips the order
				label += lingo.labels[this.getTagglyOpt(title,"sortOrder")];
				onclick = function() {
					config.taggly.toggleTagglyOpt(title,"sortOrder");
					return false;
				}
			}
			else {
				// clicking switches the sort field and resets the order
				onclick = function() {
					config.taggly.setTagglyOpt(title,"sortBy",type);
					config.taggly.setTagglyOpt(title,"sortOrder",config.taggly.config.listOpts.sortOrder[0]);
					return false;
				}
			}
		}
		else {
			// "regular" controls, nice and simple
			label = lingo.labels[type == "numCols" ? type : this.getNextValue(title,type)];
			tooltip = lingo.tooltips[type == "numCols" ? type : this.getNextValue(title,type)];
			onclick = function() {
				config.taggly.toggleTagglyOpt(title,type);
				return false;
			}
		}

		// hide button because commas don't have columns
		if (!(this.getTagglyOpt(title,"listMode") == "commas" && type == "numCols"))
			createTiddlyButton(place,label,tooltip,onclick,type == "hideState" ? "hidebutton" : "button");
	},

	makeColumns: function(orig,numCols) {
		var listSize = orig.length;
		var colSize = listSize/numCols;
		var remainder = listSize % numCols;

		var upperColsize = colSize;
		var lowerColsize = colSize;

		if (colSize != Math.floor(colSize)) {
			// it's not an exact fit so..
			upperColsize = Math.floor(colSize) + 1;
			lowerColsize = Math.floor(colSize);
		}

		var output = [];
		var c = 0;
		for (var j=0;j<numCols;j++) {
			var singleCol = [];
			var thisSize = j < remainder ? upperColsize : lowerColsize;
			for (var i=0;i<thisSize;i++)
				singleCol.push(orig[c++]);
			output.push(singleCol);
		}

		return output;
	},

	drawTable: function(place,columns,theClass) {
		var newTable = createTiddlyElement(place,"table",null,theClass);
		var newTbody = createTiddlyElement(newTable,"tbody");
		var newTr = createTiddlyElement(newTbody,"tr");
		for (var j=0;j<columns.length;j++) {
			var colOutput = "";
			for (var i=0;i<columns[j].length;i++)
				colOutput += columns[j][i];
			var newTd = createTiddlyElement(newTr,"td",null,"tagglyTagging"); // todo should not need this class
			wikify(colOutput,newTd);
		}
		return newTable;
	},

	createTagglyList: function(place,title,isTagExpr) {
		switch(this.getTagglyOpt(title,"listMode")) {
			case "group":  return this.createTagglyListGrouped(place,title,isTagExpr); break;
			case "normal": return this.createTagglyListNormal(place,title,false,isTagExpr); break;
			case "commas": return this.createTagglyListNormal(place,title,true,isTagExpr); break;
			case "sitemap":return this.createTagglyListSiteMap(place,title,isTagExpr); break;
		}
	},

	getTaggingCount: function(title,isTagExpr) {
		// thanks to Doug Edmunds
		if (this.config.showTaggingCounts) {
			var tagCount = config.taggly.getTiddlers(title,'title',isTagExpr).length;
			if (tagCount > 0)
				return " ("+tagCount+")";
		}
		return "";
	},

	getTiddlers: function(titleOrExpr,sortBy,isTagExpr) {
		return isTagExpr ? store.getTiddlersByTagExpr(titleOrExpr,sortBy) : store.getTaggedTiddlers(titleOrExpr,sortBy);
	},

	getExcerpt: function(inTiddlerTitle,title,indent) {
		if (!indent)
			indent = 1;

		var displayMode = this.getTagglyOpt(inTiddlerTitle,"excerpts");
		var t = store.getTiddler(title);

		if (t && displayMode == "excerpts") {
			var text = t.text.replace(/\n/," ");
			var marker = text.indexOf(this.config.excerptMarker);
			if (marker != -1) {
				return " {{excerpt{<nowiki>" + text.substr(0,marker) + "</nowiki>}}}";
			}
			else if (text.length < this.config.excerptSize) {
				return " {{excerpt{<nowiki>" + t.text + "</nowiki>}}}";
			}
			else {
				return " {{excerpt{<nowiki>" + t.text.substr(0,this.config.excerptSize) + "..." + "</nowiki>}}}";
			}
		}
		else if (t && displayMode == "contents") {
			return "\n{{contents indent"+indent+"{\n" + t.text + "\n}}}";
		}
		else if (t && displayMode == "sliders") {
			return "<slider slide>\n{{contents{\n" + t.text + "\n}}}\n</slider>";
		}
		else if (t && displayMode == "descr") {
			var descr = store.getTiddlerSlice(title,'Description');
			return descr ? " {{excerpt{" + descr  + "}}}" : "";
		}
		else if (t && displayMode == "slices") {
			var result = "";
			var slices = store.calcAllSlices(title);
			for (var s in slices)
				result += "|%0|<nowiki>%1</nowiki>|\n".format([s,slices[s]]);
			return result ? "\n{{excerpt excerptIndent{\n" + result  + "}}}" : "";
		}
		return "";
	},

	notHidden: function(t,inTiddler) {
		if (typeof t == "string")
			t = store.getTiddler(t);
		return (!t || !t.tags.containsAny(this.config.excludeTags) ||
				(inTiddler && this.config.excludeTags.contains(inTiddler)));
	},

	// this is for normal and commas mode
	// Render the tagged list as a flat bulleted list (or one comma
	// separated line), honouring the stored sort options, counts and
	// excerpt mode, then draw it into `place` via drawTable.
	createTagglyListNormal: function(place,title,useCommas,isTagExpr) {

		var list = config.taggly.getTiddlers(title,this.getTagglyOpt(title,"sortBy"),isTagExpr);

		if (this.getTagglyOpt(title,"sortOrder") == "desc")
			list = list.reverse();

		var output = [];
		var first = true;
		for (var i=0;i<list.length;i++) {
			if (this.notHidden(list[i],title)) {
				var countString = this.getTaggingCount(list[i].title);
				var excerpt = this.getExcerpt(title,list[i].title);
				if (useCommas)
					output.push((first ? "" : ", ") + "[[" + list[i].title + "]]" + countString + excerpt);
				else
					output.push("*[[" + list[i].title + "]]" + countString + excerpt + "\n");

				first = false;
			}
		}

		// commas mode is always a single column
		return this.drawTable(place,
			this.makeColumns(output,useCommas ? 1 : parseInt(this.getTagglyOpt(title,"numCols"))),
			useCommas ? "commas" : "normal");
	},

	// this is for the "grouped" mode
	// Render the tagged list grouped under each tiddler's other tags.
	// Tiddlers that never land under a visible group stay in "leftovers"
	// and are emitted as a flat list before the groups (asc) or after
	// them (desc).
	createTagglyListGrouped: function(place,title,isTagExpr) {
		var sortBy = this.getTagglyOpt(title,"sortBy");
		var sortOrder = this.getTagglyOpt(title,"sortOrder");

		var list = config.taggly.getTiddlers(title,sortBy,isTagExpr);

		if (sortOrder == "desc")
			list = list.reverse();

		// start with everything in leftovers; grouped tiddlers are removed below
		var leftOvers = []
		for (var i=0;i<list.length;i++)
			leftOvers.push(list[i].title);

		// map of tag name -> wikitext of second-level bullets for that tag
		var allTagsHolder = {};
		for (var i=0;i<list.length;i++) {
			for (var j=0;j<list[i].tags.length;j++) {

				if (list[i].tags[j] != title) { // not this tiddler

					if (this.notHidden(list[i].tags[j],title)) {

						if (!allTagsHolder[list[i].tags[j]])
							allTagsHolder[list[i].tags[j]] = "";

						if (this.notHidden(list[i],title)) {
							allTagsHolder[list[i].tags[j]] += "**[["+list[i].title+"]]"
										+ this.getTaggingCount(list[i].title) + this.getExcerpt(title,list[i].title) + "\n";

							leftOvers.setItem(list[i].title,-1); // remove from leftovers. at the end it will contain the leftovers

						}
					}
				}
			}
		}

		var allTags = [];
		for (var t in allTagsHolder)
			allTags.push(t);

		var sortHelper = function(a,b) {
			if (a == b) return 0;
			if (a < b) return -1;
			return 1;
		};

		// sort group headings by the same field as the list itself, using
		// the tag tiddler's field when that tiddler exists, else by name
		allTags.sort(function(a,b) {
			var tidA = store.getTiddler(a);
			var tidB = store.getTiddler(b);
			if (sortBy == "title") return sortHelper(a,b);
			else if (!tidA && !tidB) return 0;
			else if (!tidA) return -1;
			else if (!tidB) return +1;
			else return sortHelper(tidA[sortBy],tidB[sortBy]);
		});

		var leftOverOutput = "";
		for (var i=0;i<leftOvers.length;i++)
			if (this.notHidden(leftOvers[i],title))
				leftOverOutput += "*[["+leftOvers[i]+"]]" + this.getTaggingCount(leftOvers[i]) + this.getExcerpt(title,leftOvers[i]) + "\n";

		var output = [];

		if (sortOrder == "desc")
			allTags.reverse();
		else if (leftOverOutput != "")
			// leftovers first...
			output.push(leftOverOutput);

		for (var i=0;i<allTags.length;i++)
			if (allTagsHolder[allTags[i]] != "")
				output.push("*[["+allTags[i]+"]]" + this.getTaggingCount(allTags[i]) + this.getExcerpt(title,allTags[i]) + "\n" + allTagsHolder[allTags[i]]);

		if (sortOrder == "desc" && leftOverOutput != "")
			// leftovers last...
			output.push(leftOverOutput);

		return this.drawTable(place,
				this.makeColumns(output,parseInt(this.getTagglyOpt(title,"numCols"))),
				"grouped");

	},

	// used to build site map
	// Recursively build the sitemap wikitext: an entry for `title` at
	// bullet depth `depth`, followed by entries for everything tagged
	// with it. Recursion stops past config.siteMapDepthLimit. Reads
	// this.config.inTiddler (set by createTagglyListSiteMap).
	treeTraverse: function(title,depth,sortBy,sortOrder,isTagExpr) {

		var list = config.taggly.getTiddlers(title,sortBy,isTagExpr);

		if (sortOrder == "desc")
			list.reverse();

		// one "*" per level of nesting
		var indent = "";
		for (var j=0;j<depth;j++)
			indent += "*"

		var childOutput = "";

		if (depth > this.config.siteMapDepthLimit)
			childOutput += indent + this.lingo.tooDeepMessage;
		else
			for (var i=0;i<list.length;i++)
				if (list[i].title != title)
					if (this.notHidden(list[i].title,this.config.inTiddler))
						childOutput += this.treeTraverse(list[i].title,depth+1,sortBy,sortOrder,false);

		// the depth-0 caller is the list's root tiddler, which is not shown
		if (depth == 0)
			return childOutput;
		else
			return indent + "[["+title+"]]" + this.getTaggingCount(title) + this.getExcerpt(this.config.inTiddler,title,depth) + "\n" + childOutput;
	},

	// this is for the site map mode
	createTagglyListSiteMap: function(place,title,isTagExpr) {
		this.config.inTiddler = title; // nasty. should pass it in to traverse probably
		var output = this.treeTraverse(title,0,this.getTagglyOpt(title,"sortBy"),this.getTagglyOpt(title,"sortOrder"),isTagExpr);
		// split the flat wikitext back into top-level bullets ("*[" at line
		// start) so the entries can be distributed across columns
		return this.drawTable(place,
				this.makeColumns(output.split(/(?=^\*\[)/m),parseInt(this.getTagglyOpt(title,"numCols"))), // regexp magic
				"sitemap"
				);
	},

	macros: {
		tagglyTagging: {
			handler: function (place,macroName,params,wikifier,paramString,tiddler) {
				var parsedParams = paramString.parseParams("tag",null,true);
				var refreshContainer = createTiddlyElement(place,"div");

				// do some refresh magic to make it keep the list fresh - thanks Saq
				refreshContainer.setAttribute("refresh","macro");
				refreshContainer.setAttribute("macroName",macroName);

				var tag = getParam(parsedParams,"tag");
				var expr = getParam(parsedParams,"expr");

				if (expr) {
					refreshContainer.setAttribute("isTagExpr","true");
					refreshContainer.setAttribute("title",expr);
					refreshContainer.setAttribute("showEmpty","true");
				}
				else {
					refreshContainer.setAttribute("isTagExpr","false");
					if (tag) {
        				refreshContainer.setAttribute("title",tag);
						refreshContainer.setAttribute("showEmpty","true");
					}
					else {
        				refreshContainer.setAttribute("title",tiddler.title);
						refreshContainer.setAttribute("showEmpty","false");
					}
				}
				this.refresh(refreshContainer);
			},

			refresh: function(place) {
				var title = place.getAttribute("title");
				var isTagExpr = place.getAttribute("isTagExpr") == "true";
				var showEmpty = place.getAttribute("showEmpty") == "true";
				removeChildren(place);
				addClass(place,"tagglyTagging");
				var countFound = config.taggly.getTiddlers(title,'title',isTagExpr).length
				if (countFound > 0 || showEmpty) {
					var lingo = config.taggly.lingo;
					config.taggly.createListControl(place,title,"hideState");
					if (config.taggly.getTagglyOpt(title,"hideState") == "show") {
						createTiddlyElement(place,"span",null,"tagglyLabel",
								isTagExpr ? lingo.labels.exprLabel.format([title]) : lingo.labels.label.format([title]));
						config.taggly.createListControl(place,title,"title");
						config.taggly.createListControl(place,title,"modified");
						config.taggly.createListControl(place,title,"created");
						config.taggly.createListControl(place,title,"listMode");
						config.taggly.createListControl(place,title,"excerpts");
						config.taggly.createListControl(place,title,"numCols");
						config.taggly.createTagglyList(place,title,isTagExpr);
						if (countFound == 0 && showEmpty)
							createTiddlyElement(place,"div",null,"tagglyNoneFound",lingo.labels.noneFound);
					}
				}
			}
		}
	},

	// CSS for the plugin, installed as the TagglyTaggingStyles shadow
	// tiddler by init() below. (todo fix these up a bit)
	styles: [
"/*{{{*/",
"/* created by TagglyTaggingPlugin */",
".tagglyTagging { padding-top:0.5em; }",
".tagglyTagging li.listTitle { display:none; }",
".tagglyTagging ul {",
"	margin-top:0px; padding-top:0.5em; padding-left:2em;",
"	margin-bottom:0px; padding-bottom:0px;",
"}",
".tagglyTagging { vertical-align: top; margin:0px; padding:0px; }",
".tagglyTagging table { margin:0px; padding:0px; }",
".tagglyTagging .button { visibility:hidden; margin-left:3px; margin-right:3px; }",
".tagglyTagging .button, .tagglyTagging .hidebutton {",
"	color:[[ColorPalette::TertiaryLight]]; font-size:90%;",
"	border:0px; padding-left:0.3em;padding-right:0.3em;",
"}",
".tagglyTagging .button:hover, .hidebutton:hover, ",
".tagglyTagging .button:active, .hidebutton:active  {",
"	border:0px; background:[[ColorPalette::TertiaryPale]]; color:[[ColorPalette::TertiaryDark]];",
"}",
".selected .tagglyTagging .button { visibility:visible; }",
".tagglyTagging .hidebutton { color:[[ColorPalette::Background]]; }",
".selected .tagglyTagging .hidebutton { color:[[ColorPalette::TertiaryLight]] }",
".tagglyLabel { color:[[ColorPalette::TertiaryMid]]; font-size:90%; }",
".tagglyTagging ul {padding-top:0px; padding-bottom:0.5em; margin-left:1em; }",
".tagglyTagging ul ul {list-style-type:disc; margin-left:-1em;}",
".tagglyTagging ul ul li {margin-left:0.5em; }",
".editLabel { font-size:90%; padding-top:0.5em; }",
".tagglyTagging .commas { padding-left:1.8em; }",
"/* not technically tagglytagging but will put them here anyway */",
".tagglyTagged li.listTitle { display:none; }",
".tagglyTagged li { display: inline; font-size:90%; }",
".tagglyTagged ul { margin:0px; padding:0px; }",
".excerpt { color:[[ColorPalette::TertiaryDark]]; }",
".excerptIndent { margin-left:4em; }",
"div.tagglyTagging table,",
"div.tagglyTagging table tr,",
"td.tagglyTagging",
" {border-style:none!important; }",
".tagglyTagging .contents { border-bottom:2px solid [[ColorPalette::TertiaryPale]]; padding:0 1em 1em 0.5em;",
"  margin-bottom:0.5em; }",
".tagglyTagging .indent1  { margin-left:3em;  }",
".tagglyTagging .indent2  { margin-left:4em;  }",
".tagglyTagging .indent3  { margin-left:5em;  }",
".tagglyTagging .indent4  { margin-left:6em;  }",
".tagglyTagging .indent5  { margin-left:7em;  }",
".tagglyTagging .indent6  { margin-left:8em;  }",
".tagglyTagging .indent7  { margin-left:9em;  }",
".tagglyTagging .indent8  { margin-left:10em; }",
".tagglyTagging .indent9  { margin-left:11em; }",
".tagglyTagging .indent10 { margin-left:12em; }",
".tagglyNoneFound { margin-left:2em; color:[[ColorPalette::TertiaryMid]]; font-size:90%; font-style:italic; }",
"/*}}}*/",
		""].join("\n"),

	// Register the tagglyTagging macro, install the stylesheet as a
	// shadow tiddler and re-apply styles whenever it changes.
	init: function() {
		merge(config.macros,this.macros);
		config.shadowTiddlers["TagglyTaggingStyles"] = this.styles;
		store.addNotification("TagglyTaggingStyles",refreshStyles);
	}
};

config.taggly.init();

//}}}

/***
InlineSlidersPlugin
By Saq Imtiaz
http://tw.lewcid.org/sandbox/#InlineSlidersPlugin

// syntax adjusted to not clash with NestedSlidersPlugin
// added + syntax to start open instead of closed

***/
//{{{
// Wiki formatter: <slider label>...body...</slider> renders a button that
// toggles the body's visibility; <slider+ label> starts open instead of
// closed.
config.formatters.unshift( {
	name: "inlinesliders",
	// match: "\\+\\+\\+\\+|\\<slider",
	match: "\\<slider",
	// lookaheadRegExp: /(?:\+\+\+\+|<slider) (.*?)(?:>?)\n((?:.|\n)*?)\n(?:====|<\/slider>)/mg,
	lookaheadRegExp: /(?:<slider)(\+?) (.*?)(?:>)\n((?:.|\n)*?)\n(?:<\/slider>)/mg,
	handler: function(w) {
		this.lookaheadRegExp.lastIndex = w.matchStart;
		var lookaheadMatch = this.lookaheadRegExp.exec(w.source)
		if(lookaheadMatch && lookaheadMatch.index == w.matchStart ) {
			// button label is the slider title plus a right-pointing chevron
			var btn = createTiddlyButton(w.output,lookaheadMatch[2] + " "+"\u00BB",lookaheadMatch[2],this.onClickSlider,"button sliderButton");
			var panel = createTiddlyElement(w.output,"div",null,"sliderPanel");
			// "+" captured in group 1 means the panel starts visible
			panel.style.display = (lookaheadMatch[1] == '+' ? "block" : "none");
			wikify(lookaheadMatch[3],panel);
			w.nextMatch = lookaheadMatch.index + lookaheadMatch[0].length;
		}
   },
   onClickSlider : function(e) {
		if(!e) var e = window.event;
		// the panel is the element created right after this button
		var n = this.nextSibling;
		n.style.display = (n.style.display=="none") ? "block" : "none";
		return false;
	}
});

//}}}

/*{{{*/
/* created by TagglyTaggingPlugin */
/* Styles for the tag-based navigation lists rendered at the foot of each tiddler. */
.tagglyTagging { padding-top:0.5em; }
.tagglyTagging li.listTitle { display:none; }
.tagglyTagging ul {
	margin-top:0px; padding-top:0.5em; padding-left:2em;
	margin-bottom:0px; padding-bottom:0px;
}
/* Reset spacing on the tagging container and its layout table. */
.tagglyTagging { vertical-align: top; margin:0px; padding:0px; }
.tagglyTagging table { margin:0px; padding:0px; }
/* Control buttons stay hidden until the enclosing tiddler is selected. */
.tagglyTagging .button { visibility:hidden; margin-left:3px; margin-right:3px; }
.tagglyTagging .button, .tagglyTagging .hidebutton {
	color:[[ColorPalette::TertiaryLight]]; font-size:90%;
	border:0px; padding-left:0.3em;padding-right:0.3em;
}
.tagglyTagging .button:hover, .hidebutton:hover,
.tagglyTagging .button:active, .hidebutton:active  {
	border:0px; background:[[ColorPalette::TertiaryPale]]; color:[[ColorPalette::TertiaryDark]];
}
.selected .tagglyTagging .button { visibility:visible; }
/* hidebutton matches the page background, so it is effectively invisible
   until its tiddler is selected. */
.tagglyTagging .hidebutton { color:[[ColorPalette::Background]]; }
.selected .tagglyTagging .hidebutton { color:[[ColorPalette::TertiaryLight]] }
.tagglyLabel { color:[[ColorPalette::TertiaryMid]]; font-size:90%; }
.tagglyTagging ul {padding-top:0px; padding-bottom:0.5em; margin-left:1em; }
.tagglyTagging ul ul {list-style-type:disc; margin-left:-1em;}
.tagglyTagging ul ul li {margin-left:0.5em; }
.editLabel { font-size:90%; padding-top:0.5em; }
.tagglyTagging .commas { padding-left:1.8em; }
/* not technically tagglytagging but will put them here anyway */
.tagglyTagged li.listTitle { display:none; }
.tagglyTagged li { display: inline; font-size:90%; }
.tagglyTagged ul { margin:0px; padding:0px; }
.excerpt { color:[[ColorPalette::TertiaryDark]]; }
.excerptIndent { margin-left:4em; }
/* Strip table borders inside the tagging area. */
div.tagglyTagging table,
div.tagglyTagging table tr,
td.tagglyTagging
 {border-style:none!important; }
.tagglyTagging .contents { border-bottom:2px solid [[ColorPalette::TertiaryPale]]; padding:0 1em 1em 0.5em;
  margin-bottom:0.5em; }
/* Indentation steps for nested tag hierarchies: 1em extra per nesting level. */
.tagglyTagging .indent1  { margin-left:3em;  }
.tagglyTagging .indent2  { margin-left:4em;  }
.tagglyTagging .indent3  { margin-left:5em;  }
.tagglyTagging .indent4  { margin-left:6em;  }
.tagglyTagging .indent5  { margin-left:7em;  }
.tagglyTagging .indent6  { margin-left:8em;  }
.tagglyTagging .indent7  { margin-left:9em;  }
.tagglyTagging .indent8  { margin-left:10em; }
.tagglyTagging .indent9  { margin-left:11em; }
.tagglyTagging .indent10 { margin-left:12em; }
.tagglyNoneFound { margin-left:2em; color:[[ColorPalette::TertiaryMid]]; font-size:90%; font-style:italic; }
/*}}}*/
! The vi editor

!! vi Intro

!!! Background: 
* ex, vi, vim
** {{Command{ex}}} = line oriented text editor (for printed output / slow displays / modems)
*** demonstrate ''c'' (change) and ''i'' (insert) commands.  Go to a line number, use command with ''.'' to return to prompt.
** {{Command{vi}}} = screen oriented instead of line oriented
*** Different modes - either entering text or executing commands
*** Commands are either {{Command{vi}}} commands or {{Command{ex}}} commands.
** {{Command{ex}}} & {{Command{vi}}} are different interfaces to the same program
** {{Command{ex}}} & {{Command{vi}}} began with original unix versions, over 30 years ago
** {{Command{vi}}} is now the standard unix text editor
** {{Command{vim}}} = vi Improved - extra commands and functionality

!!! Using vi:
* Opening a document for editing loads it into a buffer, which is the in-memory text of a file.  
** Any changes are made to the buffer and not saved to the file until the //write// command is provided.
* There are two Modes:
** Command mode - where you provide commands to the editor
*** These may be either {{Command{vi}}} or {{Command{ex}}} commands
** Input mode - where you can interact with the content of the file
*** You'll typically see the string ''-- INSERT --'' in the bottom-left corner when you're in Input Mode
*** Leave input mode by pressing ESC
* vi commands (command mode) contain an operator (what to do) and scope (what to do it on)
** Examples:
*** {{Monospaced{''d$''}}} - delete (d) all text from the cursor to the end of the line ($ typically means end of line)
*** {{Monospaced{''dw''}}} - delete (d) the current word
*** {{Monospaced{''d5w''}}} - delete (d) the current and next 4 (5) words (w)
*** {{Monospaced{''d2d''}}} - delete (d) the current and next 1 (2) line (d)
*** {{Monospaced{''cw''}}} - change (c) the next word (w), placing you in input mode
*** {{Monospaced{''ct:''}}} - change (c) all characters until (t) the next colon (:)
* Searching with ''/'' and ''?''
** Search down with the ''/'' key
** Search up with the ''?'' key
*** After you type either ''/'' or ''?'', your cursor will move to the bottom-left corner and you will be prompted to enter a search string.  Press enter to begin the search.
** Repeat your last search with ''n''


!! Using ex commands in vi

The {{Command{vi}}} editor is the ''vi''sual screen-oriented front-end for the {{Command{ex}}} line-oriented text editor.  {{Command{ex}}} was one of the original Unix text editors from the days where text files could only be displayed and edited one line at a time.  It wasn't yet possible to display a full screen of text.  The ''vi''sual functionality was supported after technology evolved to support full-screen document editing.  {{Command{vi}}} also supports the original {{Command{ex}}} commands for manipulating a document.  These commands bring a great deal of power to the editor and make solving complex tasks rather simple.

* Press the : (colon) key to enter {{Command{ex}}} command mode when you are not in Input mode.  Your cursor will move to the bottom left corner.
* {{Command{ex}}} commands will be displayed on the bottom status line.  Press ~CTRL-C to cancel the command and return to vi mode.
* Syntax: {{Monospaced{'' :[address]command ''}}}
** {{Monospaced{'' :[address] ''}}} is an optional component which allows you to specify which lines to act upon.  

!!! Valid address formats
* Addresses may be addressed singly:
** {{Monospaced{''.''}}} - represents current line (default if no address is specified)
** {{Monospaced{''//n//''}}} - a specific line number
** {{Monospaced{''$''}}} - last line in the file
* or as a range:
**{{Monospaced{''%''}}} - Whole file
** {{Monospaced{''address1,address2''}}} - from address1 to address2.
** Also includes +//n// and -//n// to include the next or previous //n// lines
* Examples:
** {{Monospaced{'':12,20d''}}} - delete lines 12 to 20
** {{Monospaced{'':.,+5''}}}  - current and next five lines
** {{Monospaced{'':10,$''}}} - lines 10 through the end of the file
** {{Monospaced{'':$-2,$''}}} - last three lines (last line and two previous)

!!! Most useful ex commands

* ''d'' - delete lines
** {{Monospaced{'':10d''}}} - delete line 10
** {{Monospaced{'' :1,10d ''}}} - delete lines 1 to 10
* ''e'' - edit
** {{Monospaced{'':e! ''}}} - reopen current file, discarding changes
* ''s'' - substitute
**{{Monospaced{'' :s/one/two/ ''}}} - change first instances of one to two on the current line
**{{Monospaced{'' :%s/one/two/ ''}}} - change first instance of one to two on all lines in the document
**{{Monospaced{'' :%s/one/two/g ''}}} - change all instances of one to two on all lines in the document
**{{Monospaced{'' :.,+5s/one/two/g ''}}} - change all instances of one to two on current and next 5 lines.
* ''g'' - globally execute specified commands on lines containing a particular pattern
** {{Monospaced{'' :g/stuff/d ''}}} - delete all lines containing the string stuff
** {{Monospaced{'' :g/lpd-errs/s/^/#/ ''}}}  - add a comment to the beginning of the line on all lines containing the string lpd-errs
** {{Monospaced{'' :10,20g/stuff/d ''}}} - remove lines between lines 10 and 20 that contain the string stuff

----


!! More info
*vi handouts: [[vi Diagram|handouts/viDiagram.pdf]] & [[Old Handout|handouts/viHandout.pdf]]
*{{Command{vimtutor}}} command
*http://www.gentoo.org/doc/en/vi-guide.xml
*[[UNIX Command summary|handouts/UnixCommandSummary.pdf]] back page

http://docstore.mik.ua/orelly/unix/unixnut/ch09_01.htm


! Using the compilers

Also a simple exercise to get more practice editing text files with vi

{{Command{gcc}}} & {{Command{g++}}}
Use {{Command{gcc}}} for compiling C code and {{Command{g++}}} for compiling C++ code.  Source code file extensions must either be .c or .cpp

{{Command{gcc -o //name_of_executable// source.c}}}
{{Command{g++ -o //name_of_executable// source.cpp}}}

//name_of_executable// = executable file to create after compiling your source code, instead of using the default a.out

{{{
#include <stdio.h>

main()
{
    printf("Hello World in C\n\n");
}
}}}

{{{
#include <iostream>
using namespace std;
int main()
{ 
  cout << "Hello World!" << endl;
  return 0;
}
}}}


! Assignments

!! Read :
 - Chapter 12 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]]

!! Complete: 
 - [[Lab 27|labs/lab27.pdf]] & [[Lab 28|labs/lab28.pdf]] 
 - These labs are optional for additional vi practice and will be accepted for extra credit.



/***

|Name|ToggleSideBarMacro|
|Created by|SaqImtiaz|
|Location|http://lewcid.googlepages.com/lewcid.html#ToggleSideBarMacro|
|Version|1.0|
|Requires|~TW2.x|
!Description:
Provides a button for toggling visibility of the SideBar. You can choose whether the SideBar should initially be hidden or displayed.

!Demo
<<toggleSideBar "Toggle Sidebar">>

!Usage:
{{{<<toggleSideBar>>}}} <<toggleSideBar>>
additional options:
{{{<<toggleSideBar label tooltip show/hide>>}}} where:
label = custom label for the button,
tooltip = custom tooltip for the button,
show/hide = use one or the other, determines whether the sidebar is shown at first or not.
(default is to show the sidebar)

You can add it to your tiddler toolbar, your MainMenu, or where you like really.
If you are using a horizontal MainMenu and want the button to be right aligned, put the following in your StyleSheet:
{{{ .HideSideBarButton {float:right;} }}}

!History
*23-07-06: version 1.0: completely rewritten, now works with custom stylesheets too, and easier to customize start behaviour.
*20-07-06: version 0.11
*27-04-06: version 0.1: working.

!Code
***/
//{{{
// Namespace for the toggleSideBar macro.
config.macros.toggleSideBar = {};

// Tunables: the CSS injected for each state and the arrow glyphs shown on the button.
config.macros.toggleSideBar.settings = {
	styleHide :  "#sidebar { display: none;}\n"+"#contentWrapper #displayArea { margin-right: 1em;}\n"+"",
	styleShow : " ",
	arrow1: "«",
	arrow2: "»"
};

// Macro handler: <<toggleSideBar [label] [tooltip] [show|hide]>>
// Renders the toggle button; when the third parameter is "hide" the sidebar
// starts hidden. State is tracked via a "toggle" attribute on #sidebar.
config.macros.toggleSideBar.handler = function(place,macroName,params,wikifier,paramString,tiddler)
{
	var tooltip = params[1] || 'toggle sidebar';
	// Anything other than an explicit "hide" leaves the sidebar visible at first.
	var mode = (params[2] && params[2] == "hide") ? "hide" : "show";
	var arrow = (mode == "hide") ? this.settings.arrow1 : this.settings.arrow2;
	// A label of '.' (or no label) produces an arrow-only button.
	var label = (params[0] && params[0] != '.') ? params[0] + " " + arrow : arrow;
	createTiddlyButton(place,label,tooltip,this.onToggleSideBar,"button HideSideBarButton");
	if(mode == "hide") {
		// Record the initial state and apply the hiding stylesheet immediately.
		(document.getElementById("sidebar")).setAttribute("toggle","hide");
		setStylesheet(this.settings.styleHide,"ToggleSideBarStyles");
	}
};

// Click handler: flip the recorded state, swap the injected stylesheet, and
// swap the arrow glyph inside the button label.
config.macros.toggleSideBar.onToggleSideBar = function() {
	var sidebar = document.getElementById("sidebar");
	var settings = config.macros.toggleSideBar.settings;
	if(sidebar.getAttribute("toggle") == 'hide') {
		setStylesheet(settings.styleShow,"ToggleSideBarStyles");
		sidebar.setAttribute("toggle","show");
		this.firstChild.data = (this.firstChild.data).replace(settings.arrow1,settings.arrow2);
	} else {
		setStylesheet(settings.styleHide,"ToggleSideBarStyles");
		sidebar.setAttribute("toggle","hide");
		this.firstChild.data = (this.firstChild.data).replace(settings.arrow2,settings.arrow1);
	}
	return false; // suppress default click behaviour
};

// One-off styling for the toggle button itself.
setStylesheet(".HideSideBarButton .button {font-weight:bold; padding: 0 5px;}\n","ToggleSideBarButtonStyles");

//}}}
|~ViewToolbar|closeTiddler closeOthers editTiddler > fields syncing permalink references jump|
|~EditToolbar|+saveTiddler -cancelTiddler deleteTiddler|

{{Note{This video is a nice demo and overview on how this all works.  It may be helpful to review it before proceeding.  https://www.youtube.com/watch?v=XFJ6_BYno08}}}

!! Defeating firewalls with SSH to access protected resources

Knowing how to more fully use SSH and its tunneling and proxy capabilities to defeat firewalls is an excellent skill for a security practitioner to have!  There are two methods we can use with SSH to defeat firewalls:

A.  Dynamic application-level port forwarding (SOCKS proxy)
<<<
Specifies local "dynamic" application-level port forwarding.  This works by allocating a socket to listen to a port on the local side, optionally bound to the specified bind_address.  Whenever a connection is made to this port, the connection is forwarded over the secure channel, and the application protocol is then used to determine where to connect to from the remote machine.
<<<

B.  Port forwarding
<<<
Specifies that connections to the given TCP port or Unix socket on the local (client) host are to be forwarded to the given host and port, or Unix socket, on the remote side.  This works by allocating a socket to listen to either a TCP port on the local side, optionally bound to the specified bind_address, or to a Unix socket.  Whenever a connection is made to the local port or socket, the connection is forwarded over the secure channel, and a connection is made to either host port hostport, or the Unix socket remote_socket, from the remote machine.
<<<

Method A. functions as a traditional application-level proxy.  You would configure your application (eg: web browser) to proxy all connections through the tunnel.  Method B. creates a 1:1 connection:  a TCP port on your local PC is tunneled through the SSH connection to a specific IP address and TCP port on the other side.  This method is best when there is no option to configure a proxy in your application. 

We're going to use method ''A'' for accessing internal web resources behind our class router.  This grants us the most flexibility since our browser allows us to configure an application-level proxy.


Before you begin, open your web browser and load the page http://ifconfig.me.  Take note of the IP address displayed.  We will compare this to the IP address you receive after everything is set up.  This is our lab server's IP address at the time these notes were written.

[img[img/proxyIP.jpg]]


!!! A. Establishing a SOCKS proxy with SSH

A proxy is a middle man, passing on network requests to their destination on your behalf.

A SOCKS proxy (socket secure) is a protocol to route packets between a client and a server through an intermediate proxy.  This is used (typically for web traffic) when the client is not able to communicate with the server directly, but the client can communicate with the proxy system and the chosen proxy can communicate with the server.  Some sites set up a proxy for web traffic as a means to enforce policy, monitor traffic, and block direct connections to web sites.

Here, your home PC cannot access your web server VM or the Naemon monitoring server but the class shell server can.  We'll use the class shell server to proxy your browser's web connections and be the middleman for your web requests.  This diagram illustrates the overall goal.  We see your proxy connection traveling through the encrypted SSH tunnel to the class shell server.  Web requests are then made from the perspective of the class shell server.

[img[img/proxy.png]]


SSH can be used to establish a SOCKS proxy.  This functionality is available from putty or the command line ~OpenSSH

''1.'' To set up the Proxy on your home PC, complete either ''a)'' or ''b)'', depending on your OS:

''a)'' If your home OS is Mac or Unix:  This command will create an encrypted proxy tunnel between your PC and the specified host, in this case our class shell server.  Traffic connecting to your PC on port 8118 will then pass through this proxy.  Execute a similar command on your home computer.  You may also need to update the username.
<<<
Set up SOCKS proxy:  {{Command{ssh -D 8118 lab.ncs205.net}}}
<<<

''b)'' Follow these steps when connecting with Putty from your home Windows PC:
<<<
* Expand the Connection / SSH menu
* Select Tunnels
* Enter ''8118'' in the Source port box
* Select ''Dynamic''
* Click Add
* Connect to a remote host (the class shell server) as normal
<<<
* [[This video|Putty Proxy]] demonstrates configuring Putty to add the dynamic tunnel.


''2.'' Your browser must be configured to pass traffic through the encrypted proxy.

I use the ~FoxyProxy extension to easily toggle between proxy settings in my browser.  It can also be configured to automatically send only selected sites through the proxy.  
* Firefox:
** [[Firefox Extension|https://addons.mozilla.org/en-US/firefox/addon/foxyproxy-standard/]]
** ~NCS205 settings file: [[FoxyProxy-ncs205.json|https://www.ncs205.net/media/FoxyProxy-ncs205.json]]
* Chrome:
** [[Chrome Extension|https://chrome.google.com/webstore/detail/foxyproxy-standard/gcknhkkoolaabfmlnjonogaaifnjlfnp]]
** ~NCS205 settings file:  You're on your own for now.  I don't have a version for Chrome yet.


Install the browser extension, import the settings file, and enable the proxy.

* [[This video|Firefox Proxy]] demonstrates using Firefox with the proxy to access an internal website


!!! B. Verification

Verification should be built into everything you configure.  Now that your proxy is established, let's verify it is functioning correctly and web connections from firefox are flowing through the class infrastructure.  Load the page http://ifconfig.me again in your browser and observe the IP address.  It should have changed from the original value you observed and instead contain the public IP address of the class shell server in the diagram above.  With the class server acting as a middle man, you can now load internal resources in this web browser which would have otherwise been blocked from the outside world.


!!! C. Naemon infrastructure monitoring

[[Naemon|https://www.naemon.org/]] is a tool which continuously monitors resources to provide a high level view of the health of an environment.  I'm running a Naemon server to monitor your ~VMs and use it to assist with grading your labs.  You can also use it to monitor the state of your systems and correct any issues it discovers.

Naemon is running on the internal class network and is not directly accessible from the outside world.  You will need to bypass the router and use the class shell server as a proxy in order to reach it.  

Once the proxy is configured in your browser, navigate to the URL &nbsp; '' http://head.ncs205.net/ ''.  The username and password are both set to ''naemon''.

This video contains a brief [[Naemon Introduction]].


{{Note{Naemon status checks run every two hours.  If you fix a problem, you will either need to wait up to two hours for the recheck or force Naemon to recheck.}}}

{{Warning{Warning: Naemon checks are not a replacement for your own sound testing and verification.  They may return false positives and negatives.  Not every possibility can be evaluated.  They are only a troubleshooting and status aid; not a definitive determination that something is correct.  I will still perform manual testing for most of your labs that Naemon cannot fully evaluate.}}}
!! Asking for help
* Title your posts appropriately.  Use something descriptive in the name and not just the lab and question number.  A subject like @@Lab 17, #2 - Incorrectly discarding data@@ is far more helpful than something generic like ''Lab 17''.
* When asking for help in blackboard, be sure to include relevant supporting information.  You'll receive faster responses if you provide everything someone needs to help you.
** If you're asking about a lab question, include that question in your post so everyone doesn't need to first look at the lab.
** Did you receive an error from a command?  Be sure to include the error and the command you ran.  The shell prompt will also include helpful information:
*** The host you're running the command on
*** The user you're running the command as
*** A portion of the current working directory.  Including the full output of the {{Command{pwd}}} command would be helpful too
*** The exact command string you're running.
** Don't forget to include any relevant log information and troubleshooting steps you've already taken.  You're more likely to get help if you start the process and can describe what you've already done to troubleshoot.
* Be sure to review everything for typos first.  Too many posts to Blackboard asking for help will be for problems caused by typos.  Save some time and check your typing first.
* Screenshots are helpful too.  Pictures are worth a thousand words.

!! Posting Screenshots
When posting screenshots, use the Insert Local Files option (circled) in Blackboard.  Don't attach a file.  It's much easier to work with embedded images than ones that need to be opened in a new tab.

@@display:block;text-align:center;[img[img/screenshots.png]]@@


!! Pasting in terminal output
Everyone should be using the Blackboard discussion boards during the course of the semester and will likely need to paste in output from the command line at some point. 

Aesthetics and readability should be considered in everything you produce.  We can make our post easier to read with a couple additional steps.

''1.'' Paste your copied text from the terminal where you would like it to appear.  Finish typing out your message.  Before sending, change the formatting for the portions you pasted from the terminal.

''2.'' Select the text you pasted in and change the paragraph type to Formatted Code.  This will remove the double spacing.

''3.'' Select the text you pasted in and choose the font ''Courier New''.  All commands and text copied from the terminal should be written with a monospaced font like Courier New to make spacing uniform between the characters and show that what you're typing is a command or output from one.

''4.''  Select the command you executed to get the output and change it to bold.  This makes it easier to identify the command that was used from the output returned.  Including the shell prompt and executed command provides important context.

''5.'' If appropriate, use the highlighter to draw attention to any parts you're talking about.  Be sure to first change the color to a brighter one.

@@display:block;text-align:center;[img[img/blackboard4.png]]@@

You'll finally be left with something that is much easier to read.  You're more likely to get a response to your forum post if you provide all necessary information in a way that's easy to work with.  Pasting text like this is preferable to just posting a screenshot.  If you paste in the text, someone can quote it in a reply and easily highlight relevant parts.

@@display:block;text-align:center;[img[img/blackboard3.png]]@@
/%


----

 avoid Blackboard's text mangling and

 If you paste copied text from the terminal, blackboard will turn it into a mangled mess:

than the Blackboard mangled mess


@@display:block;text-align:center;[img[img/blackboard0.png]]@@


''1.'' Insert a few blank lines where you want to put the pasted text.  These blank lines will make it easier to add additional text after inserting your pasted text from the terminal

''2.'' Choose the HTML editor from the Toolbar

@@display:block;text-align:center;[img[img/blackboard1.png]]@@


''4.'' Add a {{Command{&lt;pre>}}} HTML tag before your pasted text and a {{Command{&lt;/pre>}}} tag after it.  This will prevent the mangled formatting and preserve all spacing, just as you see it in the terminal.

@@display:block;text-align:center;[img[img/blackboard2.png]]@@

''5.'' Click update.  You should now see your copied text nicely formatted in Blackboard.

''6.'' Select the text you pasted in and choose the font ''Courier New''.  All commands and text copied from the terminal should be written with a monospaced font like Courier New to make spacing uniform and highlight what you're typing is a command or output from one.

''7.''  Highlight the command you executed to get the output and change it to bold.  This makes it easier to identify the command that was used from the output returned.

''8.'' If appropriate, use the highlighter to draw attention to any parts you're talking about.  Be sure to first change the color to a brighter one.

@@display:block;text-align:center;[img[img/blackboard4.png]]@@


You'll finally be left with something that is much easier to read than the Blackboard mangled mess.  You're more likely to get a response to your forum post if it is easier to read.  Pasting text like this is preferable to just posting a screenshot.  If you paste in the text, someone can quote it in a reply and easily highlight relevant parts.

@@display:block;text-align:center;[img[img/blackboard3.png]]@@  %/

!! Using Discord
We'll need to keep Discord organized in order to keep it useful.  Get in the habit of this now, because you'll have these same issues later in the workplace.  The concepts are very similar to what we need to do on Slack in the corporate world.

!!! There are four types of channels:
# //administrative// - Administrative questions about the class like grading, due dates, and technical support issues.  Not for course content.
# //misc-chatter// - Conversation not related to this class
# //notes-resource// - Posts about general course notes and resources that might be helpful
# //week#// - The weekly course content discussions.  Post to this channel regarding material that was ''//assigned//'' in this week.
** For example, if you have a question about a week 1 lab, post it to the week 1 channel even if we're now in week 2.

!! Asking for help
* Use threads for your questions to help keep things organized.
** See below for an example on using threads
** Title your threads appropriately.  Use something descriptive in the name and not just the lab and question number.  A subject like @@Lab 17, #2 - Incorrectly discarding data@@ is far more helpful than something generic like ''Lab 17''.
** Organization and usability is important in everything you do.  Full credit for asking a question will only be given for conversations which are within threads.
* When asking for help, be sure to include relevant supporting information.  You'll receive faster responses if you provide everything someone needs to help you.
** If you're asking about a lab question, including that question in your post is helpful so everyone doesn't need to first look at the lab to know what you're talking about.
** Send us what you're seeing, don't just describe it.  A picture is worth a thousand words
*** Did you receive an error from a command?  Be sure to include the error and the command you ran.  
*** The shell prompt will also include helpful information:
**** The host you're running the command on
**** The user you're running the command as
**** A portion of the current working directory.  Including the full output of the {{Command{pwd}}} command might be helpful too
**** The exact command string you're running.
** Don't forget to include any relevant log information and troubleshooting steps you've already taken.  You're more likely to get help if you start the process and can describe what you've already done to troubleshoot.
* Be sure to review everything for typos first.  Too many posts asking for help will be for problems caused by typos.  Save some time and check your typing first.

!! Using code blocks

* Be sure all code, commands, and output are enclosed within a code block.  This will make it easier to identify commands and prevent Discord from interpreting special characters.
* Single commands can be put inside of a code block by enclosing your command in backticks.
* A series of lines can be put inside of a code block by putting three backticks at the start of the first line and three backticks at the end of the last line.
* When possible, sending text in code blocks is better than just sending a screenshot.  Text sent in a screenshot cannot be copy/pasted for any testing

Example of using single line code block:
[img[img/discord-code3.png]]

Example of using multi-line code block:
[img[img/discord-code1.png]]

Results of using code blocks:
[img[img/discord-code2.png]]


!! Using threads

Threads in Discord will help keep the weekly channels and conversations organized.  Create a new thread for each question you're asking.

----
[img[img/discord1.png]]
# Click on the week number for the material you would like to discuss
# Click on the threads icon up top
----

[img[img/discord2.png]]
# Enter your thread name
# Enter your question in the first post, followed by any supporting information in additional posts.
# Click on //Create Thread//
----

[img[img/discord3.png]]
# To join a thread, click on the //# Message// link.  Your thread will open to the right
# Post any additional messages within the thread to the right. 
----

[img[img/discord4.png]]
The available threads will appear under the weekly channel.  You can click on the thread title to easily join the conversation.
----

[img[img/discord5.png]]
If you would like to follow an interesting thread, right click on the thread message area and choose //Join Thread//.
----

[img[img/discord6.png]]
After joining a thread, it will appear on the left side of your screen under the channel for the week number.  This will make it easier to find later.


<!--{{{-->
<div class='toolbar' macro='toolbar [[ToolbarCommands::ViewToolbar]]'></div>
<div class='title' macro='view title'><span class="miniTag" macro="miniTag"></span></div>
<div class='subtitle'>Updated <span macro='view modified date [[MMM DD, YYYY]]'></span> (<span macro='message views.wikified.createdPrompt'></span> <span macro='view created date [[MMM DD, YYYY]]'></span>)<BR><BR></div>
<div class='viewer' macro='view text wikified'></div>
<div class="tagglyTagging" macro="tagglyTagging"><BR><BR></div>
<div class="tagglyTagged" macro="hideSomeTags"></div>
<div class='tagClear'></div>
<!--}}}-->
/% @@ This will be used in the second half of the semester @@ %/
Proxmox hypervisor:  https://lab.ncs205.net/

Subnet: 192.168.12.0/24
Gateway: 192.168.12.1
DNS: 192.168.12.10

!! IP Addresses:
| !Last octet | !Host Name | !Description |
| n | test |Testing|
| n+1 | www |Web Server|
| n+2 | core |DNS, syslog, ntp|
| n+3 | files |Storage Server|
| n+4 |>| Unused |
| n+5 |>|~|
| n+6 | ||
| n+7 | ||

* The fully-qualified hostname for your VM is //host//.//username//.ncs205.net where //host// is in the second column in the table above.
* Your VM IP addresses should be in the form 192.168.12.//n// where //n// is the first IP address you have been assigned in the table below.  Increment the value of //n// as necessary for additional ~VMs.  
** Do not deviate from the provided IP addresses.  These IP addresses will be checked for grading.  If you use other ~IPs you will not receive credit for the labs and may conflict with other students.

| !Start IP | !Username |
| 24 | merantn |
| 32 | backers |
| 40 | caplanc |
| 48 | cormiej |
| 56 | esquitc |
| 64 | fitzgea2 |
| 72 | khanss |
| 80 | khandat |
| 88 | leonevj |
| 96 | lionht |
| 104 | marvinc |
| 120 | penas |
| 128 | peralas |
| 136 | reynolz |
| 144 | stevendc |
| 152 | subedib |
| 160 | tajs |
| 168 | talasih |
| 176 | tankoud |
| 184 | terronl |
| 192 | tut |
| 200 | sheard |

! Lab network topology

[img[img/topo.png]]


/% awk -v ip=32 '{print "| " ip " | " $1 " |"; ip+=8}' user2009.txt %/
! Introduction to NCS 205

!!! Expectations:

Mostly outlined in the [[syllabus|syllabus/NCS205Syllabus2209.pdf]], but to recap:

* Honesty & Integrity - Cheating generally results in a failing ''course'' grade.
** This course is in a security program.  If you cannot be trusted, you do not belong here.
* Motivation & practice - You must be motivated to practice the work in order to pick up the material.
** An article discussing [[productive struggle|http://maateachingtidbits.blogspot.com/2017/11/the-role-of-failure-and-struggle-in.html]] that roughly outlines how I'm teaching this course.
* Graded Homework - Almost everything will be graded.
* Don't fall behind - Else the workload will bury you.
** Let me know early if you're starting to run into trouble.

!!! Class Resources
* Required Textbooks:  
** First half of the semester - [[The Linux Command Line|http://linuxcommand.org/tlcl.php]]
** Second half of the semester - [[Linux Administration: A Beginners Guide, Eighth Edition|https://www.mhebooklibrary.com/doi/book/10.1036/9781260441710]]
* Class website:  https://www.ncs205.net/
** The class website will be our primary resource for course content
** Each content page is generally divided into three sections:  
### the content assignment (what to read or watch),
### my notes about the content
### the deliverables for that content
* Blackboard
** Blackboard will be used only for announcements, the discussion board, and tracking grades.
** Subscribe to the weekly discussion boards to be notified of new posts.
*** Participation here will be [[evaluated as well|Class Participation]].

!!! Class Cadence
* A week's worth of new material will be posted to the class website Sunday evening in two parts.  
** Unless stated otherwise, part 1 assignments will be due by end of day Wednesday
** Part 2 assignments will be due by end of day Saturday.
** An [[assignment calendar|Calendar]] can be found on our class website in the menu bar above.
* Carnegie credit hour
** A Carnegie credit hour is defined as 50 minutes of lecture and 2 hours of prep/homework for each traditional course credit hour 
** This requirement is defined in [[SUNY Policy|https://www.suny.edu/sunypp/documents.cfm?doc_id=168]]
** Translated to our online class, this means we are expected to perform approximately 12 hours of instructional activity per week
** This is hard to gauge in an online class.  Please let me know if you feel we are regularly exceeding that.

!!! Extra Help
* Chat sessions with {{Command{talk}}} (see below)
* Ad-hoc online meetings via Zoom.  Let me know if you'd like to schedule one.
* Weekly Zoom meetings if there's interest


{{Warning{
This class will test your skills as a student; ''being a good student will be important in order to successfully complete this course''.  This will not be one where you can do the bare minimum and skate by with a good grade.  Good ''time management'' and ''study skills'' will be critical.  ''If you neglect the material you will likely not successfully complete the course.''

Everything we do this semester will look back on previous work. If you're rushing through and not retaining it, you will surely pay for it later.  Having a keen eye for detail, paying attention to the directions, and taking the time to practice and retain the material will make for a much smoother semester.
}}}


!! Accessing the class shell server

The class shell server is an always-on system we will connect to in order to practice the class assignments and submit homework.  There are two ways we will access the system - from the command line for entering commands or through a file transfer utility for uploading files.

!!! Connection Tools
* Access the shell (command line) with either:
** [[PuTTY for Windows|http://www.chiark.greenend.org.uk/~sgtatham/putty/download.html]] (Download the latest version of the 64-bit MSI installer)
** [[PuTTY for Mac|https://www.ssh.com/ssh/putty/mac/]]
* Transfer files between the server and your local system:
** Windows: [[WinSCP|https://winscp.net/eng/download.php]]
** Mac: scp/sftp on the command line or any SFTP client like [[FileZilla|https://filezilla-project.org/]]
* Portable versions exist for these applications.  This is convenient if you are using campus ~PCs that do not have the tools installed.  You may download and run them from a flash drive or your home directory in the lab.
** [[PuTTY|http://www.chiark.greenend.org.uk/~sgtatham/putty/download.html]] - Download and run putty.exe
** [[WinSCP|https://winscp.net/eng/download.php]]: Download the portable package

!!! Logging in
* Use one of the tools above to log in to lab.ncs205.net.
* Log in with your campus username
* Your initial password will be posted to the Week 1 forums in the Blackboard Discussion Board.
* Change your password after logging in.
** Run the {{Command{passwd}}} command to change your password
** ''Any accounts still using the default password will be locked on Friday, September 3.''

This short video will walk you through downloading ~PuTTY, a unix remote access client, and connecting to the system for command line access.  Your initial password for the server can be found in the Blackboard discussion board.

Video:  [[Accessing the shell server]]
/% Download: ~PuTTY - [[installer|https://the.earth.li/~sgtatham/putty/latest/w64/putty-64bit-0.70-installer.msi]] or [[exe|http://the.earth.li/~sgtatham/putty/latest/win64/putty.exe]] %/

!! Working on the command line

Console
* Console is considered the interface with a system as though you are physically sitting at its monitor and keyboard.  This lets us interact with the system before the operating system loads
* A virtual console is available for ~VMs or through a lights-out management utility such as a Dell iDRAC.  
Remote access
* Remote access to a Linux system such as our class shell server can also be obtained through a remote access service like SSH (Secure ~SHell).  
* SSH is the standard command-line remote access interface for Unix/Linux systems.  It allows us to interact via an SSH client, much like how your web browser is a client to a web server.
* Our shell server is a traditional timeshare server.  It's always available; we don't power it off.
Shells
* The shell is our interface with the command line.  It's a program that takes input from the user and passes it on to the system to process.


!!! Navigating our lab server's filesystem:
* Directory paths
** Directory paths enable us to have a hierarchy of directories and keep our files organized
** Similar to the command line on Windows
** The path separator is a forward slash on Unix/Linux systems - {{File{''/''}}}
** Change directories with the {{Command{cd}}} command
*** eg:  {{Command{cd /opt/pub/ncs205/submit}}}
** List the contents of the directory with the {{Command{ls}}} command
** List the contents of the directory in long format with the {{Command{ls -l}}} command
*** Displaying the contents of a directory in long format is always preferred so you can easily see all information about the files
* Some directories of interest:
** {{File{/home/}}} - User home directories typically reside within this directory
** {{File{/opt/pub/ncs205/submit/}}} - Lab/Homework assignments are uploaded to this directory
** {{File{/opt/pub/ncs205/returned/}}} - Graded homework assignments are stored in this directory for you to download
** {{File{/opt/pub/ncs205/data/}}} - Data files for labs are stored here
** {{File{/tmp/}}} - Temporary scratch space

!!! Executing commands
* Structure of a command string:
** {{Command{''command'' [options] [arguments]}}}
** options and arguments may be optional or required depending on the command
** In Unix command documentation, an item within the square brackets is an optional component

* Viewing files
** Display a file: {{Command{cat //filename//}}}
** Display a file one page at a time:  {{Command{less //filename//}}}
** Edit a text file:  {{Command{nano //filename//}}}  ''-or-''  {{Command{vi //filename//}}}


!!! Other useful commands
* The UNIX manual - {{Command{man}}}
** If you want to learn more about a command, check out its manpage.
** eg:  {{Command{man ls}}}

* Chat on the shell server with {{Command{talk}}}
** Log in and run {{Command{talk //username//}}} to start a chat session with that user
** Use the {{Command{w}}} command to see currently logged in users and check their idle times

!!! Using Blackboard
* The discussion boards make up 10% of your total course grade this semester.  
* Blackboard is a great example of a monopolistic business producing a sub-par product due to lack of market competition
* We're going to need to work around some of their deficiencies
* Please see [[Using Blackboard]] for more details

!!! Working efficiently
* View your previously executed commands with the {{Command{history}}} command
* Tab completion - Press the tab key to autocomplete commands or file paths
* Up / Down arrows - search up and down through your command history
* Page Up / Page Down - Use these keys to search through your command history for the last commands which begin with a given string
* [[Linux Shortcuts]]


!! Submitting homework assignments

See the [[Lab Assignments]] page for details 
! Material 
!! Read:
* Chapter 1 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]]


! Operating system basics 

!! Major components
*Kernel - Main control program of the computer:  process control and resource management.
*File system - Organize location of items on the system.  Everything is a file.  
*Shell - Main interaction between the user and the system.  The shell receives and interprets commands entered by users, passes them on to the kernel to execute.

!! Secondary components:
Not part of the core OS, but necessary to do useful things with the system
* Basic Utilities - Many are from the GNU project 
** System - Tools an administrator would use: mount, dd, fsck
** Userland - Tools regular users would use:  file system tools (cd, ls, mkdir), text editors (vi, pico), filters (grep, cut, sed), process tools (ps, kill) 
* Development environment - compilers
* System Documentation - man pages, docs, etc.
* Larger Applications - Graphical interface, word processor, image viewer, etc
* Specialized utilities (Like the tools that come with a distro like Kali)

!! UNIX is an OS that supports:
* Multi-tasking - foreground and background processes
* ~Multi-User - Multiple users may access the system at the same time.
** Privilege separation - administrators (root) and regular users, with regular users able to be isolated from each other.
* Time sharing - Share computing resources among many users
* Portability - Can be run on different types of hardware systems/architectures (~PCs, servers, game systems, phones, embedded, etc)

!! Types of Unix

* Started at Bell Labs
** 1969 Bell labs - AT&T Unics (Uniplexed Information & Computing Service)
** Unics became UNIX when multiuser support was added
** 1956 antitrust agreement with the government,  AT&T could not go into the computer business and charge for software.  They had to give licenses to anyone who asked.
** Sources distributed to researchers at universities allowing them to modify and extend.
** Unix editions 1 - 10 numbered from the edition of the printed manual.  Mostly for research and development purposes.

* 1978 BSD UNIX (Berkeley Software Distribution)
** Grad students at Berkeley modified and extended the AT&T code
** Bundled and released their addons for use at other universities.
** Early development slowed due to licensing issues with AT&T
** Would eventually fork into three projects: ~FreeBSD (1993), ~NetBSD (1993), & ~OpenBSD (1995)

* GNU project & the origins of Linux:
** GNU Project (GNU's not Unix) - Richard Stallman (1983) - Wanted to create a totally free OS. They started with the utilities.
*** FSF : Free Software Foundation (1985)
*** At MIT, saw many MIT software developers get picked off by companies and sign restrictive non-disclosure agreements.  
*** Many companies were now restricting access to Unix source code to limit modification and redistribution, facilitate hardware lock-in and push towards expanded commercialization.
*** Software should be free to run, change, copy and modify so users are the ones in control, free from corporate control, and better software develops - GNU license
*** Brought a philosophy of freedom (freedom (speech), not price (beer) )
*** FSF kernel (GNU/Hurd) taking too long to develop, though all other components (the utilities) were complete.
** 1991 - Linux Kernel:
** Linus Torvalds, Finnish grad student, started working on a kernel for fun after getting impatient for a totally free kernel to work with due to all of the legal battles
*** Minix good for academics, but not allowed for professional use
*** Minix still required a fee and had a restrictive license
*** BSD still somewhat encumbered by AT&T license 
*** BSD legal problems stalling development
*** Linux only had a kernel, not a complete operating system.

* Linux distributions:
** GNU Project had utilities but no kernel.  Linus Torvalds had a kernel but no utilities.
** Linus provided the kernel (Linux kernel) to accompany FSF GNU utilities & components to make a Linux OS
** Different distributions (Fedora, Gentoo, Debian, Ubuntu, etc) combine the Linux kernel, FSF utilities, and other applications in different ways and focus on different types of users.
** Success due to freedom - many other programmers able to contribute code and ideas.

* Commercial: (AIX, HPUX)
** Several commercial Unix distributions exist and are only found in large enterprises.  These were more popular decades ago and have lost ground to Linux.

A good article about the history of Unix/Linux:  [[Did Linux Kill Commercial Unix|https://www.howtogeek.com/440147/did-linux-kill-commercial-unix/]]


! Interacting with the system

There are two ways to interact with a system:  through a graphical interface (GUI) or the command line (CLI).  Most of our work this semester will be conducted through the CLI.

!! Graphical User Interface (GUI)
* Desktop environment on top of OS; just another application
* Examples of GUI Desktop managers:
** Gnome
** KDE
** XFCE
* Much better for multitasking
* How to use it
**~Alt-F2 - Run a command
**~CTRL-ALT-Arrows - Change virtual desktops
**~CTRL-ALT-BKSP - Restart the window manager (if enabled)
** Navigating the menus - Much like what you're used to on either Windows or Mac
** Mouse: Highlight to copy, middle button to paste
*** This is the standard Unix/Linux way to copy/paste.  In ~PuTTY, highlighting text copies it to the clipboard and clicking the right mouse button will paste to the terminal

!! Command Line Interface (CLI)
* Can be accessed from within the GUI, eg: the terminal program
* Or Console, which is accessed when you're sitting down at the keyboard and monitor on a system not running a graphical environment
* Virtual Console (~CTRL-ALT-F[2-9]).  Unix systems run many virtual consoles which can be accessed to run other tasks.
* Access remotely, such as via SSH.  Will be accessing our class shell server remotely to complete our work.

* Unix/Linux is primarily a Multi-user environment.  Many users can easily log in concurrently and work simultaneously.
** About accounts:
*** Home directory - Every user has a home directory where they can store files
*** User ID - The ID number assigned to your account.  It's these numbers which identify you as a user
*** Group ID - Users may belong to groups for shared resources.  Everyone in this class is a member of the ncs205 group and can access this class's resources
*** Who am I - List information about your user account:  {{Command{id}}}
*** Who are you - List information about other user's accounts:  {{Command{id //username//}}}
*** Who is connected:  {{Command{w}}} or {{Command{who}}}


!! The Shell

The shell is our command processor that provides:
* An interpreter - it reads and interprets commands from the user, 
** displays the shell prompt and waits for input (Case matters here!)
** user interface for entering and processing commands from the user
** works with the kernel to execute them
* Programming interface
** script interpreter for executing shell scripts
** a shell script is just a collection of commands

!! Different Shells

Different shells for different things:  bourne, bash, csh, tcsh, korn

[>img[img/shell.jpg]] 
The Shell is what users interact with on the command line.  It receives and interprets commands. 

[[Two main families|img/shells.jpg]] - bourne and ~C-Shell

Thompson shell, original unix shell, ends with AT&T 6th edition and replaced by the modern branches:
* Bourne Shell ({{Command{sh}}}) 
**written to replace limited abilities of original shell
**Oldest and most primitive
**Korn shell ({{Command{ksh}}}) - Closed shell from Bell Labs
***Built to be a vast improvement over the bourne shell
***Adopted in future editions of AT&T Unix (8-10th editions)
***Became popular with commercial users as a higher end, more powerful shell, especially as a programming language
**Bash ({{Command{bash}}}) - FSF - ''B''ourne ''a''gain ''sh''ell
***Extends bourne shell while being free to distribute 
***Free software, community supported, part of the GNU toolset.
*~C-Shell ({{Command{csh}}}) - Created by Bill Joy for the Berkeley Software Distribution (BSD) unix.
** Based on C Programming language.  scripting commands are based on C statements
** BSD License, couldn't distribute freely
** TCSH ({{Command{tcsh}}}) -
*** Enhancement of the C-shell while being free from licenses
*** In public domain for academic users

Which to use:
Split into three camps:  {{Command{bash}}} for Linux, {{Command{tcsh}}} in BSD branch, and {{Command{ksh}}} for commercial distributions (IBM AIX and HP-UX)
What we'll be using this semester:
Interactive use: bash, since we're doing everything in Linux
Shell scripting:  bourne for portability/compatibility or bash for extended features.

We can see available shells on a system with:  {{Command{cat  /etc/shells}}}

The shell is just a regular program, so anyone can design their own shell.  You can also execute it by its command name to run a different shell.

!! Working with the shell

* Commands are entered at the shell prompt
*Syntax: {{Monospaced{''command_name'' [options] [arguments]}}} (whitespace delimited)
**Command - what action to take
**Options - modify how the action is applied / how the command does its job
**Arguments - Provide additional info to the command (object identifiers, file names, etc)
** Some options can have arguments (option arguments) to provide additional information
*** For example, {{Command{mkdir -m 755 ~/open/}}} to create the directory named {{File{open}}} within your home directory with different starting permissions.  Here, the {{Monospaced{755}}} is an argument to the {{Monospaced{-m}}} option.
** The components in a command string must always be separated by some form of whitespace
*** The command string {{Command{ls -l /tmp/}}} is correct where all three options are properly separated by whitespace.  Whereas the command {{Command{ls-l/tmp/}}} is an invalid command that does not exist on the system.

** Example commands:  {{Command{ls}}}, {{Command{date}}}, {{Command{cal}}}, {{Command{who}}}
* default options  /  arguments 
** {{Command{date}}} - by default, show the current date and time.  Different options can be specified to alter the format the date is displayed in.
** {{Command{cal}}} - by default, show a calendar for the current month.  {{Command{cal 2022}}} will display the entire year.
** {{Command{cd}}} - by default, change directory to the user's home directory.  Specifying an argument will change to that directory instead.
* combining options
** several options can be combined together, for example: {{Command{ ls -lrt }}} to display the contents of the current directory in long listing format, sorted by modification date, with the most recently accessed files at the bottom.
* - vs &#45;-word options (eg: the {{Command{file}}} command)
* Autocompletion - Enter the first few characters of a file or command and the shell will complete the rest.
** Tab key & ~CTRL-D
* command history
** The keyboard up/down arrows can be used to cycle through previous commands
** type first 2 letters and hit page-up to return to the last command which began with those characters
* Canceling a command with ~CTRL-C
* grouping commands with ;
** example: {{Command{date ; cal}}}

!! Shell customization files
* bourne shell family:
**.profile - commands used to customize the shell
*C-shell family
**.cshrc - Shell environment (run on both interactive and non-interactive shell)
**.login - Run on login, contains commands and variables
**.logout - Script that runs on logout


! Finding information / UNIX Documentation
[>img[https://imgs.xkcd.com/comics/rtfm.png][https://xkcd.com/293/]]
*Usage: {{Command{man [section] //command//}}} - the unix manual / man pages
**Navigation:
***moving up and down with space, f and b
***search down with /, up with ?, n for next match
***g to top of page, G to bottom of page
***q to quit 
***h for help

* Man page chapters:
**Synopsis - Overview of the command, listing options and requirements.  Optional items are contained within [brackets]
**Description - Description of actions performed by the command and detailed information about all of the supported options
**Examples - Examples of common usage	
**See Also - Other man pages to read for related information
**Check manpages for look, chmod, cut

*{{Command{man -k //keyword//}}} - search the unix manual

*Manual sections:
**1 - Commands
**2 - System Calls
**3 - Library Functions
**4 - Special Files
**5 - File Formats
**6 - Games
**7 - Misc Info
**8 - System administration

{{Note{''Note:'' When working with Unix documentation, items in ''[''brackets'']'' are optional.}}}

! Deliverables

!! Read Chapter 1 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]]
- Complete [[Lab 1|labs/lab1.pdf]]

''Warning:'' Do not complete these lab assignment ~PDFs within your web browser.  Download the files and open them in [[Acrobat Reader|https://get.adobe.com/reader/]] or a similar PDF document reader.  Web browsers do not save form input appropriately and your responses will likely be lost.

{{Warning{It's wise to preview the completed PDF document in Acrobat Reader to verify your responses before uploading to the class shell server.}}}

Be sure to read the instructions for submitting assignments and information about the labs in the [[Lab Assignments]] page.

! Material
!! Read: 
* Chapter 2 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]]


! UNIX Files

Navigating the filesystem is at the core of working with the unix command line.  Explore our shell server using the {{Command{cd}}}, {{Command{ls}}}, and {{Command{pwd}}} commands.  Files for our class and your labs can be found within the {{File{/opt/pub/ncs205/}}} directory.  Use the material introduced in this chapter to explore the filesystem on the shell server, especially the directories within {{File{/opt/pub/ncs205/}}}.

Everything is a file
* Defined: A file is anything that can be a source or destination of data.
* Ordinary files, directories, links, devices, etc.

!!File types:  (ls will show the type)
* Ordinary files (regular files) - hold information
** Text file (ASCII files)
** Binary file (data & programs)
** {{Command{file}}} command - use to obtain info on file type
** {{Command{strings}}} command - use to extract ascii strings from a binary file
* Directory files - collections of files and other directories, how files are organized on the system.
** Standard conventions:  A file written with a trailing slash (eg: {{File{/opt/pub/ncs205/}}}) refers to a directory.
**Root directory - Highest level of the file system =  {{File{/}}}
**Navigate with the {{Command{cd}}} command
***default argument will change to home directory
*** {{Command{cd -}}}  will change to the last directory you were in
**Display file metadata (eg, permissions and date information) on a directory with  {{Command{ls -ld}}}
**Present working directory =  {{File{.}}}
**Parent directory = {{File{..}}}  
*** See note below.
**Home directory  ({{File{~}}} or $HOME)
*** Either use it alone to refer to your home directory, eg: {{Command{ls ~}}}
*** Or with a username to refer to another user's home directory, eg: {{Command{ls ~//username//}}}
**Relative & absolute path names
***Absolute - start from root
***Relative - start from current location
**Working directory
*** The //current working directory// is the directory you are currently located in at the shell prompt.  
*** The command {{Command{pwd}}} (print working directory) will display the full path of your current working directory to the screen.
**Obtain disk usage of a directory with the  {{Command{du}}}  command
**Tour the unix filesystem
*Symbolic links  (soft link)
*Device Files
**Character devices - device that reads or writes data one character at a time
**Block device - device that reads or writes data one block at a time
*FIFO (aka named pipe) - Used for interprocess communication
*Socket - Used for network communication

{{Warning{''Note:'' The special directory {{File{..}}} refers to the ''parent directory'', not //previous directory//.  The word previous is ambiguous and could mean the last directory you were in.  The last directory you were in could be anywhere on the filesystem.  Referring to the special directory    {{File{..}}}  as //previous directory// will be considered incorrect. }}}

!! File and directory names
* Can be any sequence of ASCII characters up to 255 characters
* Start file names with an alphabetic character
* Try to avoid spaces.  Instead use dividers to separate parts of the name, such as _ - .
* Use an extension that describes the file type
** homebackup_december.tgz
* Files beginning with a dot are hidden
** Typically configuration files, like {{File{.login}}} or {{File{.cshrc}}}
* Avoid special characters (metacharacters) and spaces, such as:    & * \ | [ ] { } < > ( ) # ? ' " / ; ^ ! ~ % `
** All of these symbols mean something special to the shell.  Keep track of what they mean.  [[Shell Metacharacter Table|handouts/ShellMetacharacterTable.pdf]]
* Escape special characters with a \
** vi  commands\&examples.txt
* Use quotes for files with spaces
**vi "Long File Name.txt"

!! Basic file manipulation

!!! Listing files - {{Command{ls}}}
The {{Command{ls}}} command will list the contents of a directory.  Extra options can be used to alter the default behavior of the {{Command{ls}}} command:
* {{Command{-a}}} - This option will include hidden files in the output
* {{Command{-l}}} - This option will display the output in //long listing// format.  Additional information about the files will be displayed.



! Deliverables
!! Read Chapter 2 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]]
- Complete [[Lab 2|labs/lab2.pdf]]
! Material

!! Read Linux Administration - A Beginner's Guide:
* Chapter 12 until page 265 (tcpdump) - Good Networking background info
* Chapter 13 - Network Configuration

!! Watch
* Good networking background info, but not required: https://www.youtube.com/watch?v=fHgk7aDGn_4

! Notes

For the remainder of the semester we will transition from being users of a Unix system to administrators.  A virtual lab environment is available where everyone will be assigned a small collection of Unix servers to configure and manage.  

{{Note{
!!! Getting ahead of future problems:
# ''About 50% of the problems we will encounter will be due to typos''
** Leave little to chance and use copy/paste for long or complex commands or configurations
** Pay attention to what you are typing.  Some characters, like a 1 and an l, look alike.  Be sure you know which you're dealing with.
** Use your VM's console only to configure networking and bring it online.  After that, do everything through SSH
# ''About 25% of the problems later in the semester will be due to rushing through earlier material instead of taking the time to retain it''
** Everything we do this semester will look back on previous work.  If you're rushing through and not retaining it, you will surely pay for it later.
# ''About 20% of the problems will be due to not following the directions''
** Go slow and pay attention!  Each of the steps matter.  If you skip over important steps or ignore errors, don't expect things to work.
# ''< 5% of the problems will be due to genuine system errors''
** Having a keen eye for detail, paying attention to the directions, and taking the time to practice and retain the material will make for a much smoother semester.
}}}

!! Linux Basics

There are many different Linux distributions available.  The distro to choose is a combination of the system's purpose and personal preference.  

Examples of different Linux distributions are:
* Server - ~CentOS, ~ClearOS
* Desktop - Fedora, Mint 
* Dual (both desktop & server editions) - Ubuntu, Debian
* Build from source / minimal - Gentoo
* Special Purpose - Kali, Clonezilla, ~GParted

Obtaining a Linux distro
* Directly from the distro's website
* [[DistroWatch|http://distrowatch.com/]] - A site which tracks all available Linux distributions

Installation options
* Single OS
* Dual boot (eg: Dual boot between Windows and Linux)
* Virtualization (eg: ~VirtualBox)
* Live USB (Kali is a great option for this)


For our class, everyone will be assigned Linux virtual machines.  These class ~VMs have already had the OS installed from a common template.  We'll be using ~CentOS minimal, the same distribution used for our class shell server.  This is a bare-bones installation by default.  All other software will need to be installed.  This allows for a slim and nicely tuned system which only contains the components required for its function.

!! Bringing our class ~VMs Online

Direct your web browser to https://lab.ncs205.net/ to work with your VM
* Log in with the same credentials you used for the class shell server.
* Select your VM from the list on the left
* Click the Start button in the top right corner if your VM is currently powered down.
** The monitor icon next to your VM name should change from black to white when it is powered on
* Once your VM begins to boot, select the drop-down next to Console and pick xterm.js
** Once the console window opens, you may need to press Enter to get to the login prompt.

!!! Set root password
Log in with the username ''root'' and set a password with the {{Command{passwd}}} command.  Do not forget this root password.
 - The user ''root'' is the standard administrative account.  This special user account has full access to manage the system and all files on it.
 - There is currently no root password set.  You should be able to log in without being prompted for one.
 - Without setting a root password you will not be able to log in remotely via SSH.


!! Basic networking

Our virtual lab environment is behind a single IP address.  The internal portion is utilizing a private IP space, the subnet 192.168.12.0/24.  This setup is much like a home network where your home systems all share a single public IP address and are behind a router.  This router protects the internal systems since none of them are directly accessible from the internet.  Since your ~VMs are all behind a router, you cannot SSH into any of them directly.  You'll first need to SSH into the class shell server and from there you can SSH into your VM.


!! Set IP addresses

Everyone has a block of 8 ~IPs to work with.  We have five things to configure to bring them fully online:  IP address, netmask, gateway, DNS, and host name

The table below contains the fourth octet of your ''starting'' IP address. Use this to assign to your first VM.  The first three octets are 192.168.12.

| !Start IP | !Username |
| 24 | merantn |
| 32 | backers |
| 40 | caplanc |
| 48 | cormiej |
| 56 | esquitc |
| 64 | fitzgea2 |
| 72 | khanss |
| 80 | khandat |
| 88 | leonevj |
| 96 | lionht |
| 104 | marvinc |
| 120 | penas |
| 128 | peralas |
| 136 | reynolz |
| 144 | stevendc |
| 152 | subedib |
| 160 | tajs |
| 168 | talasih |
| 176 | tankoud |
| 184 | terronl |
| 192 | tut |
| 200 | sheard |
/% awk -v ip=32 '{print "| " ip " | " $3 " |"; ip+=8}' user2009.txt %/
* This is a ~Class-C subnet with a /24 CIDR mask.  Your netmask will be 255.255.255.0.
* The default gateway for these systems, the next hop towards the internet, is 192.168.12.1
* Our DNS resolver is at 192.168.12.10

!!! Manually apply static IP address immediately:

There are two ways to manually apply an IP address.  The old way with the old utilities and the new way most newer distributions are utilizing.  Our systems must be configured with the new way; they do not come with the old tools by default.  It is generally easier to bring your systems online manually using the virtual console and then SSH into them to complete the configuration.  You can also install nano if you're not yet comfortable with vi once the networking is configured.

!!!! The old way:
* Now requires the ''net-tools'' package on systemd-enabled installations
** The {{Command{ ifconfig }}} and {{Command{ route }}} commands are no longer installed by default
* Access a root prompt
* Set the ip address
** {{Command{ifconfig eth0 inet 192.168.12.''x'' netmask 255.255.255.0}}}
** Test it:  {{Command{ping 192.168.12.1}}}
** But we can't yet leave our local network:  {{Command{ping 1.1.1.1}}}
* Set the default route
** {{Command{route add default gw 192.168.12.1}}}
** Test it:  {{Command{ping 1.1.1.1}}}
* Set the system host name:
** {{Command{hostname test.//username//.ncs205.net}}}
** Be sure to replace ''//username//'' with your actual username in the above command.  Do the same wherever you see //username// in italics.
* Test by reinvoking the shell by executing {{Command{bash}}}


!!!! The new systemd way:
* Log in and access a root prompt
* Ensure the interface is up:
** {{Command{ ip link set eth0 up }}}
* Set the ip address
** {{Command{ ip addr add 192.168.12.''x''/24 dev eth0 }}}
** Test it:  {{Command{ping 192.168.12.1}}}
** But we can't yet leave our local network:  {{Command{ping 1.1.1.1}}}  (This should fail)
* Set the default route
** {{Command{ ip route add default via 192.168.12.1 }}}
** Test it:  {{Command{ping 1.1.1.1}}}  (This should now work)


!!!! Configure DNS & host name

* Configure DNS:
** DNS is not yet configured so DNS resolution cannot yet take place.  Attempts to ping a system by its host name should fail.
{{{
[root@localhost ~]# ping google.com
ping: unknown host google.com
}}}
** Add the following line to {{File{/etc/resolv.conf}}} to specify the DNS server to use for mappings between hostname and IP address.  
*** ''nameserver 192.168.12.10''
** Test it:  {{Command{ ping www.google.com }}} 


* Execute this command to set the system host name immediately:
** {{Command{hostname test.//username//.ncs205.net}}}
** Don't forget to replace ''//username//'' with your actual username
* Verify with {{Command{ hostnamectl }}}
* Test by reinvoking the shell:  {{Command{ bash }}}
* The file {{File{/etc/hostname}}} should contain the system hostname to set on boot


!!!! Test connectivity by accessing your VM via the network

* Open putty or your SSH client and connect to our class shell server:  ''lab.ncs205.net''
* From the class shell server, connect to your VM via SSH:  {{Command{ssh 192.168.12.''x'' -l root}}}
** Use the root password you just set

{{Warning{''Warning:''  The {{Monospaced{''-l''}}} above in the {{Command{ssh}}} command string is a dash followed by a lowercase letter {{Monospaced{''l''}}}, not the number {{Monospaced{''1''}}}.  Be sure you can spot the difference between these two characters.}}}

{{Note{''Note:'' You'll likely see a similar warning the first time you SSH into a server:
<<<
{{Monospaced{
[nick@shell ~]$ ''ssh 192.168.12.24''
The authenticity of host '192.168.12.24 (192.168.12.24)' can't be established.
ECDSA key fingerprint is ~SHA256:fnSqW1mfXsFRg/i9XMqQ4/l3FoEYKX1BteuR7FgDXlc.
ECDSA key fingerprint is ~MD5:7f:07:b8:32:2c:c8:94:af:c2:66:8d:6e:3c:b6:08:2d.
Are you sure you want to continue connecting (yes/no)? ''yes''
Warning: Permanently added '192.168.12.24' (ECDSA) to the list of known hosts.}}}
<<<
This warning means the destination server is unknown and untrusted because the key isn't recognized.  The SSH client is giving you an opportunity to validate the remote host to ensure it is the correct system.  This error is expected if it's the first time you're connecting.  But if it appears spontaneously, some paranoia may be warranted and you should verify the key fingerprint over another channel.  If it was an impostor, that impostor would be able to intercept your credentials and other communication with the remote host.

You'll need to answer {{Monospaced{''yes''}}} to the question in order to proceed.  The host fingerprint will then be saved to your system within {{File{~/.ssh/known_hosts}}} so you're not prompted again in the future.
}}}

!!! Modify networking configuration files:

The {{Command{ ip }}} commands we just used will cause these changes to take effect only for the current boot instance of the system.  These settings will be lost once the system reboots.  We need to edit the appropriate configuration files so these settings will be applied on system startup.

!!!! Edit the file {{File{ /etc/sysconfig/network-scripts/ifcfg-eth0 }}}
These configuration options apply to the interface

# Ensure the ''HWADDR'' line is commented out (if it exists).
# Change the ''BOOTPROTO'' option from ''dhcp'' to ''static''
# Change the ''ONBOOT'' option from ''no'' to ''yes''
# Add the following lines:
<<<
IPADDR=192.168.12.''x''
NETMASK=255.255.255.0
GATEWAY=192.168.12.1
~DNS1=192.168.12.10
<<<
Replace ''x'' with the IP address assigned to your VM


!!!! Edit the file {{File{ /etc/hostname }}}
# Change the current contents to:  ''test.//username//.ncs205.net''
# The name ''test'' is only for this first VM.  A different host name will be used for future ~VMs.
# Don't forget to change ''//username//'' to your actual username


!!!! Add a line to {{File{ /etc/hosts }}} which resembles the following:

{{{
192.168.12.24         test.merantn.ncs205.net test
}}}
Replace the last octet of the above IP address with yours and replace my username with yours.


!!! Switch back to console and test


!!!! Restart networking services
This command will restart networking services on your system, activating the new settings so you can verify they are correct.

{{Command{systemctl restart network}}}


!!!! Check your settings
Verify your configuration with the {{Command{ip addr}}} and {{Command{ip route}}} commands.  

The output of {{Command{ip addr}}} should resemble:
{{{
[root@test ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether ea:d3:f4:ee:bd:33 brd ff:ff:ff:ff:ff:ff
    inet 192.168.12.24/24 brd 192.168.12.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::e8d3:f4ff:feee:bd33/64 scope link
       valid_lft forever preferred_lft forever
}}}
The last octet in the IP address above is unique to each system.  .24 is used here, your value must be different.

The output of {{Command{ip route}}} must minimally resemble the following lines.  Additional lines may be present.  
{{{
[root@test ~]# ip route
default via 192.168.12.1 dev eth0
192.168.12.0/24 dev eth0  proto kernel  scope link  src 192.168.12.24  metric 100
}}}

Hostname verification should resemble:

{{{
[root@localhost ~]# hostname
test.merantn.ncs205.net
}}}


!!!! Verify network connectivity
You should now be able to ping the default gateway for our test network by its IP address and Google by its hostname.
{{{
[root@localhost ~]# ping 192.168.12.1
PING 192.168.12.1 56(84) bytes of data.
64 bytes from 192.168.12.1: icmp_seq=1 ttl=64 time=1.45 ms
64 bytes from 192.168.12.1: icmp_seq=2 ttl=64 time=1.23 ms
64 bytes from 192.168.12.1: icmp_seq=3 ttl=64 time=1.23 ms
^C
--- 192.168.12.1 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2399ms
rtt min/avg/max/mdev = 1.235/1.308/1.454/0.110 ms

[root@test ~]# ping www.google.com
PING www.google.com (172.217.3.100): 56 data bytes
64 bytes from 172.217.3.100: icmp_seq=0 ttl=57 time=2.147 ms
64 bytes from 172.217.3.100: icmp_seq=1 ttl=57 time=1.434 ms
64 bytes from 172.217.3.100: icmp_seq=2 ttl=57 time=1.266 ms
^C
--- www.google.com ping statistics ---
3 packets transmitted, 3 packets received, 0.0% packet loss
round-trip min/avg/max/stddev = 1.266/1.616/2.147/0.382 ms
}}}


!!!! Remote connections with SSH

The configuration of our virtual lab network will not allow direct outside connections to virtual machines.  You must first connect to the lab SSH jumphost.  

In this example I'm connecting to the class shell server from my home Unix system and then my test VM.  You can also use putty or a similar tool to first connect to the class shell server.

{{{
nick@trillian:~>ssh lab.ncs205.net -l merantn
Last login: Sat Mar 14 13:38:20 2020 from hoot
[merantn@shell ~]$


[merantn@shell ~]$ssh 192.168.12.24 -l root
Warning: Permanently added '192.168.12.24' (RSA) to the list of known hosts.
root@192.168.12.24's password: 
Last login: Sat Mar 21 23:35:38 2020
[root@localhost ~]#
}}}

{{Note{''Note:''   The {{Monospaced{''-l''}}} above in the {{Command{ssh}}} command string is a dash followed by a lowercase letter {{Monospaced{''l''}}}, not the number {{Monospaced{''1''}}}.}}}


! Our class infrastructure

None of this will be tested, but setting up our course infrastructure is related to the material we're covering so describing the setup seems prudent.  Here's how it all works under the hood.

!! Original class shell server

Our original class shell server was a VM running at [[DigitalOcean|https://www.digitalocean.com/]].  They are an easy to use yet low-cost cloud VM provider and a great choice if you need to stand up a short-term VM for a project.  The old VM cost $5 per month to run.  Most of my personal infrastructure is here.

After creating an account and logging into the dashboard, you're given an option to create ~VMs (DO calls them Droplets) with several operating systems to choose from.  Pick one and your new droplet will be available for use in a few minutes.


!! New class shell server and student ~VMs

~DigitalOcean won't scale very well for the second half of our course where each student will be provided 5 ~VMs. We'll also need a few for infrastructure.  For this half, I rented an entire server from a [[Hetzner auction|https://www.hetzner.com/sb]] for about 50&euro; per month.  [[Hetzner|https://www.hetzner.com/]] is a German cloud provider and their auction is the cheapest bare-metal option I've found.  The cheapest auction server is at 22&euro; per month, but we'll need more resources than the cheaper ones have.

The server I picked has an 8-core AMD Ryzen 7 1700X CPU with 64gb of RAM and two 512gb datacenter SSD drives.  I expect we'll be running about 125 ~VMs total, so we'll need a lot of RAM.  SSD drives are significantly faster than spindle, so they should speed things up for us.  With so many ~VMs, we may end up stressing that 8-core CPU though - that's the resource I'm most worried about.

They have an out of band rescue environment that you can SSH into for recovering a server or installing a new OS.  Once they turned the server over to me, I connected to their rescue environment and instructed it to install Debian Linux (version 11) on the server.  It rebooted after the installation was complete and I was able to SSH into our new class server.

Next came the hypervisor so everyone can have their own ~VMs.  I use [[Proxmox|https://www.proxmox.com/en/]] for everything.  This can be installed either directly from its own ISO or [[on top of an existing Debian Linux installation|https://pve.proxmox.com/wiki/Install_Proxmox_VE_on_Debian_11_Bullseye]].  

I only have a single IP address for this new server, so the Proxmox host is also functioning as a router to handle traffic for an internal subnet that will be used for our class ~VMs.  One of the ~VMs will be used as our new class shell server.

After the new class shell server came online, I installed the packages we'll need for our material, copied everyone's home directories & /opt/pub/ncs205/, and copied everyone's user accounts.  Unless I forgot something, you shouldn't be able to tell the difference between the old and new shell servers.  At the end of next week, after we have some time to settle in to our new home, I'll power down the old class shell server at ~DigitalOcean, create a snapshot of it, and destroy the droplet.  Next semester I'll create a new droplet from this snapshot for the next class's shell server.

On the new Hetzner server, everything is a VM.  This makes it easy to back everything up at the end of the semester and cancel the server to reduce costs.  I can just copy those VM backups somewhere else and transfer them back to a new Hetzner server for the second half of the semester for the next class.  This way I only need to rent a ~DigitalOcean VM for the first half of our course and a Hetzner server for the second half.  I expect I'll need to transfer about 15gb from the Hetzner server in Germany to back up the ~VMs.  I can easily download them to my home file server, but uploading them next semester on a slow Spectrum connection will be painful.  Instead, we're using the B2 cloud storage at [[BackBlaze|https://www.backblaze.com/b2/cloud-storage.html]] to store the VM & server backups.  Their pricing is $0.005 per month per GB, so the VM image backups will only cost me about 10 cents per month.


! Assignment

<<tiddler [[Lab 51 - Bring test and www online]]>>

//''Note:''// Virtual machine work begins at lab 51
* Labs 41 - 50 were skipped
! Material

!! Read:
* Linux Administration Chapter 7 - Booting and Shutting Down
** Note: Booting into single-user mode is good to know but the book's instructions are incomplete. Here's the new way for Redhat systems (which includes ~CentOS) - https://www.tecmint.com/boot-into-single-user-mode-in-centos-7/

* Linux Administration Chapter 9 - Core System Services
** Skip over //xinetd// and //logging// (pages 181 to 195) and continue with journald on page 195.


! Notes

!! General system startup

* The Boot process
** BIOS (Basic Input/Output System)
*** For motherboard and certain devices
** MBR (First block of the disk)
** Boot loader - Chooses the OS/Kernel and bootstraps the operating system
*** Grub - Grand Unified Boot loader - Standard Linux boot loader
*** Check out grub configs in /boot/grub
*** Use it to boot multiple kernels (such as after a kernel update) or multiple ~OSes
** kernel - /boot/vmlinuz* - loaded into memory and begins to execute
*** Press ESC to see boot messages while the system starts
*** device detection: probe system buses, inventory hardware, and load device driver modules
*** create kernel processes (those in brackets)
*** system becomes available for user processes once the kernel is loaded
** Initialization daemon - First user process, parent of all processes running on the system
*** init - old ~SystemV ~OSes
*** systemd - New method
*** executed by the kernel and responsible for starting other processes
**startup scripts - start system services
* Config files in {{File{/etc/}}}
** Most are single files for the service or resource
*** {{File{fstab}}} : tab = table - filesystem table
*** {{File{resolv.conf}}}
*** {{File{sysconfig}}} directory - extra system configuration files
** Some are multiple files
*** cron is a good example
*** {{File{crontab}}} - traditional cron config table
*** {{File{cron.d}}} - directory containing individual config files
*** {{File{ cron.{daily,monthly,weekly} }}}


!! Systemd
* A new standard init system
* Backward compatible with ~SystemV init
* Can start services in parallel, reducing system start times
* Everything is broken down into units.  
** Two primary unit types to be concerned with
*** service units - Manage a single service
*** target units - manage groups of services
*** {{Command{ systemctl list-units | grep service }}}
*** {{Command{ systemctl list-units | grep target }}}
* Service and target configuration files are stored in {{File{ /{etc,lib}/systemd/system }}}
** Use the {{File{/etc/systemd/system}}} path for custom configs or to override existing
** Stock configs are in {{File{ /lib/systemd/system }}}
** View a list with current state: {{Command{ systemctl list-unit-files &#45;-type=service }}}

Everything is managed by symlinks:
* runlevel.? targets are symlinked to their systemd equiv

<<<
[root@www system]# pwd
/lib/systemd/system
[root@www system]# ll runlevel*
lrwxrwxrwx. 1 root root 15 Oct 21 17:02 runlevel0.target -> poweroff.target
lrwxrwxrwx. 1 root root 13 Oct 21 17:02 runlevel1.target -> rescue.target
lrwxrwxrwx. 1 root root 17 Oct 21 17:02 runlevel2.target -> multi-user.target
lrwxrwxrwx. 1 root root 17 Oct 21 17:02 runlevel3.target -> multi-user.target
lrwxrwxrwx. 1 root root 17 Oct 21 17:02 runlevel4.target -> multi-user.target
lrwxrwxrwx. 1 root root 16 Oct 21 17:02 runlevel5.target -> graphical.target
lrwxrwxrwx. 1 root root 13 Oct 21 17:02 runlevel6.target -> reboot.target
<<<
* default.target symlinked to the desired default runlevel target
<<<
[root@www system]# ll default.target
lrwxrwxrwx. 1 root root 16 Oct 21 17:02 default.target -> graphical.target
<<<

| !~SysVinit Runlevel | !Systemd Target | !Description |
| 0 |runlevel0.target, poweroff.target|Halt the system|
| 1, s |runlevel1.target, rescue.target|Single user mode|
| 2, 4 |runlevel2.target, runlevel4.target, multi-user.target|User-defined/Site-specific runlevels. By default, identical to 3|
| 3 |runlevel3.target, multi-user.target|Multi-user, non-graphical. Users can usually login via multiple consoles or via the network|
| 5 |runlevel5.target, graphical.target|Multi-user, graphical. Usually has all the services of runlevel 3 plus a graphical login|
| 6 |runlevel6.target, reboot.target|Reboot|
| emergency |emergency.target|Emergency shell|


!!! Examining service configuration files

cat /lib/systemd/system/sshd.service
{{{
[Unit]
Description=OpenSSH server daemon
After=network.target sshd-keygen.service
Wants=sshd-keygen.service

[Service]
EnvironmentFile=/etc/sysconfig/sshd
ExecStart=/usr/sbin/sshd -D $OPTIONS
ExecReload=/bin/kill -HUP $MAINPID
KillMode=process
Restart=on-failure
RestartSec=42s

[Install]
WantedBy=multi-user.target
}}}
 - After: What this service depends on
 - Wants:  Additional units tied to this service
 - ~EnvironmentFile - Location to store environment variables or options to startup / shutdown commands
 - ~WantedBy: Runlevel target this service is associated with

Display services wanted by a runlevel target: {{Command{ systemctl show &#45;-property "Wants" multi-user.target }}}
Display services required by a runlevel target: {{Command{ systemctl show &#45;-property "Requires" multi-user.target }}}
Display services that want a particular child service: {{Command{ systemctl show &#45;-property "~WantedBy" sshd-keygen.service }}}

!!! Starting and Stopping

Example commands to start, stop, restart, and check the status of a service immediately:

* Start: {{Command{systemctl start firewalld.service}}}
* Stop: {{Command{systemctl stop firewalld.service}}}
* Check Status: {{Command{systemctl status firewalld.service}}}
* Restart {{Command{systemctl restart firewalld.service}}}
** Can also be used to reload configuration

Conditional restart - only restart if it's already running:  {{Command{ systemctl condrestart firewalld.service }}}

Reload a service to re-read configuration files:  {{Command{ systemctl reload sshd.service }}}

Persistent services - Those to start on system boot:
Newly installed services will not be configured automatically to start on system boot.  You will have to start them manually and set them to start on boot.
- Enable a service to start on boot, eg: {{Command{systemctl enable firewalld.service}}}
- Stop a service from starting on boot, eg: {{Command{systemctl disable firewalld.service}}}


!!! Checking status

{{Command{systemctl list-unit-files &#45;-type=service}}}
{{Command{systemctl status firewalld.service}}}


!!! Adding a new service

For example, adding a new service for Apache.  This is only necessary if you installed a service from source code instead of via package management.  If you install software from a package, that package will come with the necessary files for systemd to manage the service.  This is a good reference to see the internals in case something custom needs to be added or modified.

{{File{/etc/systemd/system/httpd.service}}} :
{{{
[Unit]
Description=Apache Webserver
After=network.target

[Service]
Type=forking
EnvironmentFile=/etc/sysconfig/httpd
ExecStart=/opt/work/apache/bin/httpd -k start $OPTIONS
ExecStop=/opt/work/apache/bin/httpd -k graceful-stop $OPTIONS
ExecReload=/opt/work/apache/bin/httpd -k graceful $OPTIONS

Restart=always

[Install]
WantedBy=multi-user.target
}}}
 - {{Command{man systemd.service}}} for more details.

* Create environment file:  {{Command{ touch /etc/sysconfig/httpd }}}
* Refresh service and target configuration files:  {{Command{ systemctl daemon-reload }}}
* Enable startup on boot:  {{Command{systemctl enable httpd.service}}}
** Symlink was created in multi-user.target.wants:  {{Command{ ll /etc/systemd/system/multi-user.target.wants/ }}}
* Start now: {{Command{systemctl start httpd.service}}}
** Review recent logs associated with the service:  {{Command{ journalctl -u httpd.service }}}


!! Single user mode
Single user mode is a method to access systems which cannot fully boot.  The boot process is changed to disable most system startup steps and services so the system can be accessed and recovered from the failure.

* How to access systems if problems occur during boot
** Boot from a live CD or recovery mode
** Single user mode
*** Change the grub timeout to 20 seconds so you'll have more time to catch it.  ~VMs often introduce delays accessing the console, thus it can be difficult catching the grub loader if it has a short timeout.
**** Edit the file {{File{/etc/default/grub}}} and add the line {{Monospaced{''~GRUB_TIMEOUT=20''}}} to the bottom of the file
**** Execute {{Command{grub2-mkconfig -o /boot/grub2/grub.cfg}}} to activate the changes
*** See https://www.tecmint.com/boot-into-single-user-mode-in-centos-7/ for instructions to boot in single user mode


Other useful commands:  
* {{Command{shutdown}}} - shutdown / power off the system with many options for doing so
* {{Command{halt}}} & {{Command{poweroff}}}
* {{Command{reboot}}}


! Assignment

Play around with accessing single user mode.  It's a handy thing to know how to do.  The second half of this week is mostly background info that we'll need later.

Be sure you're comfortable using the {{Command{systemctl}}} command to start, stop, and restart services.
! Material

!! Lab & VM notebook:
* Start keeping good notes of what you are doing with your ~VMs.
** The software installed today should be included.
** These notes will come in handy later when you need to repeat these steps on future ~VMs

!! Read - Linux Administration - A Beginner's Guide 
* Chapter 5 - Managing Software
* Our systems are running ~CentOS and will be using the {{Command{rpm}}} & {{Command{yum}}} package management commands.
* It's good to be familiar with the {{Command{rpm}}} command, but we'll mostly be using {{Command{yum}}}.
* We won't be using the DNF package manager.  It hasn't caught on yet like the book was suggesting it would.


! Notes

{{Note{As technology users, we should know by now how to submit usable problem reports.

If you have a problem, please send a report I can work with. I need details of the problem, what you tried, steps you took to diagnose it, documentation you reviewed, screenshots, logs, etc. If you send me something vague like "//X command doesn't work//" with no supporting details, there may not be much I can do for you and I will wait for you to follow up your message with meaningful information. 

The level of assistance I provide will be proportionate to your effort to troubleshoot and supply details. If you do nothing to troubleshoot and send me little information to work with, you should then expect that much effort put into a response.
}}}

!! Expanding our systems

!!! The yum package manager

Package management is one of the customized components of a Linux distribution and differs between Unix operating systems and Linux distributions.

The core components of a Linux distribution are:
* Linux kernel
* Base utilities (typically GNU tools)
** Many g* utilities are from the GNU project (eg: gcc)
** Stallman's GNU (GNU's not Unix) project, early 80's.  Wanted to create a totally free OS. Started with the utilities.
** Came from the Free Software Foundation and [[a philosophy of freedom|http://audio-video.gnu.org/video/TEDxGE2014_Stallman05_LQ.webm]] (freedom (speech), not price (beer) ).
** Software should be free to run, change, copy and modify so users are the ones in control, free from corporate control so better software develops - GNU license
** Differ somewhat from ~FreeBSD tools (sed is a good example)
* Package manager.

Extra (optional) components:
* Specialized utilities (Like the tools that come with a distro like Kali)
* X server / Window manager

Each distribution combines these components in different ways depending on their focus and goals.

Redhat based systems (including ~CentOS) use the RPM package format and rpm package system with the yum package management utility.

Other package management systems exist for other distros
 - apt  (Debian & Ubuntu)
 - portage  (Gentoo)
 - ports  (~FreeBSD)
 - ~DistroWatch [[Package management cheat sheet|http://distrowatch.com/dwres.php?resource=package-management]]

{{Command{rpm}}} - very basic utility
* It will mainly just install, update, or remove packages
* You will need to acquire the .rpm package file yourself or have a direct URL for it
** A .rpm file is a collection of pre-compiled binaries, configuration files, and support files for an application compiled for the target architecture.
* Conflicts and dependencies will need to be sorted out manually

{{Command{yum}}} - high level utility for package management
* will interact with repositories (collections of .rpm files) to obtain packages
* takes care of any conflicts
* will install necessary dependencies
* records what is installed and any changes made to the system to facilitate updates, package removal, or audit.

Different package repositories (repos)
* Repository configuration files are stored in {{File{/etc/yum.repos.d/}}}
* A repository is the central distribution point for our linux packages
* Typically, each distro has its own repository on the internet for base packages
* The repository creator determines which applications it contains
* Repos are mirrored for accessibility and speed.
* Other repositories offer additional packages

EPEL (Extra Packages for Enterprise Linux)
Distributed by the Fedora Project to offer a repository of add-on packages
See: https://fedoraproject.org/wiki/EPEL

Don't run this until we need the EPEL repo 
{{Command{yum install epel-release}}}

Other specialized repositories may exist:
 - HP repo for their utilities (raid utils)


!!! Yum package manager commands:

{{Command{yum}}}
<<<
The primary command for the yum package manager.  Run this by itself to see all sub-commands
<<<

{{Command{yum repolist}}}
<<<
Display the configured repositories
<<<

{{Command{yum check-update}}}
<<<
Check the repositories for any available updates and display the results, without applying any updates.
<<<

{{Command{yum update}}}
<<<
Check the repositories for any available updates.  After reviewing the results, the user will be prompted to apply them.
''A {{Command{reboot}}} will be required if a kernel update is included in the list.  Otherwise, only updated services may need to be restarted for the updates to take effect.''
<<<

!!!! yum cleanup:
{{Command{yum clean packages}}}
<<<
Remove cached packages after install is completed.
<<<

{{Command{yum clean metadata}}}
<<<
Remove the XML metadata cache
<<<

{{Command{yum clean dbcache}}}
<<<
Clean the yum ~SQLite database
<<<

{{Command{yum clean all}}}
<<<
Remove all cached yum content
<<<

{{Command{yum makecache}}}
<<<
Download and rebuild the repo metadata cache
<<<


{{Command{yum update //package_name//}}}
<<<
Update a single package
<<<

{{Command{yum provides "*/ssh"}}}
<<<
See which package provides the file named ssh. 
<<<

{{Command{yum info //package_name//}}}
<<<
Display information on the specified package
<<<

{{Command{yum install //package_name//}}}
<<<
Install a package from yum
<<<

{{Command{yum search //string//}}}
<<<
Search the repository for packages matching //string//
<<<

{{Command{yum deplist package_name}}}

{{Command{yum list installed}}}

{{Command{yum remove //package_name//}}}


!!!! Fixing damaged configuration files
It's a common occurrence that a configuration file is accidentally damaged during the course of completing these labs and a service will not load as a result.  This sequence of commands will demonstrate comparing the configuration file on the system to the default which was installed as part of the package.  This comparison should help identify such configuration errors.

* Show the package which installed a particular file:  {{Command{rpm -qf /etc/named.conf}}}
* Display changes made since the original file was installed:  {{Command{rpm -V bind}}}

{{{
[root@core ~]# rpm -qf /etc/named.conf
bind-9.11.4-26.P2.el7_9.4.x86_64

[root@core ~]# rpm -V bind-9.11.4-26.P2.el7_9.4.x86_64
S.5....T.  c /etc/named.conf
}}}

The following table explains the letters in the above output:
| !Code | !Description |
| S |file Size differs|
| M |Mode differs (includes permissions and file type)|
| 5 |~MD5 sum differs|
| D |Device major/minor number mismatch|
| L |readLink(2) path mismatch|
| U |User ownership differs|
| G |Group ownership differs|
| T |mTime differs|
| P |caPabilities differ|

* Rename the original configuration file:  {{Command{mv /etc/named.conf /tmp}}}
* Reinstall the package:  {{Command{yum reinstall bind}}}
** A configuration file will only be reinstalled from the package if it is missing from the expected location.
* Compare the default configuration file to the renamed copy:  {{Command{diff /etc/named.conf /tmp/named.conf}}}
** Lines beginning with &lt; are the version in the file listed as argument one
** Lines beginning with &gt; are the version in the file listed as argument two

{{{
[root@core ~]# diff /etc/named.conf /tmp/named.conf
13c13
<       listen-on port 53 { 127.0.0.1; };
---
>       listen-on port 53 { any; };
21c21,24
<       allow-query     { localhost; };
---
>       allow-query     { any; };
}}}

There's no error here; these changes are expected.  This only demonstrates the process.  But it should be helpful for identifying a damaged or missing line.

Once that damaged line is identified, either merge it into your backup in /tmp/ or repeat your modifications to the new clean copy.


!!!! Additional yum commands:

* Yum package groups:
** {{Command{yum grouplist}}}
** {{Command{yum groupinfo //group_name//}}}
** {{Command{yum groupinstall //group_name//}}}


* Yum plugins
** Extend the functionality of the yum package manager
** See available plugins with {{Command{yum search yum-plugin}}}

yum-plugin-security - Check currently installed software for security updates.  Requires a subscription.

{{Command{yum &#045;-security check-update}}}
{{Command{yum &#045;-security update}}}

{{Command{yum updateinfo list available}}}
{{Command{yum updateinfo list security all}}}

https://access.redhat.com/solutions/10021

yum-utils - Extra utilities for working with yum


We can install most required software using packages with yum:
* Keep a record of what is installed as we go.
* Get started with: 
** Install on all systems: man wget nc telnet bind-utils openssh-clients rsync bzip2


We can also install software directly from source archives
* Source archives are typically distributed as compressed tarballs
* Latest versions of software are not always available via package
* Building from source allows for additional customizations
* and a higher level of control
* Multiple versions of a program can easily be maintained on the same system by installing to different locations
** But you must keep them up to date (patched) and sort out any dependencies manually.

For this class, we'll only be installing software from packages via yum.

!! Install web server software

The following tasks must now be completed to bring your web server online.  Refer to the notes above and in last week's pages to identify the proper commands to achieve these goals.

Complete these tasks on your web server VM:
# Install the following packages:  httpd httpd-tools php telnet
# Set the ''httpd'' service to start on system startup
# Start the ''httpd'' service now

!! Verify the service with the {{Command{telnet}}} & {{Command{curl}}} commands

The {{Command{telnet}}}, {{Command{curl}}}, and {{Command{nc}}} commands are excellent tools for verifying that you're able to communicate with a host or a service.  These are great for troubleshooting and everyone should know how to use all three.

Here I'm using telnet to connect to my web server on localhost.  Run the telnet command to make a TCP connection and then begin speaking HTTP to the server.  The HTTP command {{Command{GET /}}} will return the website.  A lot of HTML will be returned, so I only have the first couple lines in the sample output below.

{{{
[root@www ~]# telnet localhost 80
Trying ::1...
telnet: connect to address ::1: Connection refused
Trying 127.0.0.1...
Connected to localhost.
Escape character is '^]'.
GET /
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"><html><head>
<meta http-equiv="content-type" content="text/html; charset=UTF-8">
                <title>Apache HTTP Server Test Page powered by CentOS</title>
                <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
(truncated)
}}}


The {{Command{curl}}} command is another great tool for verifying TCP services and is generally available on every unix system.  We'll add the {{Monospaced{-v}}} flag here for additional verbosity that's helpful for troubleshooting.  Again, I'm truncating the output.

{{{
[root@www ~]# curl -v http://localhost/
* About to connect() to localhost port 80 (#0)
*   Trying ::1...
* Connected to localhost (::1) port 80 (#0)
> GET / HTTP/1.1
> User-Agent: curl/7.29.0
> Host: localhost
> Accept: */*
>
< HTTP/1.1 200 OK
< Date: Tue, 30 Mar 2021 19:44:15 GMT
< Server: Apache/2.4.6 (CentOS) PHP/7.3.27
< Last-Modified: Tue, 30 Mar 2021 10:17:46 GMT
< ETag: "56-5bebe4f44343d"
< Accept-Ranges: bytes
< Content-Length: 86
< Content-Type: text/html; charset=UTF-8
<
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"><html><head>
<meta http-equiv="content-type" content="text/html; charset=UTF-8">
                <title>Apache HTTP Server Test Page powered by CentOS</title>
                <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
(truncated)

* Connection #0 to host localhost left intact
}}}

Once a successful connection has been made, view the apache log files to verify the connection.
 - Apache logs are located in {{File{/var/log/httpd/}}}

A log entry for a successful connection will resemble the following.  Note the ''200'' HTTP status code:
{{{
192.168.12.10 - - [30/Mar/2022:18:45:42 -0400] "GET / HTTP/1.1" 200 86 "-" "curl/7.29.0"
}}}


Now try to connect to your web server from your test VM using {{Command{telnet}}} or {{Command{curl}}}.  If you use telnet, don't forget to send the {{Command{GET /}}} command.

{{{
[root@test ~]# telnet 192.168.12.25 80
Trying 192.168.12.25...
Connected to 192.168.12.25.
Escape character is '^]'.
GET /
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"><html><head>
<meta http-equiv="content-type" content="text/html; charset=UTF-8">
                <title>Apache HTTP Server Test Page powered by CentOS</title>
                <meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
( truncated )
}}}


Your web server is now online.  We'll work with it further in the 2nd half of this week's material.


! Additional Material

[[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] - Chapter 14 (Package Management)


!! Useful commands:
* {{Command{wget}}} - great tool for downloading files from a web or FTP server
* {{Command{tar}}} - Standard linux archive tool.  Files are usually distributed or stored as tarballs (the Linux equivalent of Zip).  This tool will create or extract them
* {{Command{telnet}}} - Useful tool for testing TCP ports
* {{Command{curl}}} - Useful tool for testing TCP ports or downloading content from the web
* {{Command{apachectl}}} - Tool to manage an Apache server.  Good to know if exists, but we likely won't be using it.


!! References

Yum quick reference:  http://yum.baseurl.org/wiki/YumCommands.html


! Assignment

<<tiddler [[Lab 52 - VM updates & software installation]]>>
<<tiddler [[Lab 53 - Web Server]]>>
! Material

!! Reading

Read Linux Administration - A Beginner's Guide Chapter 19 (//Apache Web Server//)

* Keep in mind our ~DocumentRoot directory should now be {{File{/opt/work/htdocs}}}


! Notes

At this point the web servers should be online and serving a basic web site from our new ~DocumentRoot directory.  We'll now use that directory to set up a more sophisticated website.

!! Web Services 

There are many different web server options - 
* Apache - One of the most common web server software packages
** LAMP stack (Linux, Apache, mySQL, PHP)
* nginx - Lighter weight for higher performance, speed, and reduced memory footprint.  Another very popular option.
* python - {{Command{python -m ~SimpleHTTPServer [port]}}}
**  This is a very useful way to stand up fast and simple web servers anywhere.  It's handy for quick data exfiltration. 
* IIS - Microsoft's web server package for Windows

[[Netcraft Web Server Survey|https://news.netcraft.com/archives/2022/02/28/february-2022-web-server-survey.html]]
* Web server market share and stats over the last 10 years
* Apache used to be the most popular but has been steadily losing steam over the last few years with nginx gaining ground.


Default web site files:
* Apache {{File{htdocs}}} directory - ''h''yper''t''ext ''docs'' - The root of our web site.  These are the files our web server will provide
* {{File{index.html}}}, {{File{index.php}}} or whatever we define via the ~DirectoryIndex configuration option in httpd.conf
** This is the default page to provide if only a directory is given (eg, http://www.ncs205.net/)
* Or display directory listing if no file specified and the ~DirectoryIndex directive is enabled

Process ownership
* {{Command{ps aux | grep httpd}}} - Apache runs as an unprivileged user
* Any scripts executed will run as this user
* This protects the system from malicious or vulnerable scripts
** If a script is compromised, the attacker will only be able to access what that unprivileged user can access
** This kind of privilege separation and isolation are important security concepts to follow

Headers
* Extra information sent by the web server describing the connection and server data
* Header information provides useful troubleshooting and security metadata
* They're often hidden by your web browser, but you can see them in the developer tools or the command line with {{Command{curl}}}
** Use the {{Monospaced{ ''-v'' }}} {{Command{curl}}} option to see the headers ({{Monospaced{ ''-v'' }}} usually means verbose output for most commands)
{{{
[root@www ~]# curl -v -s http://localhost
* About to connect() to localhost port 80 (#0)
*   Trying ::1...
* Connected to localhost (::1) port 80 (#0)
> GET / HTTP/1.1
> User-Agent: curl/7.29.0
> Host: localhost
> Accept: */*
>
< HTTP/1.1 200 OK
< Date: Mon, 28 Mar 2022 02:57:50 GMT
< Server: Apache/2.4.6 (CentOS) PHP/5.4.16
< Last-Modified: Thu, 24 Mar 2022 03:46:42 GMT
< ETag: "56-5daeeb1c16b20"
< Accept-Ranges: bytes
< Content-Length: 86
< Content-Type: text/html; charset=UTF-8
<
<HTML>
<BODY>
<BR><BR><BR>
<center><B>Welcome to NCS205!</B></center>
</BODY>
</HTML>
* Connection #0 to host localhost left intact
}}}

~VirtualHosts
* A header value can be set containing the host name used to access the web server
* The server software will examine this header value to determine which site to display
* This allows for multiple web sites per server, depending on the host name used to access the server
** IP based virtual hosts - A single server and apache instance will be accessible by multiple IP addresses with each IP address linked to a different web site
** Name based virtual hosts - Multiple host names resolve to the same IP address.  Examine the hostname in the HTTP headers to determine which site to serve.


!!! Apache Modules
* Addons to Apache - Modules provide an extensible framework for additional functionality
** Static - Compiled in to Apache.  Apache must be recompiled to add support for new modules or to update them
** DSO - Dynamic Shared Objects - Compiled separately and loaded when apache starts
** apxs (Apache Extension tool) - A perl script to assist with building and installing apache DSO modules
** PHP was added as an Apache DSO module
* Check available modules
** Those compiled in: {{Command{apachectl -l}}}
** Available as DSO: {{Command{ls -l /etc/httpd/modules/}}}
* Add new modules with apxs (The manual way of doing things.  We'll use the automated packages for our labs)
** {{Command{apxs -c mod_foo.c}}}
** {{Command{apxs -ian foo mod_foo.la}}}
** Must then add a ~LoadModule directive to the apache config file
* Module examples:
** php - A robust web scripting language
** mod_rewrite - Provides a rule-based rewriting engine to rewrite requested URLs on the fly
** mod_security - Provides intrusion detection and prevention for web applications
** mod_limits - Limit the number of connections per host
** mod_headers - Customization of HTTP request and response headers
** Authentication modules - Different method for authenticating web users
** https://httpd.apache.org/docs/current/mod/ - More available modules


!!! Apache configuration

!!!! Server level
* Main configuration file {{File{conf/httpd.conf}}}
** Configuration extras - {{File{conf.d/*.conf}}} & {{File{conf.modules.d/*.conf}}}
** A quick way to make common additional functionality available
** ie: SSL support, virtual hosts, user web sites

!!!! User level
{{File{.htaccess}}} files - Modify permitted configuration values per web directory


!!!! Module configuration
* A separate configuration file to tune php.
* By default {{File{ /etc/php.ini }}}

Set the following values in your php.ini file:
{{{
session.save_path = /tmp/php
log_errors = on
error_log = /var/log/httpd/php.log

date.timezone = "America/New_York"
disable_functions = system, exec, shell_exec
}}}

Be sure to create the directory {{File{/tmp/php}}} and make it owned by the apache user:
* {{Command{chown apache /tmp/php}}}


!!! Scripting
* A means to develop applications to generate dynamic web content 
* php - A standard server side scripting language for web development
** Change your {{File{ index.html }}} (located in {{File{ /opt/work/htdocs/}}}) to {{File{ index.php }}} and add the {{Monospaced{phpinfo();}}} php function to it
{{{
# cat /opt/work/htdocs/index.php
<HTML>
<BODY>
<CENTER>Welcome to NCS205!</CENTER>
<?php
// The line above instructs the php module to start processing php scripting
phpinfo();		// This function will display information and configuration for our php installation
// The line below instructs the php module to stop processing php scripting.
?>
</BODY>
</HTML>
}}}

Now {{Command{curl http://localhost/index.php}}} will execute the {{Monospaced{phpinfo()}}} function and return a dump of the server configuration.


* scripts typically run as the apache process (for modules) 

Keep web applications up to date!
* Security vulnerabilities are constantly discovered in web applications
* These vulnerabilities become attack vectors against the hosting server


!!! Extras

Basic HTTP is stateless: 
* Client makes a request, server responds, connection closed.
* cookies and session files can be used to maintain state between connections
** Cookies are files stored on your system to retain session information
** Authentication information may be stored in these cookies
** Leaking cookies is just as bad as leaking credentials

Content Distribution Networks (CDN)
* Globally or nationally distribute static content close to the end user
** Static content is cached to reduce load on the primary web server
** Serving content from a closer datacenter improves speed
* Examples:
** Cloudflare (Free/low cost tiers for experimenting and getting familiar with these concepts)
** Akamai


!!! Disable ~SELinux

~SELinux is a set of ''s''ecurity ''e''xtensions designed to protect Linux systems. ~SELinux will prevent the ~LocalSettings.php file from being read. The website will say it cannot read the file even though the file permissions are correct. ~SELinux must be disabled if it is not already.
- Otherwise we need to properly configure it, which is out of scope for this class.

To disable ~SELinux (it may already be done):
* Disable ~SELinux now:  {{Command{setenforce 0}}}
* To disable ~SELinux on boot, edit the file {{File{/etc/selinux/config}}} and change ''enforcing'' to ''disabled'' on the line that is not commented out.
* Check the status of ~SELinux with the command:  {{Command{getenforce}}}.  It should return the output ''disabled''.

!! Install and configure ~MediaWiki

!!! 1. Install the ~MediaWiki web software package

Download the package from the [[MediaWiki web site|http://www.mediawiki.org/wiki/MediaWiki]] with {{Command{wget}}}
* Look for the downloads section and find the latest version in .tar.gz format.  
* Current file name is {{File{mediawiki-1.38.4.tar.gz}}} as of writing this lab.  The latest version and file name may have incremented since then.
Save it to the directory {{File{/opt/work/htdocs/}}}
Extract the tarball with a command resembling:  {{Command{tar -xf mediawiki-1.38.4.tar.gz}}}
Rename the base directory from ''mediawiki-//version//'' to ''wiki''


!!! 2. Install the ~MariaDB database package

~MediaWiki will need a database to install its dynamic content.  We'll now need to install the ~MariaDB database package and php extension for connecting to the database.

# Install these three packages on your web server:  {{Monospaced{''mariadb-server mariadb php-mysql''}}}
# Set the mariadb service to start on boot
# Start the mariadb service now

!!!!  ''a.'' Verify the services are running
Check the status of the mariadb service with the command {{Command{systemctl status mariadb}}}.  The service ''must'' be enabled and active before proceeding.

[img[img/db-status.png]]


!!!!  b. Create wiki user

Now that the database service is installed, we need to create a user and database for the wiki.  

The mysql command can be used to connect to the database service as root, the database superuser.  There is currently no root password set.  Just press enter if prompted for one.  Once you are connected to the database, enter the remaining commands at the database prompt.  You may supply whatever password you'd like for //wiki_pass//, just be sure to remember what you use.

Execute the following commands:
{{Commands{
[root@www ~]# ''mysql -u root''

Welcome to the ~MariaDB monitor.  Commands end with ; or \g.
Your ~MariaDB connection id is 2
Server version: 5.5.68-~MariaDB ~MariaDB Server

Copyright (c) 2000, 2018, Oracle, ~MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

~MariaDB [(none)]> ''CREATE USER 'wiki'@'localhost' IDENTIFIED BY 'wiki_pass';''
Query OK, 0 rows affected (0.00 sec)

~MariaDB [(none)]> ''CREATE database wiki;''
Query OK, 1 row affected (0.00 sec)

~MariaDB [(none)]> ''GRANT select, insert, update, delete, create, alter, index ON wiki.* TO 'wiki'@'localhost';''
Query OK, 0 rows affected (0.00 sec)

~MariaDB [(none)]> ''flush privileges;''
Query OK, 0 rows affected (0.00 sec)

~MariaDB [(none)]> ''exit''
Bye
}}}

{{Note{''Note:'' If you goof the wiki user's password, it can be reset by logging into the database as the root user and running:  {{Monospaced{''SET PASSWORD FOR 'wiki'@'localhost' = PASSWORD('new_password');''}}}.  Replace //new_password// with whatever you want the password to be.  Then run the {{Monospaced{''flush privileges''}}} database command.}}}


Now test your connection to the database by logging into it with the new wiki user:

{{Commands{
[root@www ~]# ''mysql -u wiki -p wiki''
Enter password:

Welcome to the ~MariaDB monitor.  Commands end with ; or \g.
Your ~MariaDB connection id is 3
Server version: 5.5.68-~MariaDB ~MariaDB Server

Copyright (c) 2000, 2018, Oracle, ~MariaDB Corporation Ab and others.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

~MariaDB [wiki]> ''exit''
Bye
[root@www ~]#
}}}

!!! 3. Quick Verification: 

Our server software is installed; let's use {{Command{curl}}} to verify everything is working so far:

{{{
[root@www htdocs]#  curl http://localhost/wiki/
<!DOCTYPE html>
<html lang="en" dir="ltr">
        <head>
                <meta charset="UTF-8" />
                <title>MediaWiki 1.38.4</title>
                <style media="screen">
                        body {
                                color: #000;
                                background-color: #fff;
                                font-family: sans-serif;
                                padding: 2em;
                                text-align: center;
                        }
                        p, img, h1, h2, ul {
                                text-align: left;
                                margin: 0.5em 0 1em;
                        }
                        h1 {
                                font-size: 120%;
                        }
                        h2 {
                                font-size: 110%;
                        }
                </style>
        </head>
        <body>
                <img src="/wiki/resources/assets/mediawiki.png" alt="The MediaWiki logo" />
                <h1>MediaWiki 1.38.4 internal error</h1>
                <p>
                        MediaWiki 1.38.4 requires PHP 7.3.19 or higher; you are using PHP 5.4.16.
                </p>
                <h2>Supported PHP versions</h2>
                                <p>
                        Please consider <a href="https://www.php.net/downloads.php">upgrading your copy of PHP</a>.
                        PHP versions less than v7.3.0 are no longer supported by the PHP Group and will not receive
                        security or bugfix updates.
                </p>
                <p>
                        If for some reason you are unable to upgrade your PHP version, you will need to
                        <a href="https://www.mediawiki.org/wiki/Download">download</a> an older version of
                        MediaWiki from our website. See our
                        <a href="https://www.mediawiki.org/wiki/Compatibility#PHP">compatibility page</a>
                        for details of which versions are compatible with prior versions of PHP.
                </p>
        </body>
</html>
}}}


If you look closely at the error above, you'll see that ~MediaWiki isn't happy about our version of php.  The downside of Enterprise versions of operating systems and software is they tend to favor old and stable versions instead of the latest versions of software packages.  Here, ~MediaWiki is complaining that the default version offered by the standard yum repository is too old.  We're going to need to update php and deviate from the standard software repositories to do so.

!!!  4. Run these commands to update to a newer version of php:

* Install the EPEL repository:  {{Command{yum install epel-release}}}
* Install the [[Remi Release|https://rpms.remirepo.net/]] repository for php 7: {{Command{yum install http://rpms.remirepo.net/enterprise/remi-release-7.rpm}}}
* Install the yum-utils package: {{Command{yum install yum-utils}}}
* Enable the php 7.4 yum repository from Remi: {{Command{yum-config-manager &#045;-enable remi-php74}}}
* Install the new php:  {{Command{yum install php}}}
* Verify the version of php:  {{Command{php -v}}}
** It should now be (roughly) version 7.4.32
* Also install the following packages.  These will be required by ~MediaWiki:  {{Monospaced{''php-mbstring php-xml php74-php-gd php-intl''}}}
** A lot of dependencies will be required.  Install them too.
* Restart apache to activate the new version of php and its new extensions:  {{Command{systemctl restart httpd}}}

Now when you run our connection test, you shouldn't get any version errors or warnings about missing modules:

{{{
[root@www ]# curl http://localhost/wiki/
<!DOCTYPE html>
<html lang="en" dir="ltr">
        <head>
                <meta charset="UTF-8" />
                <title>MediaWiki 1.38.4</title>
                <style media="screen">
                        body {
                                color: #000;
                                background-color: #fff;
                                font-family: sans-serif;
                                text-align: center;
                        }

                        h1 {
                                font-size: 150%;
                        }
                </style>
        </head>
        <body>
                <img src="/wiki/resources/assets/mediawiki.png" alt="The MediaWiki logo" />

                <h1>MediaWiki 1.38.4</h1>
                <div class="errorbox">
                        <p>LocalSettings.php not found.</p>
                                <p>Please <a href="/wiki/mw-config/index.php">set up the wiki</a> first.</p>
                </div>
        </body>
</html>
}}}

We should be ready to configure the wiki software.



!! Bypassing network restrictions with proxies

A proxy is a middle man, passing on network requests to their destination on your behalf.  Our web server ~VMs are behind the lab infrastructure's router and cannot be accessed outside of that LAN.  We'll need to use a proxy in order to view the wiki sites in our browsers at home.

See the [[Tunnels & Proxies with SSH]] page for more information on how to set up a SOCKS proxy with SSH to access protected resources.


!!! 5. Configure your ~MediaWiki

With a SSH proxy in place, you should be able to complete the configuration of your wiki.  After establishing the tunnel, browse to http://your_www_ip_address/wiki/ to reach the configuration page.  It will look something like this.

[img[img/MediaWiki.png]]


As you are stepping through the configuration page, be sure to use these values:
* database host:  {{Monospaced{'' localhost ''}}}
* database name: {{Monospaced{'' wiki ''}}}
* database username: {{Monospaced{'' wiki ''}}}
* database password: ''// whatever password you used above //''

Once the Wiki setup is complete, you will be prompted to download the {{File{~LocalSettings.php}}} file to your home computer.  This file must then be uploaded to the {{File{/opt/work/htdocs/wiki/}}} directory on your web server.  You will be able to fully access your wiki after this file is uploaded.  
 - Our class ~VMs are on [[RFC 1918|https://datatracker.ietf.org/doc/html/rfc1918]] IP addresses. You cannot connect directly to your web server VM from home to upload the {{File{~LocalSettings.php}}} file.  It must be first uploaded to the class shell server.
 - The [[Virtual Machines]] page (linked on the top menu bar) has a diagram of our lab infrastructure which may be helpful.

The {{Command{scp}}} or {{Command{sftp}}} tools may be helpful for transferring files on the command line between ~VMs.


! Assignment

!! Web Server:
<<tiddler [[Lab 54 - Set up MediaWiki]]>>
! Material
!! Watch
NTP Tutorial: https://www.youtube.com/watch?v=eh5ZL_fNi0g

!! Read
* Linux Administration - The Logging Daemon / rsyslog / journald
** 7th Ed: Pages 230-240 in Chapter 8
** 8th Ed: Pages 187-197 in Chapter 9


! Time & Logging

Time and logging go together.  It's incredibly useful to know //when// something happened if you need to investigate a problem or security incident, especially if you need to correlate events among systems to build an accurate timeline.  If time is not properly synchronized among your systems, it's difficult to properly understand sequences of events.  You may also run into functional issues if time is wildly incorrect.  For example, SSL certificates may be considered invalid if time is wrong on a system.

!! Time

What's the [[difference between accuracy and precision|https://www.thoughtco.com/difference-between-accuracy-and-precision-609328]]?

Importance of accurate time:
* file timestamps - when something was modified
* tracing events - Knowing when a breach occurred, when a change was made, or when someone logged in to a system
* security
** certificate validity - Certificates are only valid for a certain time range.  If a system's time is off, it may negatively impact secure communication

Importance of precise time:
* correlating activities between systems

Ideally we'll have both - accuracy and precision.  We want the correct time on all systems.  But the closer time is among systems in a network, the easier it will be to correlate events between them.


!!! Setting the system date
* {{Command{tzselect}}} - Select the time zone.  This is typically done for you on most modern installs
* The symbolic link {{File{/etc/localtime}}} will point to the timezone file to use
** Timezone definition files are typically stored within {{File{/usr/share/zoneinfo/}}}
* {{Command{date ~MMDDhhmm&#91;[CC]YY]}}} - set the system date and time manually
** It's usually not necessary to set the date/time and timezone in a VM.  ~VMs should obtain their time from the host.

!!! Network Time Protocol (NTP)
* NTP provides an automated way to keep time in sync and counter clock drift.
* A local server is configured to query a pool of many time servers and the best candidates will be used to keep the clock in sync
* They can maintain time to the millisecond 
* Clock strata - Distance from the reference clock
** Stratum 0 - The reference clock.  High precision, high accuracy clocks, such as atomic, GPS, or radio.
** Stratum 1 - Primary time servers.  Systems directly attached to and synchronized with stratum 0 clocks
** Stratum 2 - Secondary time servers.  Systems synchronized to stratum 1 time servers over the network.
** Stratum n+1 up to 15 - Time servers synchronized to a lower stratum clock
** Stratum 16 - An unsynchronized clock.

!!!! NTP Commands:
* {{Command{ntpdate}}} - Client utility.  A one-time immediate clock update.  Requires a time server to use as an argument.
** eg: {{Command{ntpdate 0.pool.ntp.org}}}
** Can be enabled to run on boot to force a time update on system startup.
* {{Command{ntpd}}} - Background service to maintain time synchronization
** Sets and maintains system time in sync with a central point
** Regularly polls one or more time servers for updates
** The ntpd service updates time slowly in small steps
** May use an internet-based source time server or a local one.
*** Generally, a large site will maintain an ntp server locally that other systems on the local network will synchronize against
*** Using a local service increases security and reduces strain on the public NTP servers
** An NTP service may be configured to provide time synchronization to client systems
** {{Command{ntpstat}}} - show the status of the current ntp service
** {{Command{ntpq}}} - query an ntp server
*** ntpq sub commands:
**** peers
**** associations
*** Tally codes for peers:
**** blank - Unreachable and discarded
**** - - Considered an outlier and discarded
**** + - Providing data and a candidate for use
**** * - The system peer and providing data

** {{Command{ntpdc}}} - control an ntp server

!!!!! {{Command{ntpq peers}}} command example for a fully synchronized NTP client:
[img[img/ntpq-peers.jpg]]
Left to right:
* Red - Tally code.  Here it is indicating an accepted NTP peer which is providing data
* Green - Remote NTP server.  Who we are obtaining our time from.
* Yellow - the time reference our source is using
* Blue - Stratum level of our time source 
* Orange - Connection type.  U means unicast
* Purple - Connection statistics 



{{Warning{''Warning'':  NTP is a very basic protocol that uses UDP port 123 for its communication.  NTP services will bind to that port and client tools will try to communicate over that port.  If the ntpd service is running and bound to the port to listen for connections, the port is then not available for the {{Command{ntpdate}}} client tool to use.  If you must run {{Command{ntpdate}}}, stop the ntpd service to free up the socket and then start it back up again after running {{Command{ntpdate}}}.}}}


UDP Reflection attacks against NTP
* Reflection attacks are a big problem.  DNS and NTP were popular targets
** A tiny network request by an attacker can be "reflected" to its intended target.  
** Generally the amount of network traffic generated by the reflection toward the target is significantly larger than the request
** A ~Denial-of-Service attack is launched requiring only minimal resources of the attacker
* http://blog.cloudflare.com/technical-details-behind-a-400gbps-ntp-amplification-ddos-attack/
* https://ics-cert.us-cert.gov/advisories/ICSA-14-051-04
* monlist command
** This is what was abused in the last NTP reflection attack.  A small NTP request would return a very large response.  
** The request IP address would be spoofed so the response is sent to the victim
* ntpdc -n -c monlist core.merantn.ncs205.net
* To protect against this attack: 
** disable monitor
** add the noquery restriction to the configuration


!! Logging

Unix logging is an excellent resource and can quickly solve a lot of problems for you.

Here's a great example.  I have a typo in my Apache configuration file and the service will not restart.  The log entry details exactly what the problem is and where the problem originates:

{{{
[root@www conf]# systemctl restart httpd
Job for httpd.service failed because the control process exited with error code. See "systemctl status httpd.service" and "journalctl -xe" for details.

[root@www conf]# systemctl status httpd
 httpd.service - The Apache HTTP Server
   Loaded: loaded (/usr/lib/systemd/system/httpd.service; enabled; vendor preset: disabled)
   Active: failed (Result: exit-code) since Wed 2020-04-08 23:50:48 EDT; 4s ago
     Docs: man:httpd(8)
           man:apachectl(8)
  Process: 2170 ExecStop=/bin/kill -WINCH ${MAINPID} (code=exited, status=1/FAILURE)
  Process: 2168 ExecStart=/usr/sbin/httpd $OPTIONS -DFOREGROUND (code=exited, status=1/FAILURE)
 Main PID: 2168 (code=exited, status=1/FAILURE)

Apr 08 23:50:48 www systemd[1]: Starting The Apache HTTP Server...
Apr 08 23:50:48 www httpd[2168]: AH00526: Syntax error on line 1 of /etc/httpd/conf/httpd.conf:
Apr 08 23:50:48 www httpd[2168]: Invalid command 'my', perhaps misspelled or defined by a module not included in the server configuration
Apr 08 23:50:48 www systemd[1]: httpd.service: main process exited, code=exited, status=1/FAILURE
Apr 08 23:50:48 www kill[2170]: kill: cannot find process ""
Apr 08 23:50:48 www systemd[1]: httpd.service: control process exited, code=exited status=1
Apr 08 23:50:48 www systemd[1]: Failed to start The Apache HTTP Server.
Apr 08 23:50:48 www systemd[1]: Unit httpd.service entered failed state.
Apr 08 23:50:48 www systemd[1]: httpd.service failed.
}}}

Notice the lines above:
''Apr 08 23:50:48 www httpd[2168]: ~AH00526: Syntax error on line 1 of /etc/httpd/conf/httpd.conf:''
''Apr 08 23:50:48 www httpd[2168]: Invalid command 'my', perhaps misspelled or defined by a module not included in the server configuration''

If I examine line 1 of my configuration file as the log suggests, I'll spot my problem - text that doesn't conform to the required syntax of the file.

{{{
[root@www conf]# head -5 httpd.conf
my typo
# This is the main Apache HTTP server configuration file.  It contains the
# configuration directives that give the server its instructions.
# See <URL:http://httpd.apache.org/docs/2.4/> for detailed information.
# In particular, see
}}}

Syslog:
* The syslog service is the primary recipient of system-level event log information
** syslog then determines what should be done with that log data based on configuration
*** save it locally, send it to another system for log aggregation, or discard it
** Allows for centralized log collection and management
* Some utilities/services log directly to their own files and some use syslog
** Apache is an example of a service that saves log data to its own files
* syslog events are written to the domain socket /dev/log 
** sockets provide inter-process communication via the filesystem
** Processes either communicate via open network ports or these socket files
* log events contain the timestamp, type, severity, and details
* Most log files are plain text, allowing review or parsing with standard unix CLI tools, such as the filters we've been working with

* syslog events consist of pre-defined facility and severity levels
** facility is generally the service that generated the message (auth, cron, ftp, mail) and based on standardized names
*** local0-7 facilities are for customized destinations
*** or the keyword none to disable a particular facility or severity
** severity ranges from emergency to debug
*** When specified, that severity level and greater will be processed
** See the /var/log/messages example in /etc/rsyslog.conf
** Here's a list:

[img[img/syslogFacility.jpg]]
[img[img/syslogSeverity.jpg]]


Most services can elevate verbosity for debugging, recording additional information to assist with troubleshooting.
 - This should only be enabled for a short time.  The extra log entries may consume a lot of space over a long period of time.

!!! syslog components:
* syslogd - the logging service which receives and processes the log information
** {{File{/etc/rsyslog.conf}}} - The main configuration file
** {{File{/etc/rsyslog.d/}}} - The secondary configuration files
* library routines to submit log messages to syslogd
* {{Command{logger}}} - userland utility for recording log events from the shell.  Handy for scripting.
** Monitor or debug your automated scripts
** Backups and account processing are good examples
** {{Command{logger -t $0 -p local5.warning "test message"}}} - Send a test message to syslog from within a shell script with the local5 facility and warning severity
* logrotate / newsyslog - rotate logs at a configured time or file size
** It's important to rotate logs instead of letting them accumulate indefinitely.  Eventually they will consume the filesystem and will likely cause system failure.
** It's wise to account for this when designing a system and put logs on a separate filesystem.
** Retention issues - How long do we keep logs for?
** compress or delete old logs according to an archival schedule
** Logrotate - a tool which periodically runs to rotate log files
*** {{File{/etc/logrotate.conf}}} - Main configuration file
*** {{File{/etc/logrotate.d/}}} - Secondary configuration files
*** Periodically executed by cron to process the log files
**** Take a look at the file {{File{/etc/cron.daily/logrotate}}}

Standard exemptions to syslog:
* wtmp - binary file, view with last command
* lastlog - view with lastlog command
* psacct - process accounting service.  View with lastcomm command
** Not built-in.  Will need to install psacct package and enable psacct service
* Some services do not send to syslog and instead manage log files themselves:
** Apache
** BIND DNS server

syslog as a network service
* syslog is by default a local service and not bound to a network port
* But can be configured to collect log events from multiple hosts
* Many benefits to central logging:
** Aggregate logs in one place for central review and retention
** If an attacker breaks into a system, they cannot easily remove the logs to cover their tracks if the logs are also stored on another server

!!! Kernel boot logging and message buffer
* We need a way to record the kernel events prior to init, before syslog starts and the filesystems are mounted
* The kernel stores this boot information in an internal buffer
** Also contains the system hardware detected on boot and any subsequent hardware changes.
* Captured by the system and recorded once it fully loads
* Viewable with the {{Command{dmesg}}} command
* Also saved to {{File{/var/log/dmesg}}}


!!! Systemd Additions:

!!!! Manage time:
* {{Command{ timedatectl list-timezones }}}
* {{Command{ timedatectl set-timezone //zone// }}}
* {{Command{ timedatectl status }}}

!!!! Display Logs:

{{Command{ journald }}} - New logging daemon with systemd
* Can replace or augment syslog

{{Command{ journalctl }}} - Front end for displaying logs
* Logs since last reboot: {{Command{ journalctl -b }}}
* Show last 10 lines (replace tail): {{Command{  journalctl -n }}} or {{Command{ journalctl -n 20 }}}
* Display new log entries as they arrive (replace tail -f):  {{Command{ journalctl -f }}}
* Display kernel messages:  {{Command{ journalctl -k}}}
* Display log entries for a particular unit:  {{Command{ journalctl -u httpd.service }}}
** For a particular time range:  {{Command{ journalctl -u httpd.service &dash;&dash;since yesterday }}}
** {{Command{ journalctl -u httpd.service journalctl &dash;&dash;since "2015-10-01" &dash;&dash;until "2015-10-31 03:00" }}}


!!! We have all this log data, now what?

* Logcheck - A utility to mine the log data and send reports
* fail2ban - Scan log files and ban malicious ~IPs
** Perform a regular expression match and add offending ~IPs to a firewall
** Important way to combat all of the automated scanning on the internet.  Our class shell server is under constant attack and countermeasures like this should be deployed.

The class shell server is currently blocking 293 IP addresses which attacked us sometime in the last 9 hours:

{{{
[root@shell ~]# fail2ban-client status sshd-root
Status for the jail: sshd-root
|- Filter
|  |- Currently failed: 0
|  |- Total failed:     1514
|  `- Journal matches:  _SYSTEMD_UNIT=sshd.service + _COMM=sshd
`- Actions
   |- Currently banned: 293
   |- Total banned:     1441
}}}


* Web log data
** http://en.wikipedia.org/wiki/List_of_web_analytics_software
** [[GoAccess|http://goaccess.io/]]
** http://www.awstats.org/
** [[Logstalgia|https://code.google.com/p/logstalgia/]] 

Big data analytics: 
* ELK stack - Free & open source (FOSS)
** Elasticsearch - log searching and data analytics
** Logstash - centralized logging and parsing
** Kibana - data visualization
* Enterprise SIEM (Security information and event management) tools
** Splunk - Big data analytics with a nice web front-end
*** //Splunk captures, indexes and correlates real-time data in a searchable repository from which it can generate graphs, reports, alerts, dashboards and visualizations//


! Assignment

<<tiddler [[Lab 55 - Bring core VM online]]>>
----
<<tiddler [[Lab 56 - Time]]>>
----
<<tiddler [[Lab 57 - Logging]]>>
----
<<tiddler [[Lab 58 - Working with logs]]>>
! Material
!! Read: 
- Linux Administration Chapter 17 - DNS. 
- Read the notes in the Material section below when completing the lab assignment.
!! Watch:
* DNS Intro: https://www.youtube.com/watch?v=mpQZVYPuDGU


! Useful additional reading:

*[[DNS for Rocket Scientists|http://www.zytrax.com/books/dns/]]
*Open Resolvers for network amplification attacks (~DDoS)

 - kaminsky DNS bug
 - DNSSEC
 - get familiar with the dig command
 - http://www.mayhemiclabs.com/tools/malwarednsscraper
 - http://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml
 - EXPOSURE: Finding Malicious Domains Using Passive DNS Analysis - http://www.syssec-project.eu/m/page-media/3/bilge-ndss11.pdf


http://en.wikipedia.org/wiki/Forward_Confirmed_reverse_DNS
http://www.ietf.org/rfc/rfc1912.txt
http://www.netwidget.net/books/apress/dns/info/ttl.html


! DNS Notes

Unfortunately we don't have enough time to cover DNS in depth.  What's here is only the highlights.  DNS is a core service that is critical for the proper operation of the internet.  Everyone should be familiar with its components and how it works.

!! Host names and some history

Numbers are easier for machines to work with.  But names are easier for people to work with.  As the number of systems on the early networks grew, it became more difficult to keep track of their addresses.  The file  {{File{/etc/hosts}}} was used to set up a manual mapping between IP addresses and hosts before the advent of DNS.  The users could now refer to systems by name and the  {{File{/etc/hosts}}} file would convert that name to an IP address.  Users/administrators would have to maintain their {{File{hosts}}} file themselves, and some would share these files with others.

As the networks kept growing, maintaining these static files and their frequent changes became more difficult.  They would often fall out of date as systems were added, removed, and addresses were changed.  A centrally accessible yet locally maintained system needed to be devised to handle this conversion of host names to IP addresses.  This system became the Domain Name System (DNS).

The {{File{/etc/hosts}}} file still exists on all unix systems.  We worked with it during our last labs since we did not yet have DNS in place.  Windows systems also have a hosts file buried within their system32 directory.  By default, hosts defined in the hosts file will override information that comes from DNS.  This is an important thing to keep in mind.  If someone tampers with your hosts file, they could easily redirect your communication elsewhere.  Check the output of the {{Command{w}}} command on the class shell server.  Where am I connecting from?  Now examine the {{File{/etc/hosts}}} file.

On Unix systems, the file {{File{/etc/nsswitch.conf}}} sets the query order for the databases on the system.  By default, the local files (eg, {{File{/etc/hosts}}}) are searched first and any matches in the local files will override DNS.  This mechanism can also be used to obtain user and group information from other sources.

!! DNS - Domain Name System


A distributed and hierarchical database
* must be robust.  Function of the internet relies on this
* must be distributed
** different groups are responsible for different segments of the data
** if one node goes down the rest of the global data network is not impacted
* must be efficient
** LOTS of requests per second
** caching
*** cached responses saved on local DNS servers for the TTL time (time to live)
*** change to a low TTL value before major updates so records aren't cached very long
*** otherwise, keep a high TTL value to maximize caching and optimize performance

!! Delegation

An example of a proper, fully qualified domain name:  www.ncs205.net''.''  (notice the dot at the end)
The dot at the end is often omitted, but it signifies the root servers.  The delegation of a host name begins on the right end and traverses towards the left.

* root servers  (.)
** These are the main DNS servers for the internet.  There are 13 root servers, a.root-servers.net. through m.root-servers.net.
* Top level domain (TLD):  ccTLD, gTLD, sTLD
** ccTLD:  Country code TLD.  eg:  us, de, cn
** gTLD: Generic TLD.  General purpose.  eg: net, org, com
*** New gTLDs:  Expanding further beyond the original net, com, org:  http://newgtlds.icann.org/en/program-status/delegated-strings
** sTLD: Sponsored TLD.  Has a sponsor representing a specific group.  eg:  .xxx, .travel, .jobs
** Policy set by the Internet Corporation for Assigned Names and Numbers ([[ICANN|https://www.icann.org/]])
** Lists maintained at the Internet Assigned Numbers Authority ([[IANA|http://www.iana.org/domains/root/db]])
* Domain ownership maintained by central registry operators.  These are the companies that are responsible for managing domains within their TLD.
** com & net: ~VeriSign
** org: Public Interest Registry
** edu: Educause
** us:  Neustar
** info: Afilias
** arpa: IANA
* Domain sale contracted out to commercial registrars
** Various registrars sell second-level domains.  eg:  Google, Godaddy, NameCheap, CloudNS, name.com, etc.
** The Authoritative name servers for a domain are stored with the TLD's central registry
** The {{Command{whois}}} command will display registration information for a domain:  {{Command{whois sunypoly.edu}}}
* sub-domains
** each domain can delegate authority for sub domains to other servers
** Allows a domain owner to create their own sub-domains
*** cs.sunypoly.edu : edu delegates sunypoly to ITS, ITS delegates cs to the ~DogNET
*** ncs205.net : net delegates ncs205 to me, I'll delegate //username//.ncs205.net to you
* Glue Records
** An IP address of a domain's name server held at the domain's registry
** Prevents circular dependencies
** Check whois information for a domain to see its registered name servers.
** The {{Monospaced{A}}} record results for name servers must be statically published upstream
*** The {{Monospaced{A}}} record is a type of DNS record.

Glue records are important, otherwise we have a chicken and egg problem.  For example, .net needs to publish the glue records for ncs205.net so anyone looking for its records knows which server to contact.

When I registered the domain ncs205.net, I needed to include the names of the domain's DNS servers.  This is so anyone searching for records belonging to ncs205.net will know where to look for them.  The names of the DNS servers for ncs205.net are ns1.ncs205.net, ns2.ncs205.net, and ns3.ncs205.net.  But how can I look up the IP address for those three hosts if I don't know the IP address of the ncs205.net DNS server?  To get around this problem, I need to publish glue records to the .net registry.  These are hard-coded DNS records so the .net DNS servers can provide the IP addresses for those three servers.

[img[img/dns.png]]


!! Host name representation

Fully Qualified domain name (FQDN)
* Similar to an absolute path in the filesystem
* Instead it's read right to left
* First is root, the .
* Next the tld (com, edu, org, etc)
* Next the second-level domain
* Sub domains or host names would follow.
* Example:  fang.cs.sunyit.edu.  
* Client tools will append the trailing ''.'' if you leave it out, but its use in your DNS zone file configuration matters.

<<<
Use the {{Command{whois}}} tool to obtain domain registration and ownership information.
{{Command{yum provides '*/whois'}}} to see which package contains that command
<<<


!! Zone Apex 
* Also known as a naked domain
* What will your domain itself resolve to?  
** It will typically resolve to the same IP address as www.

{{{
@	IN	A	192.168.12.25
@	IN	MX 10	mail
}}}
{{Note{''Note:'' The @ is a shortcut which represents the name of the zone.}}}


!! Authoritative name server
* Contains the data for a particular zone
** A DNS zone is a collection of records for a particular domain or sub-domain.
* It is derived from chaining of NS records starting with the root servers down to the published name servers for a domain
** The NS records identify the authoritative DNS servers for a zone and delegate subdomains
* There is typically a master DNS server and one or more slaves that sync from it
** RFC best practices require at least 2 authoritative name servers per domain
* Authoritative name servers are guaranteed to be up to date.  The data is coming from the source and is not cached

The {{Command{dig}}} command is a powerful tool for querying DNS records.

Trace the delegation from the root servers down to the ~DogNET DNS servers:  {{Command{dig +trace fang.cs.sunyit.edu}}}
Trace google's delegation:  {{Command{dig +trace www.google.com}}}
Query a ~DogNET authoritative DNS server for a record:  {{Command{dig fang.cs.sunyit.edu @ns1.cs.sunyit.edu}}}
 - notice the {{Monospaced{''aa''}}} flag in the flags section at the top.  This means it's an authoritative response.
 - Can you spot the misconfiguration in the Additional Section at the bottom?

!! Zone transfer
* Keeps authoritative DNS servers in sync with each other.
* We want to have multiple servers per zone, ideally distributed across networks.
** DNS is a vital resource.  We don't want single points of failure.
* Zone transfers are triggered automatically when the master is updated or based on a timeframe in the SOA record
* Typically restricted to systems authorized by an ACL (access control list)
** We don't want anyone to be able to pull down all of our records.  Only permit trusted name servers to do this.
* Our pyramid topology
** ns0 setup - A top server which doesn't allow queries from end users distributes zones to lower servers that handle client requests
** especially useful for protecting records and security keys
** and ensuring availability and integrity of the data
** ns0 is the master and kept secure.  When updates are published, it will signal the zones to connect and pull the data

!! Non-authoritative
* Caching name servers.  These are the general-purpose name servers available, such as those offered by your ISP, the Google 8.8.8.8 servers, or the ~CloudFlare 1.1.1.1 DNS servers.
** They cache records for the TTL value to improve performance
** {{Command{dig www.google.com}}}
** clear the cache (if you run the name server) - {{Command{rndc dumpdb}}}
*** or {{Command{ipconfig /flushdns}}} on Windows
** cache snooping
*** analyze the cache to see if malware is on your network (or to snoop on what your users are accessing)
* data retained in the cache might be a little stale

From off campus, such as our shell server:
{{Command{dig fang.cs.sunyit.edu}}}
 - We're now querying the local name server instead of the campus server
 - notice lack of aa flag


!! Recursive vs non-recursive 
* non-recursive : I don't have that data, but you can look over there
** required for core nameservers to offload lookups to other servers (requires less resources)

* recursive : I don't have that data, but let me find it for you
** Performs queries on behalf of clients on the network  (Your ISP may have one)
** pro - data is stored in the cache for faster future lookups
** con - ~DDoS amplification attacks
*** These kind of attacks are a big deal:
<<<
     Up to 70:1 attack ratio (60 bytes for the request, 4000 for the response)
     spamhaus attack - 100gbps
     demo : dig isc.org any
                dig ripe.net +edns=0 +bufsize=4096 any
     DNS traffic is UDP, fire and forget, easy to spoof
     Like open mail relays - have to close down open recursive servers now that they're being actively exploited
     dig +short amiopen.openresolvers.org txt 
<<<
** http://www.theregister.co.uk/2013/06/03/dns_reflection_ddos_amplification_hacker_method/
** http://blog.cloudflare.com/the-ddos-that-knocked-spamhaus-offline-and-how-we-mitigated-it
** mix - allow recursion for trusted local hosts, but not for remote ones

From the class shell server:
{{Command{dig www.google.com @ns1.cs.sunyit.edu}}} - You (hopefully) will get a //recursion requested but not available// error.
{{Command{dig www.google.com @ns1.verizon.com}}} - Same as above.
{{Command{dig www.verizon.com @ns1.verizon.com}}} - This should return a record because the name server we're querying is authoritative for  it.


!! Hints file
*Store and serve IP addresses for root servers on the local DNS server
** Another chicken and egg problem.  A local DNS server needs to know where the root servers are in order to know where to start looking for data.  This hard-coded mapping between the root server names and their IP addresses is called the hints file. It works similar to the {{File{/etc/hosts}}} file.  You can find a copy of it in {{File{/var/named/named.ca}}} after you install your DNS server software.

!! Querying DNS

Tools for searching DNS:
* {{Command{nslookup}}} - A standard utility available in windows and unix.  It's in the bind-utils package on ~CentOS
* {{Command{host}}} - A barebones DNS query utility
* {{Command{dig [record_type] record [@server]}}} - A very powerful DNS query utility.  Also available in the bind-utils package
** {{Command{dig -x //ip//}}} for reverse DNS queries
* {{Command{drill}}} - A popular alternative to dig.

!!! Using dig
* See available options with {{Command{dig -h}}}
* Don't forget the -x for reverse queries
* perform zone transfer to test data
** From either the class shell server (the slave) or your core VM (the master):  {{Command{dig merantn.ncs205.net @localhost axfr}}}
** axfr is the record type for a zone transfer
** It's always good to compare results between your master server and the slave to ensure they match
* see TTL and cache time remaining
* dig errors
** NXDOMAIN - No records for any type
** NODATA - No records for the requested type  (displayed as NOERROR with no answer section)
** REFUSED - The server is refusing our connection likely due to recursion being requested.


!! Creating Zones

DNS records are grouped into zones on the actual authoritative name servers
Each zone contains resource records of various record types
* lines in the zone files are our records
* There are two types of zones, forward and reverse
** forward: convert host name to IP address
** reverse: convert IP address to host name


!! Resource Records

Resource records are the individual records within a zone and there are many different types.

[[RFC 1033|https://www.ietf.org/rfc/rfc1033.txt]] - Domain Administrators Operations Guide

Format: 
''<name>   [<ttl>]   <class>   <type>   <data>''

Example:
www		5m	IN	A	10.1.2.3

www = the host name to map to the IP address
5m = Change the TTL of this record to 5 minutes.  This field is optional.
IN = Internet
A = The type of record we're creating
10.1.2.3 = The IP address we're mapping the hostname to


!!! SOA - Start of Authority

Each zone must have a SOA record at the top containing the:
* serial number for the zone.  This number must increment every time a change is made, otherwise changes will not propagate
** It is an unsigned 32 bit value and must be between 1 and 4294967295.
* refresh - How often slaves check in with their master
* retry - if the master is down, slaves will retry after this amount of time 
* expire - How long a slave will serve data after losing contact with the master.  After this time the data will be considered too stale to provide to others.
* minimum - Sets the TTL to cache negative responses. Negative responses are those where a record is not found or does not exist.
** The default unit for the last 4 values is seconds.  Time may be appended as in the example below:  ''m''inutes, ''d''ays, or ''w''eeks.

Here's a sample SOA record:

{{{
@ IN  SOA ns1.merantn.ncs205.net. hostmaster.merantn.ncs205.net. ( 
 2022102900  ; serial number
 1d    ; refresh 
 5d    ; retry 
 2w    ; expire
 30m   ; minimum
)
}}}
''Note:''  The @ is a special shortcut that represents the name of the zone.

{{Warning{''Important note on the SOA serial number:''  This number ''must'' always increment when changes are made to the zone file.  Be very careful when doing so.  Typos here will cause you problems later.  You can use simple numbers for the serial number (eg, start with 1 and then increment + 1 after each change).  However, the ''best practice'' is to use the date you made the change in YYYYMMDD format followed by a two digit count.  This is represented above with the serial number {{Monospaced{''2022102900''}}}.  This format will provide a value that's always increasing while indicating the date when the zone was last updated.  ''Always'' update the date value to the current date when you're updating the zone and increase the count number by one if you're making an additional change for the day.  Reset the count to {{Monospaced{00}}} after updating the date to indicate the first change of the day.

__''If you do not update your serial number then zone changes will not propagate beyond your local DNS server!''__}}}



!!! NS - Nameserver record
 - identify name servers for the current zone and delegate sub-domains
{{{
		IN  NS  ns1.merantn.ncs205.net.
		IN  NS  ns5.ncs205.net.
}}}
''Note:''  If no record name is provided (the missing first column)  it is inherited from the previous record.  In this case, the previous record was the @.

!!! A
 - Address records map a host name to an IP address
 - We can set a TTL value for a specific record
 - Useful if you are about to make a change
{{{
ns1		IN	A	192.168.12.26

test		IN	A	192.168.12.24
www		IN	A	192.168.12.25
core		IN	A	192.168.12.26
}}}

{{Note{''Note:''  A relative hostname may be used instead of entering the fully qualified domain name (FQDN).  If a host name does not end with a . (dot), then the name of the zone will be appended to it.}}}

For example, these two records are both functionally equivalent.  Notice the ''.'' at the end of the FQDN.
{{{
www				IN	A	192.168.12.25
www.merantn.ncs205.net.		IN	A	192.168.12.25
}}}


!!! CNAME
* An alias which will resolve to the canonical host then return its A record IP address
* depth limited to 6 aliases

These will all create CNAME records which will resolve to the core VM:
{{{
loghost		IN	CNAME	core
ntp		IN 	CNAME	core
directory	IN	CNAME	core
}}}
The same concept as above applies here.  The zone name will be appended to each host name which does not end with a dot.


!!! PTR
 - pointer records map from ip address back to host name.  These record types must ''only'' be used in //reverse// zones.  Reverse zones contain mappings of IP addresses to host names.  Do not put them in the same zone file as your forward DNS records!

{{Command{cat /etc/named/master/merantn.ncs205.net.rev}}}
{{{
24 IN  PTR  test.merantn.ncs205.net.
25 IN  PTR  www.merantn.ncs205.net.
26 IN  PTR  core.merantn.ncs205.net.
}}}

!!! MX
* mail exchanger record identifies which systems to send mail to
* These are weighted by priority for multiple mail servers
* Must point to an A record host name
* Example:  {{Command{dig mx google.com}}}

!!! TXT
* create text string records in DNS
* Useful for publishing additional information about a zone or a host
* The text value may need to be enclosed in quotes


!!! These are only the primary record types!  There are many more.


Sources
 - http://dns.measurement-factory.com/surveys/openresolvers.html


! Assignment

<<tiddler [[Lab 59 - Bind config and zones]]>>
We'll use this time to either get caught up on past material or push ahead to the next.  Getting a head-start on the [[Week 14, Part 1]] material would be wise.

[[Lab 58 - Working with logs]] was also finished and is now available for you to complete.
{{{
# cat /etc/motd
************************************
*        Happy Thanksgiving        *
************************************
*                                  *
*                      .--.        *
*      {\             / q {\       *
*      { `\           \ (-(~`      *
*     { '.{`\          \ \ )       *
*     {'-{ ' \  .-""'-. \ \        *
*     {._{'.' \/       '.) \       *
*     {_.{.   {`            |      *
*     {._{ ' {   ;'-=-.     |      *
*      {-.{.' {  ';-=-.`    /      *
*       {._.{.;    '-=-   .'       *
*        {_.-' `'.__  _,-'         *
*                 |||`             *
*                .='==,            *
*                                  *
************************************
}}}
! Material

!! Crypto & Securing Communication:

!!! Reading:
Oddly, our textbook does not discuss SSL encryption with Apache.  The concepts we're applying to Apache are universal among services.  Only the implementations differ slightly.

Here's are some alternate sources to review:

!!! Watch:
* Brief overview: https://www.youtube.com/watch?v=w0QbnxKRD0w
* Crypto overview: https://www.youtube.com/watch?v=AQDCe585Lnc
** The math behind asymmetric encryption: https://www.youtube.com/watch?v=YEBfamv-_do
* The TLS handshake: https://www.youtube.com/watch?v=cuR05y_2Gxc

!! Scheduled Tasks:

!!! Read:
* Linux Administration Chapter 9, pages 197-199 (cron) 

{{Warning{''Warning:'' The textbook is light on the details and only discusses cron from a user perspective.  There is not much mentioned about the system scheduled tasks saved in the files within {{File{/etc/}}}.  The notes below are more thorough.}}}


! Notes

Understanding the core concepts involved with securing network communication is important for a security practitioner.  The advent of [[Let's Encrypt|https://letsencrypt.org/]] and the free SSL certificates they offer has made trusted encryption available to the masses.  Prior to the Let's Encrypt project, a site operator had to pay a commercial certificate authority to issue a certificate for their site.  This added expense limited encryption to those with the time and budget to pay for it. 

We're now going to cover some core encryption concepts while implementing secure communication for our web sites.

!! Different concepts for different purposes
* ''Encoding'' - Data is transformed from one form to another.  Usually easily reversible and not secure.  
** base64 encoding to convert a binary file to text for transmission over email (a text-based medium)
*** The {{Command{base64}}} command will encode or decode a base64 encoded string
** audio or video encoding, employing a codec (coder-decoder)
* ''Hashing'' - Data is converted into a fixed-size string (a hash) using a non-reversible hash function.  The length of the hash is always the same, regardless of the amount of input data.
** Algorithms have evolved over time.  Current available algorithms are md5, sha-256, and sha-512.
** Used to secure passwords.  A hash of your password is stored on the system.  When you log in, the password you enter is hashed and compared to the hash stored on the system.
*** Salt: Extra data that is added to a password to ensure the same passwords do not have the same hash.  Significantly slows down brute force attacks.
*** Generate a password hash with perl: {{Command{ perl -e 'print crypt("PlaintextPassword","\$6\$hash-salt\$") . "\n"' }}}
**** {{Monospaced{//PlaintextPassword//}}} is the password you would like to hash
**** {{Monospaced{//hash-salt//}}} is the salt to use.  Salts should be valid random characters and ideally unique to each stored password.
** Used for integrity validation.  A hash of a file can be saved.  If the file changes, its hash will no longer match the stored copy.  
*** Commands to generate a hash of a file: {{Command{md5sum}}}, {{Command{sha256sum}}}, and {{Command{sha512sum}}}.
*** Hashes of your labs are stored in {{File{/opt/pub/ncs205/submit/checksums}}} when they are collected to ensure changes are not made after they are graded.
* ''Encryption'' - Use of an encryption algorithm and a secret key (cipher) to transform a private message (plain-text) into encrypted data (cipher-text).  Only those possessing the secret key can view the original message.
** Encryption is reversible with the encryption key

!! Main Encryption Goals:
* Confidentiality - Prevent disclosure to unauthorized parties
* Integrity - Prevent tampering of transmitted data
* Authenticity - Ensure communication is genuine and with the intended target

!! Encryption basics

There are two different types of encryption algorithms - symmetric and asymmetric.  It is important to understand the differences between them and where each is appropriate.

!!! Symmetric cryptography:
* Same key is used for both encryption and decryption of the message
* Also known as a shared secret
** {{Command{openssl aes-256-cbc -a -salt -in secretfile.txt}}}
** Secure file transfer with netcat and openssl
*** receiver#  {{Command{ nc -l 4444 | openssl aes-256-cfb -salt -d | tar -xvf - }}}
*** sender#  {{Command{ tar -cvf - file | openssl aes-256-cfb -salt -e | nc client 4444 }}}
* Pros:
** Fast
** Not resource intensive 
** Useful for small and large messages
* Cons:
** Key exchange must occur over a secure channel
** How can you exchange crypto keys over a secure channel that doesn't yet exist because you haven't exchanged keys yet?  Another chicken and egg problem.

!!! Asymmetric:
* Public key cryptography
* Two keys instead of one shared secret
** public key - available for everyone.  Can be published
** private key - kept secret and secure.  Typically locked with a passphrase.
* Data encrypted with one key can be viewed or verified by the other
** Can be used for encrypting or signing messages
* Pros:
** Safe key distribution over an insecure channel
* Cons:
** Slow
** More resource intensive
** Only useful for small messages


!!! Symmetric / Asymmetric Hybrid
* Use asymmetric encryption only to transmit a symmetric key.
* Then use symmetric encryption for the actual message.
* The best of both algorithm types:
** Can safely exchange key data
** Fast
** Not resource intensive
** Useful for small and large messages


!! Encryption Uses:

PGP / GPG
* Encrypt or sign files and messages
* Command line tools available:  {{Command{gpg}}}
SSH
* user keys for authentication instead of passwords
** Use ssh-keygen to generate keys
* host keys for encrypting communication between client and server

!!! SSL Certificates
* A SSL certificate contains information about the owner of the certificate and their public key
* Signed by a Certificate Authority (CA) to establish trust
** The Certificate Authority is //supposed// to take some steps to verify the site owner actually owns the site and they're only issuing certificates to the owners.
** Typically a commercial company like Godaddy, Verisign, or Entrust
** Or nonprofit CA to issue free certs:  https://letsencrypt.org/
** ~Self-signed certificates can be created without using an external entity, but they won't be trusted by default
* The CA's signature is added to the certificate to establish trust
** If you view a site's certificate in your web browser, you can see the chain of trust from the Certificate Authority to the site certificate.
* Certificate Verification occurs by matching the host name in the certificate to the host name in the network communication
** Host name in the URL must match the host name in the certificate (single host)
** Or use a wildcard certificate for many sites (*.sunyit.edu)


!!!! SSL Trust

CA certificate stores
* Hardcoded list of trusted root CA certificates in either the application or operating system
** Stored within {{File{/etc/pki/tls/certs/ca-bundle.crt}}} in ~CentOS
** {{Command{grep Issuer /etc/pki/tls/certs/ca-bundle.crt | less}}} to see whose CA certificates the OS is trusting
* Intermediate CA resellers
** These resellers are trusted by a root CA certificate to issue certs on their behalf
* Transitive trust
** A trusts B and B trusts C, thus A trusts C
* Web of trust instead of a direct chain
** There are so many trusted certificate authorities, a weak link in any one of them completely destroys the ability to truly trust any of them.
* Certificate Authority weaknesses
** Several Breaches at CA Intermediaries.  This sort of thing seems to happen a lot.
***[[DigiNotar (2011)|https://security.googleblog.com/2011/08/update-on-attempted-man-in-middle.html]] - Issued a wildcard certificate for google.  About 500 other fake certificates were issued.
** Bad actors
*** Man in the middle proxies
**** [[Gogo Serving Fake SSL Certificates to Block Streaming Sites|http://www.pcmag.com/article2/0,2817,2474664,00.asp]]
**** [[SSL/TLS Interception Proxies and Transitive Trust|http://www.secureworks.com/cyber-threat-intelligence/threats/transitive-trust/]]
*** Malware
**** [[Lenovo's Superfish|http://www.slate.com/articles/technology/bitwise/2015/02/lenovo_superfish_scandal_why_it_s_one_of_the_worst_consumer_computing_screw.html]]
*** [[Microsoft Blacklists Fake Finnish Certificate|http://yro.slashdot.org/story/15/03/18/2048244/microsoft-blacklists-fake-finnish-certificate]]
*** [[Chinese CA issues Google certificates|https://googleonlinesecurity.blogspot.com/2015/03/maintaining-digital-certificate-security.html]]
*** [[Thawte issues certs for domains it doesn't own|http://www.itworld.com/article/2999145/security/google-threatens-action-against-symantec-issued-certificates-following-botched-investigation.html]]
*** [[Researchers find, analyze forged SSL|http://www.net-security.org/secworld.php?id=16843]]
** Certificate revocation problems
*** The mechanisms in place to get the word out that an issued certificate cannot be trusted aren't very robust.
* Leaks or vulnerabilities - Many ways to attack the infrastructure
** [[Heartbleed|https://en.wikipedia.org/wiki/Heartbleed]] - Broke encryption for almost the entire Internet.  For two years anyone could obtain a server's encryption key.
** [[Poodle|https://www.openssl.org/~bodo/ssl-poodle.pdf]]
** Weak ciphers and downgrade attacks
** There are too many other examples

!!!! Countermeasures for the weaknesses in the trust system:
* Public key pinning
** Lock a certificate to a specific CA
** http://thenextweb.com/apps/2014/09/02/firefox-32-arrives-new-http-cache-public-key-pinning-support-easy-language-switching-android/
** https://wiki.mozilla.org/SecurityEngineering/Public_Key_Pinning
** https://raymii.org/s/articles/HTTP_Public_Key_Pinning_Extension_HPKP.html

* ~DNS-based Authentication of Named Entities  (DANE)
** Remove the ~CAs from the process
** Use DNS to authenticate certificates much like SSH fingerprint records
** Add a ''tlsa'' resource record to your DNS zone
** Move from web of trust to chain of trust like DNSSEC
** Since DNS is totally open, if something is compromised it should be detectable
** Easy to revoke certificates
** https://www.huque.com/bin/gen_tlsa
** [[DerbyCon Video|http://www.irongeek.com/i.php?page=videos/derbycon4/t404-dns-based-authentication-of-named-entities-dane-can-we-fix-our-broken-ca-model-tony-cargile]]

* Online Certificate Status Protocol - Obtain revocation status of a certificate
* Hardcoded blocklists in web browsers


!! Web Encryption

!!! Background

HTTP encapsulated with TLS
 - TLS = Transport Layer Security
 - Replacement for SSL protocol
This is an encryption layer on top of HTTP
 - Used to authenticate the server and encrypt the communication

Use Hybrid encryption
* Encryption algorithms are decided by the browser and the server
* The most secure method available to both is used
* Symmetric encryption is used for the transaction
* But we need to safely share the key
** Asymmetric crypto is used to send the key
** The server's public key is stored inside the site's certificate

HTTPS Handshake:
* Browser initiates the connection
* Server responds with its certificate 
* Browser advertises its encryption methods and sends a symmetric session key encrypted with the server's public key
* Server decides which cipher to use
* Server and client use this session key for symmetric encryption of the data
** Forward Secrecy - key agreement protocols which ensure a compromise of private keys will not lead to a compromise of past session keys.  This helps mitigate future SSL attacks.

Public key crypto is only used to establish faster symmetric encryption


!!! Implementation

Let's set all of this up to secure communication to our web server.

''Note:''  The examples below contain my username, IP address, and DNS records.  Be sure to replace them with your values.
  

!!!! Key generation
* Save on your web VM within the directory {{File{/etc/pki/tls/}}}
** This is the standard system directory for keys and certificates

----
!!!!! There are two ways to create SSL keys and certificates:
A) Manually:
- ''Do not do this'' for your web server ~VMs; this is only here for informational purposes.  We will be using the automated process below.

* SSL Keys
** Create and secure a SSL key with a passphrases
** Create key: {{Command{ openssl genrsa -aes256 -out www.//username//.key 2048 }}}
** Verify key: {{Command{ openssl rsa -noout -text -in www.//username//.key }}}
* Certificate Signing Request (CSR)
** The CSR will collect the server information.  It would be sent to a commercial certificate authority who will then create and sign the certificate.
** Create CSR: {{Command{ openssl req -new -sha256 -key www.//username//.key -out www.//username//.csr }}}
** Verify CSR: {{Command{ openssl req -noout -text -in www.//username//.csr }}}
* Certificate Authority (CA)
** Either commercial or self-signed
** Commercial - send the CSR file to the CA
** ~Self-Signed:
*** Become an untrusted certificate authority yourself:
**** Create CA Key: {{Command{ openssl genrsa -aes256 -out ncs205CA.key 4096 }}}
**** Create CA Cert: {{Command{ openssl req -x509 -new -sha256 -key ncs205CA.key -out ncs205CA.crt -days 18250 }}}
**** Sign a certificate:  {{Command{ openssl x509 -req -in www.merantn.csr -CA ncs205CA.crt -~CAkey ncs205CA.key -~CAcreateserial -out www.merantn.crt -days 735 }}}
**** Display the new certificate: {{Command{ openssl x509 -noout -text -in ncs205CA.crt }}}

Whoever controls a system's certificate store controls secure communication to that system!  If I create a CA certificate and can add it to your system, any certificates I issue will be trusted.  Man in the middle attacks would not display the normal "Untrusted site" warnings in your browser unless the site operator has deployed countermeasures.

'' - or - ''

B) ''Automated:'' Using Let's Encrypt and {{Command{acme.sh}}} to generate SSL certificates
 - Use this method to create an SSL certificate for your web servers

A trusted SSL certificate must only be issued to someone who can prove they own the domain. 

Let's Encrypt and the {{Command{acme.sh}}} tool can obtain this verification either by checking for a small file placed within the ~DocumentRoot of your website or checking DNS for a particular record.  Only someone who actually owns the domain should be able to perform these tasks.  Normally, the first method is utilized because it is easy to automate.  This is not an option for us because our web servers are on a private network which cannot be accessed directly from the internet.  The Let's Encrypt servers will not be able to access our web servers to verify them.  Our only option is DNS verification because you control your DNS servers and the DNS records are available outside of our lab environment.

{{Warning{''Warning:'' Let's Encrypt certificates are only valid for 90 days and must be continually renewed.  An automated mechanism must be in place to ensure the certificates are renewed prior to expiration.  A scheduled task must be created to renew the certificates before they expire.  This isn't a concern for us since the class will end before these certificates expire.  The [[acme.sh documentation|https://acme.sh]] will walk you through how to set up this scheduled task.}}}


You cannot proceed if your DNS server is not yet set up properly.  Ensure an external test properly returns an IP address before continuing with SSL configuration.  Replace my username with yours.
{{{
[merantn@core ~]$ dig www.merantn.ncs205.net @1.1.1.1

; ; <<>> ~DiG 9.11.4-P2-~RedHat-9.11.4-9.P2.el7 ; <<>> www.merantn.ncs205.net @1.1.1.1
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 59587
;; flags: qr rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1

;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 1452
;; QUESTION SECTION:
;www.merantn.ncs205.net.                IN      A

;; ANSWER SECTION:
www.merantn.ncs205.net. 249     IN      A       192.168.12.25

;; Query time: 71 msec
;; SERVER: 1.1.1.1#53(1.1.1.1)
;; WHEN: Mon Apr 27 04:43:32 EDT 2020
;; MSG SIZE  rcvd: 89
}}}


Complete the following steps to use {{Command{acme.sh}}} to generate an SSL certificate for your web server:

{{Warning{''Warning'': Getting this all set up is a well choreographed dance.  All of the pieces need to fall together precisely in order for it to work.  Once it's set up, it'll run flawlessly for years.  Go slow, pay attention, and mind the typos.  Copy & paste as much as you can.  Typing all of this out is asking for pain and punishment.  

Replace my username and IP addresses where you see them.  Copy the large commands from this page to notepad, make the edits, and then paste the commands into the shell.  Be sure to review everything closely before executing.}}}

On your DNS server:
# Add a CAA ([[Certification Authority Authorization|https://letsencrypt.org/docs/caa/]]) record to your zone file.  A CAA DNS record indicates which Certificate Authorities are allowed to issue certificates for the domain.
# Add a CNAME record resembling the following to your forward zone:
** {{Monospaced{_acme-challenge.www	IN	CNAME	//username//.acme.ncs205.net.  }}}
** Be sure to replace //username// with your username
** See my full zone below for an example.
# Increment your SOA serial number and reload your zone file with {{Command{rndc reload}}}
** Notice the serial number below has changed from the last time I posted my zone file.  Do not omit this crucial step.

My full zone file:
{{{
[root@core ~]# cat /etc/named/master/merantn.ncs205.net.fwd
$TTL 5m
@ IN  SOA ns1.merantn.ncs205.net. hostmaster.merantn.ncs205.net. (
 2022041001     ; serial number
 1d    ; refresh
 5d    ; retry
 2w    ; expire
 30m   ; minimum
)
                IN      NS      ns1.merantn.ncs205.net.
                IN      NS      ns5.ncs205.net.
                IN      CAA 128 issue "letsencrypt.org"


ns1             IN      A       192.168.12.26

test            IN      A       192.168.12.24
www             IN      A       192.168.12.25
core            IN      A       192.168.12.26

loghost         IN      CNAME   core
ntp             IN      CNAME   core
directory       IN      CNAME   core

_acme-challenge.www     IN      CNAME     merantn.acme.ncs205.net.
}}}

* Don't forget to replace my username with yours everywhere it appears.

Wait for DNS to propagate until you're able to verify your records exist in local and external DNS before proceeding.  These two commands can be used to verify that the DNS records are ready.

{{Commands{
''1)'' Local check:
[root@core ~]#  ''dig _acme-challenge.www.merantn.ncs205.net CNAME @localhost +noall +answer''

&#59; &lt;&lt;&gt;&gt; ~DiG 9.11.4-P2-~RedHat-9.11.4-26.P2.el7_9.4 &lt;&lt;&gt;&gt; _acme-challenge.www.merantn.ncs205.net TXT @localhost +noall +answer
&#59;&#59; global options: +cmd
@@_acme-challenge.www.merantn.ncs205.net. 300 IN CNAME merantn.acme.ncs205.net.@@

''2)'' External DNS check against the ~CloudFlare DNS server at 1.1.1.1
[root@core ~]# ''dig _acme-challenge.www.merantn.ncs205.net CNAME @1.1.1.1 +noall +answer''

&#59; &lt;&lt;>> ~DiG 9.11.4-P2-~RedHat-9.11.4-26.P2.el7_9.4 &lt;&lt;&gt;&gt; _acme-challenge.www.merantn.ncs205.net TXT @1.1.1.1 +noall +answer
&#59;&#59; global options: +cmd
@@_acme-challenge.www.merantn.ncs205.net. 300 IN CNAME merantn.acme.ncs205.net.@@

}}}
You should see the same CNAME record value returned by both queries.  __If you don't see any output in the second command, then you likely either forgot to properly increment your serial number or reload your zone.__

{{Note{''Note:'' I only added the ''+noall'' and ''+answer'' options to ''dig'' to trim the output so it's easier to post here.  You don't have to use these options in your checks.  Omitting them will yield more details which are often very useful.}}}

Once the CNAME records are fully in place and verified, complete the following steps on your web server VM:


On your web VM:
# Download the {{Command{acme.sh}}} shell script and save it to {{File{/usr/local/sbin/}}} on your web server VM
## {{Command{wget -O /usr/local/sbin/acme.sh https://raw.githubusercontent.com/acmesh-official/acme.sh/master/acme.sh}}}
## Be sure to make the file executable
# Download the ~CloudNS API plugin
## {{Command{mkdir -p /root/.acme.sh/dnsapi/}}}
## {{Command{wget -O /root/.acme.sh/dnsapi/dns_cloudns.sh https://raw.githubusercontent.com/acmesh-official/acme.sh/master/dnsapi/dns_cloudns.sh}}}
# Define the API credentials
** We will be performing DNS verification via API.  These two commands will save the API username and password to the shell environment for the acme.sh script to retrieve. 
## {{Command{export ~CLOUDNS_SUB_AUTH_ID="//username//"}}}
## {{Command{export ~CLOUDNS_AUTH_PASSWORD="//password//"}}}
## Verify the username and password variables:  {{Command{ echo $~CLOUDNS_SUB_AUTH_ID - $~CLOUDNS_AUTH_PASSWORD}}}.  You should see them repeated back to you.
**  The actual username and password will be posted to the Discord channel for this week's material.
# Begin the authorization process for a certificate for your web server from Let's Encrypt.  ''Be sure to replace my username with yours everywhere it appears''
## Perform a test of the SSL certificate issue:  {{Command{/usr/local/sbin/acme.sh &#045;-server letsencrypt &#045;-issue -d www.merantn.ncs205.net &#045;-domain-alias merantn.acme.ncs205.net &#045;-log &#045;-dns dns_cloudns &#045;-dnssleep 60 &#045;-test}}}

If the certificate is properly issued, you should see it displayed to the screen followed by similar text:
{{Monospaced{
&#045;&#045;&#045;&#045;&#045;END CERTIFICATE&#045;&#045;&#045;&#045;&#045;
[Thu Apr 15 00:38:10 EDT 2021] Your cert is in  /root/.acme.sh/www.merantn.ncs205.net/www.merantn.ncs205.net.cer
[Thu Apr 15 00:38:10 EDT 2021] Your cert key is in  /root/.acme.sh/www.merantn.ncs205.net/www.merantn.ncs205.net.key
[Thu Apr 15 00:38:10 EDT 2021] The intermediate CA cert is in  /root/.acme.sh/www.merantn.ncs205.net/ca.cer
[Thu Apr 15 00:38:10 EDT 2021] And the full chain certs is there:  /root/.acme.sh/www.merantn.ncs205.net/fullchain.cer
}}}

The {{Monospaced{&#045;-test}}} option at the end of our last command means this is only a test run of the process.  We must ensure everything works before requesting the real certificate.  If things aren't set up correctly and you request the real certificate too many times, you'll end up blocked by the validation servers.

If the previous command completed successfully, run it again without the {{Monospaced{&#045;-test}}} option at the end.  You may also need to add a {{Monospaced{-f}}} option to force it.

# Use {{Command{yum}}} to install the package {{Monospaced{''mod_ssl''}}} on your web VM.  This is the Apache extension which will provide encryption
# Edit the Apache SSL configuration file, {{File{/etc/httpd/conf.d/ssl.conf}}}
## Search for the ''~SSLCertificateFile'' directive and change the path to {{File{/etc/pki/tls/certs/www.merantn.ncs205.net.cer}}}
*** This file contains our server's public key
## Search for the ''~SSLCertificateKeyFile'' directive and change the path to {{File{/etc/pki/tls/private/www.merantn.ncs205.net.key}}}
*** This file contains our server's private key
## Search for the ''~SSLCertificateChainFile'' directive and change the path to {{File{/etc/pki/tls/certs/www.merantn.ncs205.net.fullchain.cer}}}
*** This file contains the intermediate certificates.  Our browser contains the Root CA certificate.  Including the intermediate certificate in our bundle completes the trust chain.
*** This configuration directive may need to be uncommented.
# Install the certificates to their proper place on the system and restart Apache:
** {{Command{/usr/local/sbin/acme.sh &#045;-install-cert -d www.merantn.ncs205.net &#045;-log &#045;-cert-file /etc/pki/tls/certs/www.merantn.ncs205.net.cer &#045;-key-file /etc/pki/tls/private/www.merantn.ncs205.net.key &#045;-fullchain-file /etc/pki/tls/certs/www.merantn.ncs205.net.fullchain.cer &#045;-reloadcmd "/usr/sbin/apachectl restart" }}}
# Verify Apache is running: {{Command{systemctl status httpd}}}
# Verify the apache config: {{Command{apachectl configtest}}}
** This will identify any errors that might have been introduced.
** It's always wise to validate your configuration before restarting a service.  If you don't and there's a problem, your service will be offline while you sort it out.
** Not a big deal here, but this will be a big deal in the future when downtime costs money and might get you fired.
----

View the server certificate chain:  {{Command{ true | openssl s_client -connect www.merantn.ncs205.net:443 -showcerts}}}
 - Replace my username with yours.
 - You should see ''Verify return code: 0 (ok)'' near the bottom.  This return code means a valid certificate was presented and the connection is fully trusted.

Verify your SSL certificate is fully trusted with {{Command{curl}}}.  Notice the certificate information and dates in the connection details.  Be sure to use ''https'':
{{{
[root@www ~]# curl -v https://www.merantn.ncs205.net/
* About to connect() to www.merantn.ncs205.net port 443 (#0)
*   Trying 192.168.12.25...
* Connected to www.merantn.ncs205.net (192.168.12.25) port 443 (#0)
* Initializing NSS with certpath: sql:/etc/pki/nssdb
*   CAfile: /etc/pki/tls/certs/ca-bundle.crt
  CApath: none
* SSL connection using TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
* Server certificate:
*       subject: CN=www.merantn.ncs205.net
*       start date: Nov 19 06:13:15 2021 GMT
*       expire date: Feb 17 06:13:14 2022 GMT
*       common name: www.merantn.ncs205.net
*       issuer: CN=R3,O=Let's Encrypt,C=US
> GET / HTTP/1.1
> User-Agent: curl/7.29.0
> Host: www.merantn.ncs205.net
> Accept: */*
>
< HTTP/1.1 200 OK
< Date: Fri, 19 Nov 2021 07:14:57 GMT
< Server: Apache/2.4.6 (CentOS) OpenSSL/1.0.2k-fips PHP/7.3.33
< Last-Modified: Thu, 11 Nov 2021 19:44:17 GMT
< ETag: "56-5d0889169ada8"
< Accept-Ranges: bytes
< Content-Length: 86
< Content-Type: text/html; charset=UTF-8
<
<HTML>
<BODY>
<BR><BR><BR>
<center><B>Welcome to NCS205!</B></center>
</BODY>
</HTML>
* Connection #0 to host www.merantn.ncs205.net left intact
}}}


When an SSL certificate is deployed, connections must be by hostname because that is the primary mechanism to verify that the SSL certificate is valid.  Connections by IP address or localhost will return verification errors.  
 - Look in the {{Command{curl}}} output above for the certificate ''Subject:'' field, {{Monospaced{subject: CN=www.merantn.ncs205.net}}}.  The hostname in the certificate subject field must match the hostname used by curl or your browser to access the site.

{{{
[root@www ~]# curl https://localhost/
curl: (51) Unable to communicate securely with peer: requested domain name does not match the server's certificate.

[root@www ~]# curl https://192.168.12.25/
curl: (51) Unable to communicate securely with peer: requested domain name does not match the server's certificate.
}}}


!!! Helpful sites:
* Test server SSL quality:  http://www.ssllabs.com/
* https://mozilla.github.io/server-side-tls/ssl-config-generator/
* https://cipherli.st/

!!! Additional Reading

https://www.schneier.com/blog/archives/2010/07/dnssec_root_key.html
Implementation http://www.tldp.org/HOWTO/SSL-Certificates-HOWTO/x64.html
SSL/TLS Details: http://en.wikipedia.org/wiki/Transport_Layer_Security
[[Bulletproof SSL & TLS|https://www.feistyduck.com/books/bulletproof-ssl-and-tls/]]


!! Scheduled tasks

Any system will contain jobs which must be run at some sort of periodic interval.  They can be regular maintenance tasks common to all Unix systems or unique tasks custom to a specific server.

Log file rotation is an example of a regularly scheduled task common to all systems.  Every day, cron executes the {{Command{logrotate}}} command to ensure log files do not accumulate indefinitely.  Log files are renamed and sometimes compressed according to the schedule outlined in its configuration file {{File{/etc/logrotate.conf}}}.  Very old log files will be deleted once their retention periods have expired.

The collection of your PDF labs is an example of a unique task custom to our class shell server.  A script is executed every hour which collects your new labs from the {{File{/opt/pub/ncs205/submit/}}} directory and copies them to my grading queue.  Here is the cron job which makes that happen:
{{{
[root@shell ~]# cat /etc/cron.d/lab-collect
# Example of job definition:
# .---------------- minute (0 - 59)
# |  .------------- hour (0 - 23)
# |  |  .---------- day of month (1 - 31)
# |  |  |  .------- month (1 - 12) OR jan,feb,mar,apr ...
# |  |  |  |  .---- day of week (0 - 6) (Sunday=0 or 7) OR sun,mon,tue,wed,thu,fri,sat
# |  |  |  |  |
# *  *  *  *  * user-name  command to be executed

0    *  *  *  * root       /opt/rsl/collect.sh
}}}


Tasks may be scheduled using these two services
* {{Monospaced{cron}}} - run periodically at a specified interval.  This system is for tasks which will run regularly.
* {{Monospaced{at}}} - run once at a scheduled date & time.  This system is for tasks which are one-offs.

!!! More examples of tasks to schedule at a regular interval:
* SSL certificate renewal
* Daily reports
* Garbage collection (remove old or temporary files to free disk space)
* Vulnerability checks
* System updates
* Source code updates
* {{Command{mysqldump}}} - database backups
** create a read-only mySQL account
** save backup to {{File{/opt/work/backups/}}}
* System backups

* Or, using {{Command{at}}}, scheduling a one-time job for a more convenient time
** ie: something that may be bandwidth or CPU intensive.


!!! cron - run periodically
* {{Command{crond}}}
** A service that is installed with the OS and running by default
** Started automatically on system boot 
** Permission for regular users to use this tool is granted or revoked via the {{File{/etc/cron.{deny,allow&#125;}}} files
* System cron configuration files:
** {{File{/etc/crontab}}} file - The main configuration file.  Each line in this file is a job to run at the specified schedule
** {{File{/etc/cron.d/}}} directory - Where to put individual files containing single or related jobs. Separate files are easier to maintain in an automated fashion.
** {{File{ /etc/cron.{hourly,daily,weekly,monthly} }}} - Scripts placed in these directories will be executed on that interval

* User cron configuration: 
** {{Command{crontab}}} command for accessing scheduled ''user'' jobs
** {{Command{crontab}}} [-e|-l] - edit or list the ''user'' cron jobs
** User cron config files are stored in {{File{/var/spool/cron/}}} if you'd like to review them

{{Note{''Note:'' 
 - Scheduled tasks related to the function of the system or its services should be saved in the cron configuration files within {{File{/etc/}}}.  
 - Scheduled tasks which are only for a specific user (generally a non-root user) should be saved using the {{Command{crontab -e}}} command. }}}


!!!Crontab file format
* See {{File{/etc/crontab}}}
* Declare variables (if you need to) - Shell variables can be set to define items such as the $PATH to use for executables and the user account to email any results to.
** The defaults are reasonable.  You generally only need to set variables to make changes to the defaults or define something not already set.
* Output optionally sent to the owner via email (on by default) 
* Command execution fields:
** Time to execute command, (minute, hour, day, month, weekday) 
** User to run as (This can only be specified when running from a system crontab configuration file)
** Command string to execute
* Special time nicknames:
** @reboot
** @daily
** Complete list is available in the cron man page
* Special time formatting:
** */2 : Every 2 hours, months, days, etc
** 1-4 : Range, for example from 1 to 4
** 1-6/2 : Every other between 1 and 6 
** 2,5 : Multiple, for example 2 and 5

!!! Cron man pages:
* Check man page for the {{Command{cron}}} command
* Notice the ''See Also'' section at the bottom where {{Command{crontab}}} is listed in two different manual sections
* {{Command{man man}}} - will describe how to access different manpage sections

The website https://cron.help/ is also a good resource for validating your cron scheduling.

!!! Troubleshooting cron
* Logging on Linux does a great job of providing useful information to help troubleshoot issues
** Most services write to a log file somewhere below the directory {{File{/var/log/}}}
* The cron log file will be a good resource - {{File{/var/log/cron}}}
* Be sure to include the last few lines of this log file if you need to reach out for help


!!! at - run once
* System dependent:
** ~FreeBSD - The atrun utility is executed via cron every 5 minutes to check for and run {{Command{at}}} scheduled tasks
** ~CentOS - atd - Daemon running in the background for processing tasks scheduled via the {{Command{at}}} utility
** This is not installed or running by default (on ~CentOS)
** It must be set to start on boot and be manually started after installation, just like any other new service we add.
* {{Command{at}}} user command with time for task to execute specified as an argument
** flexible time formatting
*** {{Command{ at +15 minutes }}}
*** {{Command{ at 4:15 }}}
*** {{Command{ at 4pm tomorrow }}}
*** {{Command{ at 4pm October 15 }}}
* Display scheduled job with {{Command{at -c}}}
** Scheduled jobs stored in {{File{/var/spool/at/}}} files
** {{Command{ atq }}} - display scheduled at jobs
** {{Command{ atrm }}} - remove scheduled at job
** Can use {{File{/etc/at.{allow,deny&#125;}}} files to control access to this utility

!!!! Examples:
For our recent security competition, I wanted to lock access to a system for lunch and re-enable access after the lunch break ended:

{{{
root@vce1:~# at 12pm
warning: commands will be executed using /bin/sh
at> pct stop 1215
at> <EOT>
job 2 at Sat Apr 9 12:00:00 2022

root@vce1:~# at 1pm
warning: commands will be executed using /bin/sh
at> pct start 1215
at> <EOT>
job 3 at Sat Apr 9 13:00:00 2022

root@vce1:~# atq
3      Sat Apr 9 13:00:00 2022 a root
2      Sat Apr 9 12:00:00 2022 a root
}}}


! Assignment

<<tiddler [[Lab 60 - SSL Certificates]]>>

<<tiddler [[Lab 61 - Scheduled Tasks]]>>
! Material

!! Read:
* Linux Administration Chapter 6 - Managing users & Groups
* [[sudo tutorial|https://phoenixnap.com/kb/linux-sudo-command]]
* [[sudoedit tutorial|https://www.howtoforge.com/tutorial/how-to-let-users-securely-edit-files-using-sudoedit/]]

!! Watch:
* {{Command{sudo}}} use and configuration:  https://www.youtube.com/watch?v=YSSIm0g00m4
** Note: In the video, the sudoers file is edited by executing {{Command{sudo visudo}}}.  This may not work on our ~VMs.  Instead use {{Command{su}}} to become root and then run {{Command{visudo}}} to edit the sudo configuration.


! Notes - Access control & user management

''Authentication'' - Who you are.  The process of ascertaining that someone is actually who they claim to be
''Authorization'' - What you are allowed to do.  Rules to determine who is allowed to perform certain tasks

!! Access control

!!! From the beginning unix maintained a multi-user system
* All objects (files & processes) have owners
* A user owns new objects they create 
* The administrative user (root) can act as the owner of any object
* Only root can perform most administrative tasks

!!! Groups

A mechanism to grant permissions to groups of users, such as all students in a particular class.

* The filesystem has a more sophisticated access control system
* Each file has a user owner and a group owner
* Permissions can be set so group members may have their own set of access controls (rwx)
* Groups can be harnessed to control access to the system

The directory {{File{/opt/pub/ncs205/}}} is set so only those in this class can access its files.


!!! root (uid 0)
* The root user is the standard unix superuser account
* There's nothing special about the user name - it's all in the user ID (UID) number
** Unix systems track everything by number:  process ~IDs, device ~IDs, IP addresses, uid, and gid
** We prefer names over numbers
* Check out the {{File{/etc/passwd}}} on your ~VMs.  There's a second uid 0 user account named {{Monospaced{toor}}}
** An unknown uid 0 backdoor account would normally be a huge red flag.  But this account is so I can get into your systems to help if something breaks.
** It has the same privileges as your {{Monospaced{root}}} account, but uses a password that I have.
** As far as the system is concerned, {{Monospaced{root}}} & {{Monospaced{toor}}} are the same person because they have the same uid number.

!!! Privilege separation 
* superuser (uid 0) - The superuser - ideally only use the system with superuser privileges when necessary.
* normal users - the regular users on the system.  How we all access and use the class shell server.
* service accounts - These are the accounts our services run as, such as the {{Monospaced{apache}}} and {{Monospaced{mysql}}} users on your web server or {{Monospaced{named}}} user on your core VM.
** nobody or daemon accounts
*** Generic unprivileged accounts which run services as unprivileged users in case the services are broken into.  This way they'll have very limited access to the rest of the system
*** Services ran as root in the old days.  If a service was exploited and an attacker was able to access files or run commands, they would then have access to the entire system.
** principle of least privilege - Only grant users the access they need.  If an account or service is broken into, the damage will be limited.
*** This is why we don't all access the shell server as {{Monospaced{root}}}.  I use an unprivileged user also and only elevate to {{Monospaced{root}}} when necessary.

!!! Privilege escalation
* Limit direct access to the {{Monospaced{root}}} account.  
* Privilege separation - Only obtain superuser privileges when you need them
** Don't always operate as the {{Monospaced{root}}} user
* Instead log in as a regular user and escalate when needed
** This is also good for accountability if many users have the root password.
** {{Command{su}}} command - Substitute user
*** Change the effective userid to another system user
*** Real id is the userid you log in as, the user id associated with the process that created the current process
*** Effective id is one the system uses to determine whether you have access to a resource
*** http://bioinfo2.ugr.es/OReillyReferenceLibrary/networking/puis/ch04_03.htm
*** {{Command{su [username]}}}  - Change to another user, inheriting the current shell environment
*** {{Command{su - [username]}}}  - Change to another user, simulating a full login.  The current shell environment will not be inherited.
** {{Command{sudo}}} - Allow elevated privileges on a limited scale (per command).  
*** {{Command{sudo}}} Allows an administrator to grant root privileges to users without divulging the root password.
*** Or allow a user to just run a few commands as the superuser.
*** Display what you are allowed to access via sudo: {{Command{sudo -l}}}
*** {{Command{sudo //command//}}} - Run a command as another user (defaults to the root user)
*** {{Command{sudoedit //file//}}} - Edit a file as another user (defaults to the root user).  Running {{Command{sudoedit //filename//}}} is the same as running {{Command{sudo -e //filename//}}}.
*** {{Command{sudo -l}}} - Display which commands are available to the current user via sudo
*** sudoers file: {{File{/etc/sudoers}}} - This is where the sudo configuration is saved.  Don't edit this file directly.  Use {{Command{visudo}}} to edit it.
**** Separate sudo configuration files can also be saved within the directory {{File{/etc/sudoers.d/}}} to keep things better organized.
**** {{Command{visudo}}} will lock the file and perform syntax checks after saving it.
** You can control who can access particular resources with user or group permissions
* Both {{Command{su}}} and {{Command{sudo}}} will log escalation events
** su will log when an unprivileged user switches to another user
** sudo/sudoedit will log each command executed or file modified
* setuid bit 
** set ID upon execute
** An extra permission bit that can be set with chmod 
** The program will run as the user who owns the file.
** Examples:  passwd and crontab commands
** The passwd command needs extra privileges in order to change a user's password, so extra system privileges are granted just to that command.

!!! Finer grained access controls
* ~SELinux and mandatory access controls (MAC)
** Enabled by default in ~CentOS
** ~SELinux will cause us problems if we don't either configure or disable it
** Controlled by the {{Command{setenforce}}} command for current boot
** and by the /etc/selinux/config file on boot
** It's presently disabled on all of our class ~VMs
* Filesystem access control lists (~ACLs)
** Finer grained per user access to files
** Controlled by {{Command{setfacl}}} and displayed by {{Command{getfacl}}}
** Active ~ACLs noted with a + at the end of the file permissions list

!!! Verifying users with PAM
* Pluggable Authentication Modules (Chapter 6, Page 125)
* Configuration resides in /etc/pam.d/
* Originally access was determined by just checking passwords against the password files
* Modules are used for user validation and verification
** Can determine who you are
** And if you have permission to access the resource
** Can also enable additional types of authentication, such as two-factor with hardware or soft tokens.
* Examples: 
** {{File{/etc/pam.d/su}}} - limit who can use the {{Command{su}}} command
*** uid 0 users can always run the {{Command{su}}} command
*** Change to require wheel group membership
*** Can set to implicitly trust members of the wheel group (dangerous!)
* Other pam functions: 
** Pam can also create home directories on first login with pam_mkhomedir
** Check password complexity with pam_cracklib or pam_pwquality
** Lock accounts on too many failed attempts with pam_tally or pam_faillock


!! Users and Groups

!!! Password files
* {{File{/etc/passwd}}} - Everyone can read this file
** Contains fields identifying the user
** It used to also contain the hashed password but this was moved elsewhere to hide it from normal users
** Don't leave the old password field (position 2) blank!  If blank, no password is required for login.  Use the placeholder character {{Monospaced{''x''}}}.
*** An {{Monospaced{''x''}}} in {{File{/etc/passwd}}} column 2 means see {{File{/etc/shadow}}} for the password hash
* {{File{/etc/shadow}}} (Linux) or {{File{/etc/master.passwd}}} (~FreeBSD) - Only root can read this file
** A secure file which contains the password hashes so normal users cannot read them for brute force cracking
** Also contains password and account expiration attributes
* Use {{Command{vipw}}} to edit these files so you have file locking and format verification
** This verification prevents errors from breaking access to the system
** {{Command{vipw}}} will edit the password file
** {{Command{vipw -s}}} will edit the shadow file
* password hashing:
** Sample password hash: {{Monospaced{ $6$hA6IJImd$~TCWDXE6zeHgRYKBNAG2jqHNMyPp9FCW2KdlVFKGWto9BcV9chEjCX3zZAzxx5tqbKn3wve13VWLD8Vb5O214x1 }}}
*** The full hash has three components, separated by the {{Monospaced{$}}} delimiter:  Algorithm type, salt, and hashed (encrypted) password
** Different hashing algorithms and their tags from old (weak) to new (strong):  DES, ~MD5 ({{Monospaced{$1$}}}), Blowfish ({{Monospaced{$2a$}}}), ~SHA256 ({{Monospaced{$5$}}}), ~SHA512 ({{Monospaced{$6$}}})
*** The tag at the beginning of the password hash identifies the algorithm used.
** {{Command{authconfig &#045;-test | grep hash}}} - See what hashing algorithm is used by default on your system
** {{Command{authconfig &#045;-passalgo=md5 &#045;-update}}} - Change the default hash type (don't actually run this)
** {{File{/etc/sysconfig/authconfig}}} - Authentication configuration settings
** {{File{/etc/libuser.conf}}}
** salting
*** Randomize hashes by adding a salt to the password before hashing
*** Prevents identical passwords from having the same hash
*** Increases difficulty for brute force attacks or hash lookup tables (rainbow tables), since now a potential password value has to be tested for each possible hash value.
** Password cracking:
*** John the Ripper
*** hashcat
*** GPU processing makes this all much faster now, especially for weak algorithms and passwords
*** Protect your hashes!

Password strength:
[img[img/passwords.png]]


* uid numbers 
** multiple users with same UID number - The system only cares about the number.  If multiple users have the same UID number, then they are effectively the same user and can access each other's files
** System accounts (UID < 10)
** Service accounts (~UIDs between 10 and 500)
** Users UID > 500 (Linux) - Regular users.

!!! Group file
* {{File{/etc/group}}} - Where groups and group memberships are defined.
** wheel group - special administrator group.  Usually allows extra system access

!!! Shell
* default shell : {{Command{/bin/bash}}} (Linux) or {{Command{/bin/tcsh}}} (BSD)
* lockout shell : {{Command{/sbin/nologin}}}
** Users with this shell are not allowed to log into the system.  Service accounts or banned users will be set to this shell.
* Available shells defined in {{File{/etc/shells}}}

!!! Locking accounts
* Replace the hash with a {{Monospaced{*}}} or {{Monospaced{!!}}} to lock the account.
** This is not enough on //some// systems.  Users may still be able to log in with SSH keys instead of passwords.
* Also change shell to {{Command{/sbin/nologin}}}
** This is a standard lockout shell.  A user must have a valid login shell in order to connect to a system
** The command {{Command{/sbin/nologin}}} just echoes //This account is currently not available.// and terminates, thus disconnecting the user from the system.
* {{File{/var/run/nologin}}} or {{File{/etc/nologin}}}
** If this file exists, only root will be allowed to log into the system.   The contents of the file will be displayed to the user before they are disconnected.
*** This is helpful if a system needs to be closed for temporary maintenance.
* Check out service accounts in the password file - they should not have passwords or valid shells
** A service account with a password or valid shell is being abused by an attacker.

!!! New user:
* Use utilities ({{Command{useradd}}}, {{Command{userdel}}}, {{Command{usermod}}}) or edit the password files directly
* Create a home directory for the user
** Set home dir ownership and permissions so the new user can access it
* Set up environment (dot files)
** Copy the environment configuration files within {{File{/etc/skel/}}} (Linux) or {{File{/usr/share/skel/}}} (~FreeBSD) to the new user's home directory
*** Note:  All environment configuration file names begin with a dot.
*** Don't forget to change ownership on the environment files in the user's home directory too

!!! Remove or lock user
* Delete or comment lines in password files
** Will no longer be known to the system, but non-destructive
** Change password hash and change shell


!!! Authentication factors:

Multi-factor authentication (MFA):
* Passwords are not good enough anymore; they are easily stolen.
* Increase security by combining multiple authentication factors.
* More sites and organizations are now requiring MFA
** [[Linux Kernel Git Repositories Add 2-Factor Authentication|http://www.linux.com/news/featured-blogs/203-konstantin-ryabitsev/784544-linux-kernel-git-repositories-add-2-factor-authentication]]
** SUNY Poly recently switched from GMail to MS Outlook and added MFA for email account login

!!!! Methods of authentication:
* ''Something you know'':  passwords
** Should be of sufficient length and complexity to be hard to crack
** Minimum of 10-12 characters
** correct horse battery staple: http://xkcd.com/936/
** Should be unique across systems
*** [[Russian Hackers Amass Over a Billion Internet Passwords|http://www.nytimes.com/2014/08/06/technology/russian-gang-said-to-amass-more-than-a-billion-stolen-internet-credentials.html?_r=0]]
*** [[Stolen user data used to access account|http://community.namecheap.com/blog/2014/09/01/urgent-security-warning-may-affect-internet-users/]]
*** [[ebay|http://money.cnn.com/2014/05/21/technology/security/ebay-passwords/]] 
** Password Cards: http://www.passwordcard.org/en, http://www.evenprime.at/2012/04/password-security-with-password-cards/, etc
** Password vaults
*** [[Password Safe|https://www.pwsafe.org]]
*** [[KeePass|https://keepass.info/]]
** One-time passwords (OTPW)

* ''Something you have''
** [[yubikey|http://www.yubico.com/]]
** [[Google Titan Key|https://cloud.google.com/titan-security-key/]]
*** https://www.cnet.com/news/google-made-the-titan-key-to-toughen-up-your-online-security/
** [[DoD CAC card|http://www.cac.mil/common-access-card/]]
** [[Google 2 factor|https://www.google.com/landing/2step/]]
** [[RSA SecurID|http://www.emc.com/security/rsa-securid/rsa-securid-hardware-authenticators.htm]]

* ''Something you are''
** biometrics:  fingerprint, retina, voice print, facial, vein patterns

* ''Somewhere you are''
** Geofencing - Tie authentication to a particular location
*** Someone may only log in or may not log in from a specific geographic location
** ~GeoIP libraries
** pam_geoip


!!! SSH authentication & increasing security

!!!! ssh keys
* Access systems with keys instead of just passwords for added security
* 1.5 factor authentication:  Slightly better than just passwords
* Create keypairs with ssh-keygen
** Asymmetric keypairs are used for authentication.  You keep the private key secure and locked with a passphrase.  The public key is distributed to systems you have permission to access.
* Public keys are stored in ~/.ssh/authorized_keys
* Host public keys are stored in ~/.ssh/known_hosts
* ssh-agent & ssh-add : add your ssh keys to the agent to be used for connecting to multiple systems
* pssh - parallel ssh for connecting to multiple systems

!!!! sshd configuration
* Host keys
** Host key warning - A warning appears on new systems to verify the host key to ensure you're not the victim of a man-in-the-middle attack
* Require SSH keys to access the system (disable password authentication)
** A little more secure than just passwords.  An attacker cannot just capture a password, they also must capture the SSH key
* Deny root login - Don't allow users to log in directly as root.  Must log in first as a regular, unprivileged user and then escalate to root with either {{Command{su}}} or {{Command{sudo}}}
** No system should allow direct root login.  Turning this off is an excellent security first-step
** Our shell server sees about 50 attempts per day to log in as root.  Countermeasures identify and block these attackers.
** {{Command{grep 'sshd-root.*Found' /var/log/fail2ban.log | wc -l}}}
* Require group membership - Must be in a particular group to log in to the system via ssh


! Assignment

<<tiddler [[Lab 62 - VM Lockdown - Secure your VMs]]>>

----

<<tiddler [[Lab 63 - sudo]]>>

----

<<tiddler [[Lab 64 - Enable Two-Factor Authentication]]>>

----
/%
<<tiddler [[Lab 65 - SSH Intrusion]]>>
%/
----
! Material

!! Reading:
* Linux Administration, Chapter 14 - Linux Firewall

There's a lot of good networking information in this chapter:
* A NAT Primer on pages 301-302 is important to know in a general sense, but not necessary for this class.
* The ~NetFilter background information throughout the chapter is good to know, but the {{Command{ iptables }}} command for managing the firewall has largely been replaced by the {{Monospaced{firewalld}}} tool, {{Command{ firewall-cmd }}}
* The flow chart on the top of page 304 is important to understand
* Pay particular attention to the firewalld section from 317-319.

! Notes

Effective security requires a [[multi-layered approach|https://www.techrepublic.com/blog/it-security/understanding-layered-security-and-defense-in-depth/]], [[defense in depth|https://www.us-cert.gov/bsi/articles/knowledge/principles/defense-in-depth]], and adherence to [[principle of least privilege|https://www.us-cert.gov/bsi/articles/knowledge/principles/least-privilege]].  Ideally, a weakness or vulnerability uncovered in one layer will be mitigated by another security layer.  

Five good examples of this we have deployed:
* {{Monospaced{ntpd}}} and {{Monospaced{named}}} on your core VM have ~ACLs in place to limit who can communicate with those services
* Direct login with the root account is now blocked.  Only authorized user accounts can elevate privileges via {{Command{su}}} or {{Command{sudo}}} through membership to the wheel group.  Just having the root password isn't enough.
* Rather than give a webmaster full root access, we configured {{Command{sudo}}} to provide elevated privileges just to the functions he may need, adhering to the principle of least privilege
* By implementing two-factor authentication, we have a second layer of security to protect us in case credentials are stolen.  
* SSH access to our class shell server cannot be blocked by a firewall.  We all need to be able to connect from anywhere.  The server is under constant brute-force login attack from all over the internet.  The [[fail2ban|https://www.fail2ban.org/]] tool was deployed on the class shell server to automatically detect and block those attacks.

Host-based firewalls are another important layer in the security of a system.  Perimeter firewalls are important, but what happens if an attacker is already within your walls?  Restricting access to open ports on your systems to only those needing to communicate with them is a good way to further limit your exposure to attack.

Systems must always be built with the assumption that outer layers have been or will be breached.  For example, 
* Assume your password will be stolen at some point.  Use two-factor authentication everywhere you can.
* Assume the perimeter firewall will be breached at some point.  Deploy a host-based firewall on your servers.
** Assume the host-based firewall may be accidentally disabled or bypassed.  Configure your services to properly use ~ACLs.
* Assume a user will have a weak password that could be brute-forced.  Deploy a system to detect and block brute-force login attempts.


!! ~SaltStack Example

[[SaltStack|https://www.saltstack.com]] is an open-source platform for server automation and remote task execution.  It's very powerful, easy to deploy, and easy to use.  We're using it in our virtual lab to facilitate the management and monitoring of the class ~VMs.  

~SaltStack consists of:
* A master server which serves as the central control hub to issue commands and push configuration changes
* Minions, which are the nodes connecting to and being managed by the master.

Your ~VMs are all Salt minions connecting to a master server I control.  This enables me to quickly and easily push configuration changes and review the state of your ~VMs in bulk.  Instead of having to connect to each system to fix or review something, I can issue a single command which will run on all of them.

Early last year two [[highly critical vulnerabilities|https://labs.f-secure.com/advisories/saltstack-authorization-bypass/]] ([[CVE-2020-11651|https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-11651]] and [[CVE-2020-11652|https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-11652]]) were disclosed impacting the ~SaltStack master server.  This vulnerability allows any attacker who can communicate with the ~SaltStack network ports on the master to completely take it over by sending control messages, read and write files to the master, and steal its secret keys.  The attacker will thus have complete control of the master server and all minions connecting to it.  This vulnerability will then allow for a complete compromise of all systems within a ~SaltStack deployment.

The [[Common Vulnerabilities and Exposures (CVE)|https://en.wikipedia.org/wiki/Common_Vulnerabilities_and_Exposures]] database managed by [[Mitre|https://cve.mitre.org/]] contains a list of all publicly disclosed security vulnerabilities. The ~CVEs are assigned a [[CVSS score|https://nvd.nist.gov/vuln-metrics/cvss]] ranging from 0 (benign) to 10 (critical) to rate their severity.  A CVSS score of 10 generally means full system compromise can be remotely accomplished.  These are "drop what you're doing and fix this now" vulnerabilities.  

Both ~SaltStack ~CVEs were assigned a CVSS score of 10.  ~F-Secure, the company which discovered the weaknesses, posted in their blog:  “Patch by Friday or compromised by Monday”.

This is a great example of the need for multiple layers of security.  There will always be time gaps between when vulnerabilities are introduced in software, when they are discovered, when patches are available, and when those patches can be applied.  Those time delays are occasionally significant
* 2 years for [[CVE-2014-0160|https://www.cvedetails.com/cve-details.php?t=1&cve_id=CVE-2014-0160]], also known as [[HeartBleed|https://heartbleed.com/]], which allowed the compromise of a web server's SSL secret keys and the decryption of ~SSL-encrypted network communication.
* 20 years for [[CVE-2020-0601|https://nvd.nist.gov/vuln/detail/CVE-2020-0601]] with a [[8.1|https://nvd.nist.gov/vuln-metrics/cvss/v3-calculator?name=CVE-2020-0601&vector=AV:N/AC:L/PR:N/UI:R/S:U/C:H/I:H/A:N&version=3.1&source=NIST]] rating ([[details|https://krebsonsecurity.com/2020/01/cryptic-rumblings-ahead-of-first-2020-patch-tuesday/]]) involving a core cryptographic library in Windows.

~F-Secure, in their [[blog post|https://blog.f-secure.com/new-vulnerabilities-make-exposed-salt-hosts-easy-targets/]], identified 6000 vulnerable ~SaltStack master servers through scans of the entire Internet.  Use of a firewall to prevent anyone on the internet from communicating with these servers would have been the first step in protecting them from abuse by the entire world and is especially vital now that vulnerabilities have been discovered.

Luckily, the ~SaltStack master used for our class is protected from the entire Internet by the perimeter firewall.  But what if an attacker is already on our network, either physically or virtually?  What if a misconfiguration of the perimeter firewall allows traffic to our master?  A host-based firewall must be deployed to protect this system by only allowing our class ~VMs to communicate with the Salt master.


!! Packet filter firewalls

Another component of system security which allows us to:
* Filter unwanted network traffic
* Log & monitor network traffic
* Block brute force attacks
* Rate limit to counter minor ~DoS events

Filter minimally based on source or destination address, ports, or protocol types
We can either default to deny or default to allow
Optional logging
 - logging is useful for regular monitoring and debugging

Ingress or Egress filtering
 - Control both what flows in and out of the system.
 - For example, filtering egress from a web server could effectively block [[reverse shell|https://www.acunetix.com/blog/web-security-zone/what-is-reverse-shell/]] attacks


!! Developing firewall rules
* We must first understand what communication is expected to take place
** Know the source and destination of the network traffic
*** Is it a new connection or related to an existing connection ?
** Match services to port numbers
*** Consult {{File{/etc/services}}} for a mapping of port numbers to service names.
*** Low ports 0-1023 are well-known ports and privileged.  They may only be bound by a root-controlled process.
*** Ports 1024-49151 are [[registered|https://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml]].  Port registration is issued by IANA.
*** Ports 49152-65535 are dynamic ports and generally not listening for services.  These are often used for replies and established connections.
** Observe what is currently listening for connections:
*** {{Command{ss}}} - Socket Statistics (the new tool). Installed by default
**** {{Command{ss -tunlp}}} - Display which ports are currently listening for connections.  The -n option will display IP addresses and port numbers instead of their names.  
**** {{Command{ss -tulp}}} - Display which ports are currently listening for connections.  Omitting the -n option will display host and service names instead of their numbers. 
*** {{Command{netstat}}} - Print network connections (the old tool).  This command requires the ''net-tools'' package.
**** {{Command{netstat -tunlp}}} - Display which ports are currently listening for connections.  The -n option will display IP addresses and port numbers instead of their names.  
**** {{Command{netstat -tulp}}} - Display which ports are currently listening for connections.  Omitting the -n option will display host and service names instead of their numbers. 
*** {{Command{lsof -i -n -P}}} - ''l''i''s''t ''o''pen ''f''iles.  Displays files and ports in use along with the processes which are utilizing them.  Requires the ''lsof'' package.
** Use a tool like [[nmap|https://nmap.org/book/man.html]] to scan a remote system
*** Scanning a remote system with nmap can reveal what services are listening or how effective your firewall is
** Some services listen on both TCP and UDP.  Be sure to take note of the protocol being used.
* Once you know what should be listening, then create rules to allow desirable traffic
** Determine if any traffic should be blocked
** Decide what to do with remaining ports
*** Allow 
*** Allow but log
*** Deny

!!! Examples

Display listening ports on our class lab Proxmox server.  The third column shows the IP address the service is bound to.  The IP address of 0.0.0.0 means all IP addresses.  The IP address 127.0.0.1 means localhost, or the internal system.
{{{
root@lab ~ # netstat -tunpl
Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name
tcp        0      0 0.0.0.0:2222            0.0.0.0:*               LISTEN      789/sshd: /usr/sbin
tcp        0      0 0.0.0.0:111             0.0.0.0:*               LISTEN      1/init
tcp        0      0 0.0.0.0:80              0.0.0.0:*               LISTEN      8673/nginx: master
tcp        0      0 127.0.0.1:85            0.0.0.0:*               LISTEN      1669/pvedaemon
tcp        0      0 127.0.0.1:25              0.0.0.0:*               LISTEN      1633/master
tcp        0      0 0.0.0.0:443             0.0.0.0:*               LISTEN      8673/nginx: master
tcp        0      0 0.0.0.0:3128            0.0.0.0:*               LISTEN      1684/spiceproxy
udp        0      0 0.0.0.0:111             0.0.0.0:*                           1/init
}}}

{{Note{''Note:'' Examine your output closely.  Not all ports need to be opened in the firewall.  In the output above, we can see ports 25 & 85 are bound to 127.0.0.1, the loopback address.  These two ports can only be connected to from within the system and should not receive firewall rules.  Also think about what services on your server should be receiving external connections.  Some should not be receiving connections from external systems and should not have firewall rules created for them.}}}


Perform a port scan of the class lab Proxmox server from an external host.
{{{
nick@trillian:~>nmap lab.ncs205.net
Starting Nmap 7.70 ( https://nmap.org ) at 2022-04-12 23:19 EDT
Nmap scan report for lab.ncs205.net (94.130.36.42)
Host is up (0.12s latency).
Not shown: 892 filtered ports, 103 closed ports
PORT     STATE SERVICE
22/tcp   open  ssh
53/tcp   open  domain
80/tcp   open  http
443/tcp  open  https
3128/tcp open  squid-http

Nmap done: 1 IP address (1 host up) scanned in 84.21 seconds
}}}

Notice the difference between the two reports?  
* Ports listed in the first report but not the second are being blocked at the external firewall
* Ports listed in the second report but not the first are being forwarded by the Proxmox server to internal ~VMs


!!Stateful inspection - State module

* Stateless - inspect each packet in isolation.  Examine source and destination hosts and ports then decide what to do.
* Stateful - maintain the state of network connections.  These states can be used to determine policy.
** For example, allow traffic that is already part of an established connection (TCP) or is a reply to a previous request (UDP)

Inspect the traffic to allow expected replies
* -m state &nbsp; &#45;-state //state//
* State is a comma delimited list of:
** NEW - the packet has started a new connection
** ESTABLISHED - the packet is associated with a connection which has seen packets in both directions
** RELATED - the packet is starting a new connection, but is associated with an existing connection
** INVALID - the packet  could not be identified for some reason (typically some type of error)

Going into detail on these networking concepts is beyond the scope of this class.  But they are important to understand.


TCP - already stateful:
* 3-way handshake
* Evaluate TCP flags to determine state
[img[img/state-tcp-connection.jpg][http://www.iptables.info/en/connection-state.html]]

UDP - stateless
* no flags to evaluate
* kernel tracks outbound UDP packets.  Responses to outstanding requests are marked
[img[img/state-udp-connection.jpg][http://www.iptables.info/en/connection-state.html]]


!! Tool Overview
* netfilter - What's running under the hood
** This is manipulated with the deprecated {{Command{iptables}}} command.
* firewalld - A new front-end to simplify managing the firewall
** This is manipulated with the new {{Command{firewall-cmd}}} command.  This is the standard tool to use in ~CentOS 7.

The new ~FirewallD and its set of tools makes the management of a basic firewall very easy.  You no longer need to know intricacies of how ~NetFilter works and how its rules are created.  ~FirewallD will take care of that for you.  It's good info to know and is included here for reference.  You can skip the //Linux ~NetFilter// section if you'd like and continue with the ~FirewallD section down below.  We'll be using {{Command{ firewall-cmd }}} to manage our firewall instead of {{Command{ iptables}}}, the old command.

----

!!Linux [[netfilter|http://www.netfilter.org/]]
* controlled by the {{Command{ iptables }}} command

!!! Table:
* Sets of chains 
* Default table is named filter
* Additional tables:
** NAT table
** Mangle table - for specialized packet alteration (~QoS)
** Raw table - for configuration exemptions

!!! Chains of rules:
* Firewall rules are grouped into chains
* Rules within a chain are interpreted in order, top to bottom
** Until a match is found
** Or the default target is reached (ACCEPT or REJECT)
* Default chains:
** INPUT: traffic addressed to the system
** OUTPUT: traffic leaving the system
** FORWARD: all packets arriving on one network interface and leaving another
* Custom chains can be created for organizing similar rules

!!! Rules:
* Rules contain a criteria and a target
* The criteria is based on attributes of the packet, such as IP addresses or ports.
* If the criteria is matched, either perform the specified action or continue rule processing within the target
* If the criteria is not matched, move on to the next rule.
* Terminate with the chain's default target

!!! Targets:
Each rule contains a target clause to determine what to do with matched packets:
* ACCEPT - allow the packet to proceed
* DROP - silently reject the packet (causes TCP retries)
* REJECT - reject the packet with an ICMP error message
* LOG - track the packet as it matches a rule
* REDIRECT - redirect packets towards a proxy
* RETURN - terminate user-defined chains
* QUEUE - transfer packets to local user programs via a kernel module
* A custom chain may be specified as a target.  Rules in that chain will be evaluated.


!!! iptables Commands

iptables -h

!!!! Saving your rules
iptables-save > /tmp/iptables.rules
iptables-restore < /tmp/iptables.rules

service iptables save
rules are stored in /etc/sysconfig/iptables


!!!! Firewall Operations:
| !Option | !Definition |
|-L [&#45;-line-numbers] [-v] |List all rules|
|-I //chain-name// //position-number//  //rule// |Insert rule into a chain|
|-A //chain-name//  -i //interface//  -j  //target// |Append a new rule to the end of the chain|
|-D //chain-name// //position-number// |Delete a rule from a chain|
|-P //chain-name//  //target// |Sets default policy for the chain|
|-F //chain-name// |Flush all rules in a chain|
|-N //chain-name// |Create a new chain|

!!!! Filter criteria command line options:
| !Option | !Definition |
| -p proto |Match by protocol: tcp, udp, or icmp|
| -s source-ip |Match host or network source IP address|
| -d dest-ip |Match host or network destination address|
| &#45;-sport port# |Match by source port|
| &#45;-dport port# |Match by destination port|
| &#45;-icmp-type type |Match by ICMP type code|
| -i int |Match by interface|
| &#33; |Negate a clause|
| -t table |Specify the table to which a command applies (default is filter)|
| -j //target// |Specify target to use|

!!!! Extensions:
| -m state &nbsp; &#45;-state //state// |filter based on specified //state//|
| -m multiport &#45;-dports //port1//,//port2//,...//portN// |filter multiple ports|
| -m owner &#45;-uid-owner //uid// |filter based on user name|


!!! Examples:

{{{

iptables -L --line-numbers -v

# Allow established traffic:
iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT


iptables -A INPUT -s 10.103.35.0/24 -p tcp --dport 4444 -j ACCEPT
iptables -A INPUT -p tcp -m multiport --dports 20,21 -j ACCEPT

iptables -A OUTPUT -p tcp -m multiport --dports 20,21 -j REJECT


# SSH chain:
iptables -N SSH
iptables -A SSH -p tcp --dport 22 -j ACCEPT -s 10.103.36.24/29
iptables -A SSH -p tcp --dport 22 -j ACCEPT -s 10.103.36.10
iptables -A SSH -p tcp --dport 22 -j ACCEPT -s 150.156.192.0/24
iptables -A SSH -p tcp --dport 22 -j ACCEPT -s 10.156.195.0/24
iptables -A SSH -p tcp --dport 22 -j ACCEPT -s 150.156.193.20
iptables -A SSH -p tcp --dport 22 -j LOG -m limit --limit 1/sec --log-prefix "IPTables-SSH: " --log-level 4

iptables -I INPUT 6 -j SSH -m state --state NEW
}}}

----

!! [[FirewallD|https://firewalld.org/]]

~FirewallD is the new way to manage Linux firewalls.  Everything above in the ~NetFilter section is still running under the hood, but ~FirewallD provides a nice front-end to manage things.

Most of the following information is directly from the [[RedHat Linux Firewalls|https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/security_guide/sec-using_firewalls]] documentation.

The Firewall Stack:
[img[img/firewall-stack.png]]


!!! Zones
firewalld can be used to separate networks into different zones according to the level of trust that the user has decided to place on the interfaces and traffic within that network. A connection can only be part of one zone, but a zone can be used for many network connections. 
!!!! Available zones:
* ''Block'':  Any incoming network connections are rejected with an icmp-host-prohibited message for ~IPv4 and icmp6-adm-prohibited for ~IPv6. Only network connections initiated from within the system are possible. 
* ''dmz'': For computers in your demilitarized zone that are publicly-accessible with limited access to your internal network. Only selected incoming connections are accepted. 
* ''drop'': Any incoming network packets are dropped without any notification. Only outgoing network connections are possible. 
* ''external'': For use on external networks with masquerading enabled, especially for routers. You do not trust the other computers on the network to not harm your computer. Only selected incoming connections are accepted. 
* ''home'': For use at home when you mostly trust the other computers on the network. Only selected incoming connections are accepted. 
* ''internal'': For use on internal networks when you mostly trust the other computers on the network. Only selected incoming connections are accepted. 
* ''public'' (default): For use in public areas where you do not trust other computers on the network. Only selected incoming connections are accepted. 
* ''trusted'': All network connections are accepted. 
* ''work'': For use at work where you mostly trust the other computers on the network. Only selected incoming connections are accepted. 


!!! Runtime vs. Permanent Settings

There are two firewall configurations:  
* The runtime settings define the firewall rules currently in effect 
* The permanent settings reflect the stored configuration that will be reloaded if the firewalld service restarts.

Any changes applied to the running firewall only apply while firewalld is running. When firewalld is restarted or the system reboots, the settings revert to their permanent values.

To make firewall changes persistent across reboots, rules need to be saved in both locations.  This can be accomplished two different ways:

Modify the runtime configuration first:
# Add a new rule to the runtime configuration:  {{Command{ firewall-cmd &#45;-add-service=ssh }}}
# Test your rule and system.  Make sure nothing is broken.
# Make your changes permanent if everything works:  {{Command{ firewall-cmd &#45;-runtime-to-permanent }}}
# Or, discard your runtime changes and reload the permanent configuration if there is a problem {{Command{firewall-cmd &#45;-reload }}}

Modify the permanent configuration first:
# Add a new rule to the permanent configuration:  {{Command{firewall-cmd &#45;-permanent &#45;-add-service=ssh}}}
# Reload the permanent configuration {{Command{ firewall-cmd &#45;-reload }}}


!!! Predefined Services

A service can be a list of local ports, protocols, source ports, and destinations, as well as a list of firewall helper modules automatically loaded if a service is enabled. Using services saves users time because they can achieve several tasks, such as opening ports, defining protocols, enabling packet forwarding and more, in a single step, rather than setting up everything one after another. 

Service configuration options and generic file information are described in the firewalld.service(5) man page. The services are specified by means of individual XML configuration files located in {{File{/usr/lib/firewalld/services/}}} which are named in the following format: //service-name//.xml. Protocol names are preferred over service or application names in firewalld. 

This example contains both pre-defined services and a list of ports to allow.  The dhcpv6-client, ntp, and ssh services are allowed.  The ports and protocols for these services are defined in their XML file.  The TCP ports 4505 and 4506 are also specifically allowed.  Using service names is preferred if a service definition file is available.

{{{
[root@head ncs205]# firewall-cmd --list-all
public (active)
  target: default
  icmp-block-inversion: no
  interfaces: eth0
  sources:
  services: dhcpv6-client ntp ssh
  ports: 4505-4506/tcp
  protocols:
  masquerade: no
  forward-ports:
  source-ports:
  icmp-blocks:
  rich rules:
}}}


!!! Basic commands

Here are a few basic commands involved in managing a firewall using the ~FirewallD management command, {{Command{firewall-cmd}}}

* Display the current state of the firewall: {{Command{ firewall-cmd &#45;-state }}}
* Display all options available: {{Command{ firewall-cmd -h }}}
* Display the active zones: {{Command{ firewall-cmd &#45;-get-active-zones }}}
** The default zone is //public//.  We'll stick with the default for our ~VMs
* Add a port to the permanent zone: {{Command{ firewall-cmd &#45;-add-port=5667/tcp &#45;-permanent }}}
* Remove a port from the permanent zone: {{Command{ firewall-cmd &#45;-remove-port=5667/tcp &#45;-permanent }}}
* Reload the firewall configuration and activate any new rules added to the permanent zone: {{Command{ firewall-cmd &#45;-reload }}}
* Add a new service to the runtime zone: {{Command{ firewall-cmd &#45;-add-service=ssh  }}}
** When possible, try to add services by name instead of port numbers.
* Remove a service from the runtime zone: {{Command{ firewall-cmd &#45;-remove-service=ssh  }}}
* Copy the runtime configuration to permanent: {{Command{ firewall-cmd &#45;-runtime-to-permanent }}}
* Get a list of all services known to firewalld:  {{Command{ firewall-cmd &#45;-get-services }}}
* List the current runtime firewall configuration: {{Command{ firewall-cmd &#45;-list-all }}}
* List the current permanent firewall configuration: {{Command{ firewall-cmd &#45;-permanent &#45;-list-all }}}
* Forward port 80 from the external interface to port 80 on 10.0.0.10 through the internal interface: {{Command{ firewall-cmd &#45;-zone=external &#45;-add-forward-port=port=80:proto=tcp:toaddr=10.0.0.10:toport=80 }}}
** Only here for reference.  We're not doing any port forwarding in this class.  This command is handy if you're working with a Linux-based router.

{{Note{''Note:'' When possible, allow traffic through the firewall with the {{Monospaced{-&#45;add-service}}} option instead of {{Monospaced{-&#45;add-port}}}.  It will result in a cleaner configuration for services which utilize multiple ports, such as DNS.  For example, if you only run {{Monospaced{-&#45;add-port=53/udp}}} and neglect the TCP protocol, you'll allow DNS queries to your server but will block zone transfers to the slave, which utilize TCP. }}}

!Sources

http://bodhizazen.net/Tutorials/iptables
http://www.thegeekstuff.com/2011/06/iptables-rules-examples/
http://www.liniac.upenn.edu/sysadmin/security/iptables.html
http://www.borgcube.com/blogs/2014/05/securing-ntp-and-rate-limiting-using-iptables/
http://fideloper.com/iptables-tutorial	
https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/security_guide/sec-using_firewalls

! Assignment

<<tiddler [[Lab 66 - Host-based Firewalls]]>>
! Material
!! Read:
* Linux Administration Chapter 8 - Filesystems
* Linux Administration pages 72 & 73 - {{Command{du}}} & {{Command{df}}} commands
** These commands are great to know and very useful for troubleshooting


! Notes

File storage is a core function of any unix system
Something must store the operating system
Along with any system or user data

Core storage concepts
* Various storage layers
* What we store files on and how we're currently making it available to the system
* Network file systems
* swap
/%
Additional storage discussion in the next section
* Additional filesystem types
* Other ways to store data and make files available to the system
%/

!! Storage layers:

[img[img/storage-layers.jpg]]

Hardware storage devices
 - Storage medium (hard drive (spindle), flash / solid state (SSD, ~NVMe), optical, tape)
 - How the medium physically attaches to the system (SATA, SAS, USB, SCSI, etc)
Redundancy (optional): RAID arrays or Volumes
Partitions
Filesystems


!! Storage Hardware Devices

!!! Storage medium:
* Traditional disks: 
** Platter rotation speed in RPM:  5400, 7200, 10k, 15k
*** Rotation speed affects the transfer rate of the volume
** File fragmentation
*** Performance suffers as blocks associated with a file become scattered around the disk
*** Defragmentation arranges the disk so file blocks are all together.  Windows has defrag processes but there is no such functionality in Linux filesystems.
* SSD/~NVMe - Solid State Disk
** Significantly Faster
** Limited number of writes
** Available in several speed and resiliency options.
*** [[SSD Endurance Experiment|http://techreport.com/review/27909/the-ssd-endurance-experiment-theyre-all-dead]]
** Less prone to failure since there are no moving parts
*** No more defragging
** More expensive per GB
** Data written out in pages.  A memory page must be erased before data is written.  Erase is slower than write.
*** TRIM command - erase pages before they're needed to increase performance
** Be sure to align filesystem clusters to SSD pages to avoid extra wear
*** Align partitions to page boundaries 
* Hybrid drives
** Part SSD / part magnetic platter
** These seem to have fallen out of fashion now that SSD prices have come down so far.  I haven't seen them in a few years.
* Flash drives / cards
* Optical - DVD / CD.  Now mostly a relic of the past and replaced by USB
* Tape - Good for archival but mostly replaced by disk-to-disk and cloud backup solutions now that storage is so cheap.

/%
!!! Interfaces

Desktop:
* ATA - Advanced Technology Attachment
** PATA - Parallel ATA  (IDE)
*** ATA/133 - 133 MB/s
*** 40 pin, 80 conductor ribbon cable
*** Fully antiquated now and replaced by SATA
** SATA - Serial ATA
*** Replaced PATA
*** much higher transfer rates
*** many performance enhancements
*** hot swap
*** far better cabling
* [[M.2 Socket|https://en.wikipedia.org/wiki/M.2]] for ~NVMe drives
** Replacing SATA connections
** Uses PCI Express bus
** Significantly faster data transfer rates over SATA/SSD.

Server:
[img[img/scsi.jpg]]
*SCSI - Small Computer System Interface
** Parallel SCSI
*** Old ribbon cables
*** for daisy chaining of devices
*** Used for connecting disks and early peripherals (tape drives, CD writers, scanners, printers, etc)
*** Bus ends must be terminated to prevent noise by absorbing signals which reach the end of the bus 
*** All devices have a unique target number to identify them on the bus (0 - 15), set by dip switches or jumpers
*** Largely antiquated now and replaced by SAS
** SAS - Serial Attached SCSI
*** New standard for enterprise drive connection
*** Point to point connections instead of chained
*** No longer limited to 16 devices on the bus
*** Connectors compatible with SATA
*** Much higher transfer rates
*** SATA price and performance make for good enterprise alternatives
*** Similar connectors and support for SATA drives
* [[M.2 Socket|https://en.wikipedia.org/wiki/M.2]] for ~NVMe drives
** These are starting to show up in servers with enterprise-grade drives to store the operating system

* Fibre Channel
** enterprise
** high bandwidth
** speed
** can connect many devices at once

Both:
* USB
** slow compared to the alternatives, especially < USB 3.
** v1: 1.5 Mbit/s or 12 Mbit/s (~Full-Bandwidth)
** v2: ~Hi-Speed - 480Mb/s
** v3:  ~SuperSpeed - 4Gb/s
** v3.1: ~SuperSpeed+ - 10Gb/s
** USB converters for SATA - Connect a traditional hard drive to USB.  Great for the workbench
* Network access - Storage area Network (SAN)
** iSCSI (SCSI over IP)
** Network file system (NFS)
%/

!!! From the operating systems perspective:

Device files are all in {{File{/dev/}}}
Identify the devices:
 - Check the kernel boot logs.  Devices are detected on boot and mentioned in the log.  {{Command{ dmesg }}}
 - Scan the syslog logs: {{File{ /var/log/messages }}}
 - Physical drives: {{Command{ ls /dev/sd* }}}
 - Virtual drives {{Command{ ls /dev/vd* }}}
 - On Linux, the {{Command{lsblk}}} command will display all storage devices and where they are mounted


!!! Disk failure 

Disk failure is increasingly common
* [[Google disk failure research|http://static.googleusercontent.com/media/research.google.com/en/us/archive/disk_failures.pdf]] - Old now, but the problem is likely worse now.
* Backblaze hard drive reports:  https://www.backblaze.com/b2/hard-drive-test-data.html
** I use their data to inform my hard drive purchases.  Consumer disks seem to decrease in quality every year.
** When I first started tracking their data 10 years ago:
*** Drives tended to fail either very early (first few months) or after about 3 years
*** Generally there was only a 75% 5 year survival rate
*** It's important to have redundancy for anything that matters
* Traditional hard drives have moving parts
** Drive burn-in - early disk activity to catch failures before actual use
* Solid state
** Limited number of write cycles
** Wear leveling - distribute wear across the disk
*** Relocation of static data to higher-wear areas of the disk
** Firmware will keep track of disk usage to determine where to write the data
* {{Command{badblocks}}} command  
* SMART Monitoring
** https://en.wikipedia.org/wiki/S.M.A.R.T.
* A good backup and disk redundancy strategy will mitigate the effects of disk failure.  


!! RAID

Combine multiple physical storage devices into a single virtual device.

RAID Levels:
Linear (JBOD): Concatenate all disks into one large logical volume
Level 0, 1, 5, 6, 10
* 0: Striping - more for speed than resiliency.  Parts of a file are spread across multiple disks.
** A single disk failure means all data is lost
* 1: Mirroring - duplicate data across multiple disks
** A single disk failure does not result in data loss since the data also exists on another drive
** Increased performance on read but decreased on write
** Half of your drive capacity is lost to redundancy
* 5: Striping + Parity.  Files are written across multiple disks along with parity information on one drive
** If one disk fails it can be rebuilt from the parity data
** All data is lost if more than one disk fails.  Replace a failed drive quickly before another is lost.
** Performance penalty but maximizes the amount of available disk space since only one drive is dedicated to redundancy
* 6: Striping + Two Parity.  Files are written across multiple disks along with parity information on two drives
** Two drives of parity instead of one so the array can suffer 2 drive failures without data loss.
** Two drives worth of capacity are thus lost to support the redundancy.  This is a safer option for larger disks which may take a long time to rebuild or larger arrays with a lot of disks

Raid can be used to protect against hardware failure, not data corruption or online data loss!  This is not a substitute for file backups.

!!! Volume groups

Aggregate physical devices to form pools of storage (volume groups)
This pool can be divided into logical volumes for use

Linux LVM
* The primary way storage is handled in Linux
* Allocate space on demand and resize filesystems
* Supports snapshots
* Does not support raid 5 or 6.  Use linux software raid for redundancy then LVM to assign slices
* Access with {{Monospaced{pv}}}, {{Monospaced{vg}}}, {{Monospaced{lv}}} commands.  ie: {{Command{lvdisplay}}}
** The pv* commands display or manage physical volumes
** The vg* commands display or manage volume groups
** The lv* commands display or manage logical volumes

* Thin provisioning:
** Allocate more storage than you physically have available.
** Commonly used for virtual machines when you expect not all ~VMs will use the full amount of storage they have been allocated.  Here we see examples of the virtual storage allocation greatly exceeding the volume size.

[img[img/thinProvision.png]]
/%
Sun zfs
 - A mix of RAID and LVM

!!! Software RAID:
{{{
mdadm
Can monitor and send emails if there are any problems.

Use cfdisk to create partitions then mirror them
mdadm --create /dev/md0 --level=mirror --raid-devices=2 /dev/vdb1 /dev/vdc1

cat /proc/mdstat

mdadm --stop /dev/md0
mdadm --assemble --scan 

mdadm --detail --scan >> /etc/mdadm.conf
mdadm -As /dev/md0

mdadm /dev/md0 -f /dev/vdc1
cat /proc/mdstat
tail /var/log/messages

mdadm /dev/md0 -r /dev/vdc1
mdadm /dev/md0 -a /dev/vdc1

mdadm --stop /dev/md0
}}}

Use mdadm to create large raid5 or 6 arrays then LVM to allocate space into partitions
%/


!! Partitions

A fixed-sized division of the storage device

A way to organize files by type or access level.

Traditional partitioning, Master Boot Record (MBR) style
The first sector (512 bytes) of the disk contains the MBR.  First 440b for boot code and next 64b for partition info.
There is a 4 partition limit due to the 64b size constraints

The new way: GUID partition tables (GPT)
* Allows us to break the 2TB disk barrier.  Most spindle drives are larger than this now.
* Allows for more than 4 partitions per disk
* Partition info is stored at beginning and end of the disk (more resilient) 
* protective MBR
** Prevents non-GPT utilities from overwriting GPT partition info
** Allows non-GPT systems to boot GPT disks

Some tools for working with partitions:
* {{Command{fdisk -l}}}
* {{Command{cfdisk}}} for /dev/vdb
* {{Command{parted}}} / {{Command{gparted}}} (kali usb boot)


!! Filesystems

The interface between the data on the disks and the operating system.  

Linux Filesystem types:
* ext - extended filesystem
* ext2 - original and longtime standard
** extended further by new versions: ext3 and ext4
* xfs - 64bit with journaling - parallel I/O for high performance

{{Command{mkfs.*}}} - commands to create filesystems, eg: {{Command{mkfs.xfs}}}

Filesystem mounting
* mount points - Where on the filesystem tree a storage volume is attached
* {{File{/etc/fstab}}} file to configure standard filesystems and mount on boot
* Standard {{Command{mount}}} and {{Command{umount}}} commands
** {{Command{mount}}} - mount a filesystem to a mount point
** {{Command{mount [-t type] [-o option[,option]...] device dir}}}
** {{Command{mount -a }}}
** Mount options
*** rw / ro
*** noexec  (Maybe for /tmp/?)
*** nosuid   (Maybe for /home/?)
*** noauto - Used in fstab to not mount the filesystem on boot
*** remount - remount a filesystem, changing its mount options.  Useful for remounting a ro filesystem as rw
** {{Command{umount}}}  - unmount a filesystem.  A filesystem that is in use cannot be unmounted


{{Command{fsck}}} - filesystem check - find and fix any filesystem errors
 * journaling speeds things up
/%
Root reserved space
* 8% of the volume is reserved for root (by default)
* Users cannot utilize this last 8%
* A safety valve to prevent users from choking out and crashing a system

Usage quotas:
* A way to limit disk utilization per user or per group
%/


!! Other filesystem types:

!!! ~RAM-backed filesystem

ramfs - RAM will be used to store files until it is exhausted.  No limit to the size of the filesystem.

tmpfs - Also RAM backed, but a max size can be specified.  Will use swap if physical memory is exhausted.  This filesystem type largely replaces the older ramfs.

{{Command{mount -t //TYPE// -o size=//SIZE// //DEVICE// //MOUNTPOINT//}}}

Note memory usage before creating RAM backed filesystems.  Don't starve your system for resources.
Check total and available memory with the {{Command{free}}} command

These types of filesystems are especially useful for logs on systems booted from flash drives (eg: Raspberry Pi) so you don't add extra wear to the drive


!! Additional filesystem commands

* {{Command{du}}} - Disk usage - Show how much space is being consumed by files or directories
** Useful for tracking down high disk usage
** Examples:
*** Display disk usage of all directories under {{File{/}}}: {{Command{du -sh /*}}}
*** Top usage directories sorted by size:  {{Command{du -sk * | sort -nr | head}}}
* {{Command{df}}} - Disk Free - Show the current utilization of all mounted filesystems


! Assignment

<<tiddler [[Lab 67 - Bring Files VM online]]>>
----
!! LVM Lab

Lab E is an extra credit lab.  It has been moved to [[Week E]]

----
<<tiddler [[Lab 68 - Storage Expansion]]>>

/%
----
<<tiddler [[Lab 69 - Monitoring disk usage with Nagios]]>>
%/
! Wrapping up
!! Closing out the semester

!!! VM deletion

Our lab environment for this class will be decommissioned on ''Thursday, December 22''.  If there is anything you would like to complete or back up, please do so by then.  Please let me know if there's anything you need help saving or run short on time.

!!! Additional Resources
!!!! A lab environment similar to ours can easily be replicated from open source tools:
* [[Proxmox|https://www.proxmox.com/en/]]: The hypervisor our ~VMs are running on.  Good if you have a spare server kicking around.
* [[Naemon|https://www.naemon.org/]]: Infrastructure monitoring
* [[SaltStack|https://www.saltstack.com/]]: Infrastructure Management & Orchestration - I used this to easily run commands on all class ~VMs and maintain baseline configurations
* [[Google Domains|https://domains.google/]]: Domain Registration - A simple, clean interface and free domain privacy.
* [[DigitalOcean|https://www.digitalocean.com/]]: Low cost cloud ~VMs - I use these for my personal infrastructure.  Good Linux ~VMs for $5 per month.
* [[Hetzner|https://www.hetzner.com/sb?country=us]]: Low-cost bare metal cloud servers.  This is the hosting provider for our class lab environment.

!!! Class website mirror

The entire class website runs from a single HTML file.  A zip file containing the HTML file along with linked images, videos, and lab ~PDFs can be downloaded from https://www.ncs205.net/ncs205.zip
/% * Last updated 4/30/22 @ 16:30 %/
@@Link will be live once classes end@@

!!! Feedback

I hope everyone enjoyed this class and got something useful from it.  The material I included is the highlights of what you'll need to be exposed to if you'll be using Linux in and beyond the NCS program.  If you have any feedback to offer, good or bad, please let me know.  I'm always looking for ways to improve the class for the next semester.  


! Final Exam

!! Part 1:

You may complete this part of the final exam as a group or individually.  If completed as a group, all group members must know how to complete the tasks and must submit their own write-up.  This portion of the Final Exam will be worth 20% of your Final Exam grade.
<<tiddler [[hack3 break-in]]>>

!! Part 2: 

We will schedule individual times during finals week to discuss this semester's material via Zoom.  This will be worth 80% of the Final Exam grade.  The goal here is for it to be more of a casual conversation on what you've learned than an actual exam.  I will ask a few questions from each of the three sections of the semester:
# Basic UNIX commands & the filesystem
# UNIX filters and manipulating text
# Linux Systems administration

Expect to be given approximately three questions to solve of increasing difficulty from each section.  This Zoom meeting should last approximately 30 minutes.

/% The schedule of available times will be posted on Dec 2 and each student will be able to choose their meeting time. %/
Please choose your meeting time for Part 2 of the Final Exam @@''by EOD Friday, December 9''@@:  https://doodle.com/meeting/participate/id/e0YkpXya
* Be sure to pick only one time
/%
| !Username | !Time |
|backers |   |
|caplanc |   |
|cormiej |   |
|esquitc |   |
|fitzgea2 |   |
|khanss |   |
|khandat |   |
|leonevj |   |
|lionht |   |
|marvinc |   |
|skip |   |
|penas |   |
|peralas |   |
|reynolz |   |
|stevendc |   |
|subedib |   |
|tajs |   |
|talasih |   |
|tankoud | 12/16 @ 6pm  |
|terronl |   |
|tut |   |
%/
/% awk '{print "|" $1 " |   |" }' user2201.txt %/

! Outstanding Labs

Any outstanding labs must be submitted by 6pm Saturday, December 17.  Please let me know if you expect a problem meeting that deadline for outstanding work and we'll discuss options.
! Material
!! Read:
* Chapter 3 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]]

!! Watch:
* [[Navigating the filesystem|https://www.youtube.com/watch?v=j6vKLJxAKfw]]


! Working on the command line

!! File types

In the Unix world, everything is represented by a file.  Regular files and directories are files, but so is hardware.  Each piece of hardware connected to the system is associated with a file within the directory {{File{/dev/}}}.
There are three main types of files we will be working with:
* Ordinary files.  These are regular files on the system, divided into:
** Plain text - Files you can read if you were to open them in a text editor
** Binary - Machine code, such as programs intended to be executed.
* Directories - The organizational unit for files within the filesystem
* Symbolic Links - These are pointers to another file somewhere on the system.  The symbolic link only contains the path to its target.

The [[Week 1, Part 2]] page has a more complete breakdown.  Be sure to review the information on that page again.

!! Navigating the filesystem:

!!! Directory paths
* Paths in Unix are very similar to the command line on Windows
* Here, the path separator is a forward slash - {{File{/}}}
* You can change directories with the {{Command{cd}}} command
** eg: {{Command{cd /opt/pub/ncs205/submit}}}
* List the contents of the directory with the {{Command{ls}}} command
* List the contents of the directory in long format with the {{Command{ls -l}}} command
** eg: {{Command{ls -l /opt/pub/ncs205/submit}}}
* Some directories of interest:
** {{File{/home/}}} - //Most// user home directories reside within this directory, but not all.
*** This is only a convention, not a requirement.  A user home directory can be anywhere on the system.
** {{File{/opt/pub/ncs205/submit/}}} - Lab/Homework assignments are uploaded to this directory
** {{File{/opt/pub/ncs205/data/}}} - Data files for labs are stored here
** {{File{/tmp/}}} - Temporary scratch space
** {{File{/bin/}}} & {{File{/usr/bin/}}} - Where most program files reside

!!! Listing files - {{Command{ls}}}
The {{Command{ls}}} command will list the contents of a directory.  Extra options can be used to alter the default behavior of the {{Command{ls}}} command:

!!!! Display in long list format:  {{Command{ls -l}}}
The {{Command{-l}}} option will display the contents of the directory in long listing format.  This displays additional metadata about a file, such as the file type, ownership information, size, and modification timestamp.
The first character on the line indicates the type of file:
* ''d'' for directory
* ''l'' for link
* ''-'' for regular file
{{{
[root@lab ~]# ls -l
total 20
total 152
drwx------   2 root root   173 Nov 23  2020 bin
-rw-------.  1 root root   903 Feb 18  2021 commands.txt
-rw-------   1 root root 57238 Aug 25  2020 packages.txt
-rw-------   1 root root   465 Sep  4 13:16 user2109.txt
}}}

!!!! Display hidden files:  {{Command{ls -a}}}
Files beginning with a dot are hidden and not normally displayed with the {{Command{ls}}} command.  Adding the {{Command{-a}}} option will allow them to appear:
{{{
[root@lab ~]# ls -al
total 152
dr-xr-x---.  6 root root  4096 Sep  4 13:16 .
dr-xr-xr-x. 17 root root   256 Aug 31 12:42 ..
-rw-r--r--.  1 root root   287 Dec 15  2020 .bash_profile
-rw-r--r--.  1 root root   176 Dec 28  2013 .bashrc
drwx------   2 root root   173 Nov 23  2020 bin
-rw-------.  1 root root   903 Feb 18  2021 commands.txt
-rw-r--r--.  1 root root   100 Dec 28  2013 .cshrc
-rw-------   1 root root 57238 Aug 25  2020 packages.txt
drwx------.  2 root root    71 Aug 28 17:38 .ssh
-rw-------   1 root root   465 Sep  4 13:16 user2109.txt
}}}

!!!! Sort by modification time:  {{Command{ls -t}}}
Adding the {{Command{ls -t}}} option will sort by modification time instead of by file name with the oldest files on the bottom.  The {{Command{ls -r}}} reverses the default sort to instead put the newest files on the bottom.

Combining all of these options to see a long listing sorted by reversed time (with the newest files on the bottom) is often handy:
{{{
[root@lab ~]# ls -lrt /opt/pub/ncs205/data
total 496
drwxr-xr-x. 3 root    10000     61 Feb  7  2018 lab7
drwxr-xr-x. 2 root    10000     74 May 27  2019 lab8
drwxr-xr-x. 2 root    10000  16384 May 31  2019 lab9
-rwxr-xr-x. 1 merantn users 471148 Jan 30  2020 whatami
drwxr-xr-x. 2 root    10000    136 Feb 13  2020 lab10
drwxr-xr-x  2 root    root      41 Feb 16  2020 lab15
d---------  2 root    root      18 May  4  2020 final
drwxr-xr-x  2 root    root     238 Oct 16  2020 filter_examples
drwxr-xr-x  2 root    users    207 Mar 13 12:31 lab21
}}}


!! Executing commands

!!! Structure of a command string:
* {{Command{command [options] [arguments]}}}
** options will begin with either a single dash (''&dash;'') or a double dash (''&dash;&dash;'')
** options and arguments may be optional or required depending on the command.
** Best practice is to enter the three components in that order
** The [ ] in the usage example above indicates that component is optional.  The command is always required.  Some commands also require arguments.  The synopsis in the command's man page will indicate which components are required for different ways to use the command.

!! Viewing files
* Display the contents of a file: {{Command{cat //filename//}}}
* Display the contents of a file one page at a time: {{Command{less //filename//}}}
* Edit a text file: {{Command{nano //filename//}}} -or- {{Command{vi //filename//}}}

!! Working efficiently - some shortcuts
* View your previously executed commands with the {{Command{history}}} command
* Tab completion - Press the tab key to autocomplete commands or file paths
** Enter the first few letters of a command or file, press tab, and the shell will fill in the remaining letters (if it can)
* Up / Down arrows - search up and down through your command history to rerun a previously executed command
* Page Up / Page Down - Use these keys to search through your command history for the last commands which begin with a given string
** Type a few letters from the beginning of a previously executed command and press Page Up.  The shell will return to the last command you executed which began with those letters.

!! Other useful commands:
* {{Command{touch}}} - create an empty file
* {{Command{file}}} - examine a file to identify its type
* {{Command{strings}}} - display the plain text strings within a binary file.  Often useful for forensics and identifying what a binary is or does.


! Deliverables

!! Read Chapter 3 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]]
- Complete [[Lab 3|labs/lab3.pdf]] & [[Lab 4|labs/lab4.pdf]]
! Material
!! First complete this lab for review:
- Complete [[Lab 5|labs/lab5.pdf]]
!! Read:
* Chapter 4 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]]
** You can gloss over the parts about wildcards (pp 26-27) for now.  We'll come back to them later.
** Focus on becoming familiar with the available commands.
!! Watch:
* Creating and Deleting files and directories: https://www.youtube.com/watch?v=91FhiTyEaCU
* Moving and copying files: https://www.youtube.com/watch?v=GKEGNdNIQrw


! Manipulating Files & Directories

Every operating system has a basic set of utilities to manipulate files and directories on the command line.  Our next reading assignment will introduce those commands in the Linux operating system.


!! Basic file manipulation commands

Rename and move files - {{Command{mv}}}
Copy files  - {{Command{cp}}}
* Recursive copy: {{Command{cp -R}}}
Create directories - {{Command{mkdir}}}
Delete files and directories  - {{Command{rm}}} / {{Command{rmdir}}}
* Difference between unlink & delete - Removing a file doesn't actually delete it from the system, it only marks the space it was occupying as available for reuse.
* This is more appropriately called "unlinking".  We're only removing the link to the data blocks.  The actual data will still reside on the disk and can be forensically recovered.
* The {{Command{srm}}} or {{Command{shred}}} commands will actually destroy a file by overwriting its data blocks before unlinking it.
** These commands are not installed by default on most systems
** Securely wiping a file is more resource intensive than simply unlinking it.
View the contents of files with {{Command{cat}}} and {{Command{less}}} (or {{Command{more}}})
Edit text files:
* The standard Unix text editors are {{Command{nano}}} (Basic and easy to use) and {{Command{vi}}} (more powerful, but harder to get used to)
* {{Command{vi}}} comes installed on every Unix/Linux system.  The {{Command{nano}}} editor may need to be installed separately.


!! Other useful commands:
* {{Command{touch}}} - create an empty file
* {{Command{file}}} - examine a file to identify its type


! Deliverables

!! Review Material:
- Complete [[Lab 5|labs/lab5.pdf]]

!! New Material:
- Complete [[Lab 6|labs/lab6.pdf]] & [[Lab 7|labs/lab7.pdf]]
! Material

* Links:
** Read: 
*** Chapter 3, pp 23 & 24
*** Chapter 4, pp 33 & 34

* File Globbing:
** Read Chapter 4, pp 25-27 (Wildcards)
** Watch: File Globbing: https://www.youtube.com/watch?v=QIysdjpiLcA


! Links & File Globbing

!! Links

There are two different types of links within the Unix environment:  ''Hard links'' and ''Symbolic links''.

Using the following directory listing as an example:

{{{
[merantn@shell dict]$ pwd
/usr/share/dict

[merantn@shell dict]$ ls -l
total 9680
-rw-r--r--. 2 root root 4953680 Jun 10  2014 linux.words
-rw-r--r--. 2 root root 4953680 Jun 10  2014 wordlist
lrwxrwxrwx. 1 root root      11 Feb  4 21:07 words -> linux.words
}}}

We can identify the file named {{File{words}}} as a ''symbolic link'' due to the ''{{Monospaced{l}}}'' character at the beginning of the line.  A symbolic link is a special type of file that only contains the path to the file it is pointing to.  Also note the small file size.  This is another hint it may be a symbolic link.

''Hard links'' are directory entries which point to the same inode.  An inode is a filesystem data structure which contains information about the file and where its blocks can be found on the underlying storage medium.  Thus, hard links point directly to the same place on the disk.  We can tell that the files {{File{linux.words}}} and {{File{wordlist}}} are hard links because of the number ''2'' in the third column.  This is the link count.  It will increase as more hard links are created.  A file isn't truly deleted until its link count reaches zero.

{{{
[merantn@shell dict]$ ls -li
total 9680
289731 -rw-r--r--. 2 root root 4953680 Jun 10  2014 linux.words
289731 -rw-r--r--. 2 root root 4953680 Jun 10  2014 wordlist
719507 lrwxrwxrwx. 1 root root      11 Feb  4 21:07 words -> linux.words
}}}

In the above output, adding the ''{{Monospaced{-i}}}'' flag to the {{Command{ls}}} command shows the inode number for the file.  We can see the files {{File{linux.words}}} and {{File{wordlist}}} are both hard links pointing to the same place on the disk because they both are pointing to the same inode number.

The textbook pages listed at the top will contain more information about these two link types.


!! File globbing & wildcards

So far, when working with files we've specified one filename at a time on the command line. Other shell metacharacters exist to identify files by patterns in their filenames and work with them as a group. Suppose we want to move all files that end in ''.jpg'' to a particular location, or delete all files that contain the string ''temp'' in their filename. If there are thousands of them, it's going to be very tedious to have to list each of the files individually. Or, we can instead use special file wildcard metacharacters to concisely identify these groups of files by common characteristics. This is referred to as ''filename substitution'' or ''file globbing''.


!! Filename substitution

Metacharacters associated with file names: &nbsp; {{Monospaced{''~ * ? [ ] [^ ] { }''}}}

* {{Monospaced{''*''}}} = match any sequence of 0 or more characters
* {{Monospaced{''?''}}} = match any single character.
** It's important to note the ''?'' is a mandatory position which must be filled. It's not optional like the ''{{Monospaced{''*''}}}'' is. So if you type {{Command{ls /bin/d??}}}, you'll see a list of all files in {{File{/bin/}}} which begin with a ''d'' and are exactly three letters in length. You will not see the files which are shorter than three characters, such as the {{Command{df}}} command or longer than three characters such as the {{Command{diff}}} command.  Both ''?'' positions must contain a character.
* {{Monospaced{''[ ]''}}} - match any of the enclosed characters in the set (eg: ''[abcd]''), or match a range (eg: ''[a-z] [~A-Z] [0-9] [e-q]'')
** The {{Monospaced{''[ ]''}}} brackets are similar to the ''?'' in that they specify a single, mandatory character. Where the ''?'' wildcard can represent any character, the brackets allow us to be a little more specific with what that single character may be.
** The {{Monospaced{''-''}}} within the {{Monospaced{''[ ]''}}} specifies the range of characters based on its position in the [[ascii chart|img/ascii-chart.gif]].  For example, {{Monospaced{''[4-6]''}}} or {{Monospaced{''[;-?]''}}} to match the characters {{Monospaced{''; < = > ?''}}} (ascii 59 to ascii 63).
*** Ranges and lists of characters can be combined.  The globbing pattern {{Monospaced{''[ac5-8()]''}}} will match the letters {{Monospaced{''a''}}} and {{Monospaced{''c''}}}, the numbers {{Monospaced{''5''}}} through {{Monospaced{''8''}}}, and the two parentheses.
** {{Monospaced{''[^ ]''}}} - match any character //not// enclosed in the set or range (eg: ''[^abcd]'' or ''[^a-z]'').  The notation ''[! ]'' is sometimes used but not universally recognized.  Use ''[^ ]'' instead.  The labs will all use ''[^ ]''.
* {{Monospaced{''{ }''}}} - Brace Expansion.  Expand comma separated strings to create multiple text strings from a pattern. Example: {{Command{mkdir -p {one,two,three}/examples}}} will create the directories {{File{one/examples}}}, {{File{two/examples}}}, and {{File{three/examples}}}.

{{Note{''Note:'' Negation should only be used when it is the best possible method for solving the problem, not as a way to be lazy.  If the question asks to list a particular set of files, try to find a way to target just those files.  Negation is ideal when the question includes a negation term, such as the wording //except// or //do not//.  When negation is abused, files are often matched which were not intended to be matched.  }}}
{{Warning{''Warning:'' Try to be as specific as possible when you are using wildcards.  It's best practice to type out the static text and only use wildcards for the dynamic part of what you are trying to match.  For example, if I am trying to match the files {{File{data1.txt}}}, {{File{data2.txt}}}, {{File{data3.txt}}}, and {{File{data4.txt}}}, the best file globbing pattern would be {{Command{data[1-4].txt}}}.  It is as specific as possible and includes the static portions of the filename.  Using {{Command{data?.txt}}} would inadvertently match {{File{data5.txt}}} and {{Command{*[1-4].txt}}} could match something else entirely.  Even if those files are not currently in the directory, they might be later.  Don't be lazy with your file globbing patterns!}}}

!!! Examples  - Display all files whose names:

Begin with the letter f: {{Command{ls f*}}}
&nbsp;&nbsp;&nbsp;(read as: list files which begin with an ''f'' followed by ''0 or more characters'')
Contain a number: {{Command{ls *[0-9]*}}}
&nbsp;&nbsp;&nbsp;(read as: list all files which may begin with ''0 or more characters'', followed by ''any number'', and end with ''0 or more characters'')
begin with an uppercase letter: {{Command{ls [~A-Z]*}}}
begin with the letter a, b, or c: {{Command{ls [abc]*}}}
begin with the letter a, b, or c and is exactly two characters in length: {{Command{ls [abc]?}}}
do not begin with the letter a, b, or c: {{Command{ls [^abc]*}}}
end with a number from 2 to 9 or a letter from w to z: {{Command{ls *[2-9w-z]}}}
are exactly two characters long and begin with a lowercase letter: {{Command{ls [a-z]?}}}
begin with string one, end with string three, and contain string two somewhere in between: {{Command{ls one*two*three}}}


{{Warning{''Warning:'' Working on the command line requires an eye for detail. We're starting to get to the point where that detail really matters. There's a huge difference between the commands {{Command{rm *lab6*}}} and {{Command{rm * lab6*}}}. One stray space and you're going to be in for some missing labs. Take a second look at your commands before executing them and be very deliberate with what you're running. Remember - Working on the command line is precise. Every character matters and we must have an eye for detail!}}}


!! Substitutions

Through use of shell metacharacters, substitutions are transformations performed by the shell on command line input prior to executing a command string. File globbing is one of the 5 types of shell substitutions.

It's important to understand the order of operations here. In the math formula 5 + 6 * 7, our calculations are not automatically performed left to right. There is a set order of operations that calls for the multiplication to be performed first. The same idea applies to entering command line input. First, all substitutions are performed by the shell, then your command string is executed. 

Consider the command {{Command{ls *.jpg}}}

The shell recognizes that we're performing a substitution (eg: {{File{*.jpg}}}) and replaces {{File{*.jpg}}} in the command string with a list of all files that match the pattern.
Next, the {{Command{ls}}} command is executed with the list of files as arguments

A great way to preview the result of any substitutions is with the {{Command{echo}}} command. The {{Command{echo}}} command repeats back to the screen whatever you give it as an argument. For example:

{{{
[merantn@shell ~]$ echo hello ncs205
hello ncs205
[merantn@shell ~]$ cd /opt/pub/ncs205/data/lab9
[merantn@shell lab9]$ echo rm IMG_126?.jpg
rm IMG_1260.jpg IMG_1261.jpg IMG_1262.jpg IMG_1263.jpg IMG_1264.jpg IMG_1265.jpg IMG_1266.jpg IMG_1267.jpg IMG_1268.jpg IMG_1269.jpg
}}}

So if I have a complex or risky substitution, I may want to prefix the command string with the {{Command{echo}}} command to preview it before it's executed:
eg: Change to {{File{/opt/pub/ncs205/submit/}}} and run: {{Command{echo ls *lab[1-3]*}}} to see what substitution is being performed and the actual command string about to be executed. Don't forget to prefix it with {{Command{echo}}}!

These file globbing substitution examples are pretty tame, but this trick with the {{Command{echo}}} command will come in very handy later on when we get to more complicated substitutions.


! Assignment

* Links:
** Read: 
*** Chapter 3, pp 23 & 24
*** Chapter 4, pp 33 & 34
** Complete: [[Lab 8|labs/lab8.pdf]]

* File Globbing:
** Read Chapter 4, pp 25-27 (Wildcards)
** Watch: File Globbing: https://www.youtube.com/watch?v=QIysdjpiLcA
** Complete:  [[Lab 9|labs/lab9.pdf]] & [[Lab 10|labs/lab10.pdf]]

! Improving soft skills

Labs 5 - 7 brought us to four soft skills which will be important for this course and for your future careers:

# Read the directions thoroughly
# Be thorough in your writing.
# Use proper terms
# Test your theories

!! 1. Read the directions thoroughly

Lab 7 contained this phrase in bold within the directions at the top of the page:  ''Commands executed must function from anywhere on the system.''

About half the class ignored that and lost points from several of the questions.

Question 1 was a good example:

<<<
1. //Create an empty file named ''source'' in your home directory using a single command.//
<<<
About half the class responded with this solution:  {{Command{touch source}}}.

That command gets the job done, but //only// if your current working directory is your home directory.  What if you're somewhere else in the filesystem?  The full path to the file we wish to create must be included to indicate exactly where on the system it should be created.  Either the solution {{Command{touch /home///username///source}}} or {{Command{touch ~/source}}} will properly include the path to your home directory and provide a command which will work anywhere on the system.

The directions for groups of questions or instructions within the questions themselves will have small details like that which will need to be followed.  Some questions will ask for the command and output.  Often, students overlook the need for output and only supply the command.

It's always wise to read the directions for the lab or instructions for a question, add your responses, then re-read the details to ensure your responses match what is requested.

!! 2. Be thorough in your writing

We're not in the classroom where I can easily ask you to clarify your responses.  In writing, you must be thorough so the reader understands your message.  This is important now for grading to convey that you fully understand what's going on and will be important later when it comes time for you to create documentation or explain things to colleagues.  I encounter far too much poor "professional" documentation which is either vague or omits critical details.  Lab 5, questions 8 and 9 highlighted this.  Too many points were lost unnecessarily due to incomplete explanations. 

I provided an example for the first command in question 8:
<<<
8.  //Summarize the actions performed by the following commands (don’t just copy the output)://
<<<
|ls&nbsp;&nbsp;&nbsp;|&nbsp;&nbsp;List the contents of the current directory&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;|

This explanation concisely lists the action which is taken (list the contents) and the object which is acted upon (the current directory).  

For the second command, {{Command{ls -a}}}, a common response I received was //list hidden files//.  This response omits two critical details:  Are //only// hidden files to be listed?  Which hidden files are we listing?  A thorough response would be: //list all files, including hidden files, in the current working directory//.  Here we're concisely explaining all of the components of this command:  What the command does (list all files), the option provided (including hidden files), and the target (in the current working directory).

Even worse, for the command {{Command{ls /}}}, I received several responses which were simply //root directory//.  What about it?  What's the action that's being taken?  Here, the action taken upon the ''@@root of the filesystem@@'' is omitted completely.  A response for this command should be something like //list the contents of the ''@@root of the filesystem@@''//.  We're explaining both the command and the target.

Notice the highlighted text above and the output of the command {{Command{ls /}}}:
{{{
[merantn@lab ~]$ ls /
bin   dev      etc   lib    media  opt   root  sbin  sys  usr
boot  entropy  home  lib64  mnt    proc  run   srv   tmp  var
}}}
There is an entry in the output named {{File{//root//}}}.  Using //root directory// in the response to that question is ambiguous.  Does //root directory// mean {{File{/}}} or {{File{/root/}}}?  Referring to {{File{/}}} as //the root of the filesystem// helps eliminate that problem.


Another example:  For the command {{Command{ls .}}}, I received the response //lists the current directory//.  The command {{Command{pwd}}} will display the current directory.  The {{Command{ls}}} command lists the //contents of// its target.  There's a big difference between the two.

Yet another example:  For #9, I occasionally receive the terse response //Changes to tmp directory// as a response to the commands {{Command{cd /tmp/}}} and {{Command{cd /var/tmp/}}}.  Those are two different paths.  How can the answer be the same for both of them?  Be specific - you're provided with an absolute path in the question, so it might be a good idea to use the same absolute path in the responses.


We'll be in this situation throughout the semester.  Be sure your responses are thorough and do not omit the critical details.  Even if you never touch the Linux command line again, improving your writing will be a universal skill that will serve you well later.


!! 3. Use proper terms

Lab 5 questions 8 and 9 asked you to explain what the commands {{Command{ls ..}}} and {{Command{cd ..}}} will do.  I received a lot of responses that contained the phrase "//previous directory//" to refer to the {{File{..}}} portion of that command string.  //Previous directory// is ambiguous.  To me, that refers to the last directory you were in.  The directory {{File{..}}} is a special directory that refers to the ''//parent//'' of a directory, so the command {{Command{ls ..}}} will //display the contents of the parent of the current working directory//. 


!! 4. Test your theories

Lab 6, question 7 was a good example of this problem:

<<<
//7. Explain each of the three arguments and the result of executing the following command string:  {{Command{mv one two three}}}.//
<<<

I receive some pretty wild responses to this one.  The most common two incorrect answers are:

* //move file one to file two and file two to file three// 
* //move files one and two to directory three.  __Directory three will be created if it does not exist.__//

We have a lab environment available to us to use to practice the material and test your solutions. If these answers were tested, it would be very obvious they are incorrect.  Submitting untested solutions will especially be a problem later in the semester when we get to more complicated material.  Don't be lazy and just guess.  I tend to grade far more harshly when I encounter such obviously incorrect responses.


! Material

This is going to be a light section.  Please take the time to review any past work and ensure you're caught up.  If you're having trouble or are unsure of anything, please take advantage of the discussion boards or reach out to schedule a chat.

!! Home directories

I'm adding another lab (Lab 11) to give a little more practice working with home directories and the shortcut metacharacter involved with them.  The bottom of page 70 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] has the details.


!! Working with command documentation

Read Chapter 5, pp 44-47 on the {{Command{help}}} and {{Command{man}}} commands.  Pay particular attention to the part about notation and be sure to know how to interpret the documented usage of these commands:
{{{
cd [-L|[-P[-e]]] [dir]

vs. 

mkdir [OPTION]... DIRECTORY...
}}}


! Assignment

* Home directories:
** Complete:  [[Lab 11|labs/lab11.pdf]]
* Documentation
** Read Chapter 5, pp 44-47 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] to learn about the {{Command{help}}} and {{Command{man}}} commands.
! Material

* File Permissions:
** Read [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] Chapter 9, pp 90 - 100 (Stop at //Some Special Permissions//)
** Watch: 
*** File Permissions: https://www.youtube.com/watch?v=8SkN7UofOww
*** Umask: https://www.youtube.com/watch?v=JYT7y_Pe9wE

! Notes

!! File Permissions

The Unix operating system has multiple levels of securing access to resources. We can restrict who can access the system through userids and login credentials, we can limit who can become the superuser and act as the administrator of the system, we can control who can access certain directories on the system, and we can control access to files. The first two are items for an administrator to configure, but the latter two regular users can control for files that they own. Being able to restrict access to certain files is a critical function of a multi-user system. For example, we restrict access to the lab assignments everyone is uploading so no one else peeks at your work. Certain sensitive system files are restricted to keep the system more secure.

Hopefully by now we're comfortable navigating the filesystem and identifying files by name, both individually and in groups. Next I'd like to examine how we can manipulate the file's permissions.

Permissions can be set based on three different tiers:

* User - the owner of the file
* Group - a group that has access to the file
* Others - everyone else on the system

And three different permissions can be set on each file

* Read - The ability to read a file or list the contents of a directory
* Write - The ability to modify content of a file or create files in a directory
* Execute - The ability to run a program or access a directory

Chapter 9 in The Linux Command Line will discuss permissions in detail.

This youtube video is a good permission overview:  [[File Permissions|https://www.youtube.com/watch?v=8SkN7UofOww]]


!!! File & Directory Permissions

The following tables and graphics can serve as a quick reference:

!! File & Directory Permissions
|!Type|!File|!Directory|
| read (4) | read contents | List directory |
| write (2) | change / delete file | Add files |
| execute (1) | run executable | cd into |

!!!! chmod

The {{Command{chmod}}} command can be used to change permissions for existing files.
* using octal codes
** Read (4), Write (2), and Execute (1)
** Three positions:  user, group, and others
* using symbolic codes
** who:
*** u - user
*** g - group
*** o - others
*** a = all positions
** operator:
*** = explicitly set
*** + add permission
*** - remove permission
** permission:
*** r = read
*** w = write
*** x = execute

{{Note{''Note:'' Use symbolic abbreviations when making changes to permissions without consideration to what is already set, eg: when adding or removing permissions. The use of octal codes requires all permissions be completely reset - a user cannot add or remove individual permission settings without specifying all of them.

For example, suppose I only want to __add__ write permissions for the group. Without knowing what the permissions currently are, I have to use symbolic notation to modify the permissions on the file. In this case with {{Command{chmod g+w //file//}}}

If the lab question asks you to ''set'' permissions, use __octal codes__. If it asks you to ''add or remove'', use __symbolic__ abbreviations.
}}}

<html><center><img src="img/chmod1.png" alt=""><BR><BR><HR width="75%"><img src="img/chmod2.png" alt=""></center></html>


!!! umask

The {{Command{umask}}} command can be used to establish default permissions for all newly created files.

* umask - user mask - which permissions to restrict. (mask = remove)
* start with full permissions 777
* The umask value is which bits to remove.
* The execute bit (1) will automatically be subtracted from all positions for regular files
* Making a new regular text file executable must be a manual task

A mask refers to bits to be removed. If we do not want newly created files to have write permissions for the group or others, we need to mask 2 from the group and others positions, resulting in a umask of 22.

Examples:

A umask value of 22 will set default permission for new files to 644 (777 - 22 - 111) and directories to 755 (777 - 22)
A umask value of 77 will set default permission for new files to 600 (777 - 77 - 111) and directories to 700 (777 - 77)
''Note:'' Newly created files are not granted execute automatically regardless of the umask value.


!! Misc:

The book creates empty files for its examples with {{Command{> foo.txt}}}.  This is the same as executing {{Command{touch foo.txt}}}.


! Assignment

* File Permissions:
** Read [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] Chapter 9, pp 90 - 100 (Stop at //Some Special Permissions//)
** Watch: File Permissions: https://www.youtube.com/watch?v=8SkN7UofOww
** Complete:  [[Lab 12|labs/lab12.pdf]] & [[Lab 13|labs/lab13.pdf]]
! Material

* Read Chapter 6 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]].
** Watch Linux Sysadmin Basics 04 -- Shell Features -- Pipes and Redirection - https://www.youtube.com/watch?v=-Z5tCri-QlI

* Read Chapter 20 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]].  Stop with {{Command{tr}}} at the bottom of page 299.
** Focus on the filters listed below
** We won't be working with paste, join, patch or aspell.  You can skip over these commands if you'd like.
** Save tr and sed for later.  They're too complex for right now.


Please make use of the discussion boards if you run into any trouble with these commands, especially next week when we need to start combining them in more complex ways.

Most shell metacharacters (the symbols on your keyboard)  have a special meaning.  Compiling a list of them with an explanation and example usage in this [[Shell Metacharacter Table|handouts/ShellMetacharacterTable.pdf]] as they are introduced might be helpful.  


! Notes

A lot of the power of the Unix environment comes from single purpose commands. The filter commands we are about to introduce are great examples. By combining these single-purpose commands we can build flexible and customized solutions to solve a wide range of problems.

By default, output from a command such as our text filters is displayed to the screen. By redirecting where that output is sent, we can chain commands together to creatively solve more complex problems.

Redirecting standard I/O is how we move data between filters and files. The following diagram illustrates our options:
[>img[img/stdout.png]]

This video explains the basics: https://www.youtube.com/watch?v=-Z5tCri-QlI

!! Standard input and standard output:
* Every filter should be able to accept input from any source and write output to any target
* Input can come from the keyboard, from another file, or from the output of another program
* Output can be displayed to the screen, can be saved to a file, or sent as input to another program
* This gives us great flexibility when combined with the simple approach to our tools

The standard source, Standard Input, is commonly abbreviated as STDIN.  The two output destinations, Standard Output and Standard Error, are commonly abbreviated as STDOUT and STDERR, respectively.  Collectively, all three are abbreviated as STDIO.

{{Warning{''Warning:'' Not every utility will accept input on STDIN, not every utility will output to STDOUT!  It is important to keep this in mind.  Generally, most system utilities such as {{Command{ls}}}, {{Command{mkdir}}}, and {{Command{cp}}} do not accept input on STDIN.  Only some of them will send output to STDOUT.  All tools which manipulate text (text filters) will utilize both STDIN and STDOUT.}}}


!! Redirection - moving input or output between a command and a file

We have new shell metacharacters to assist with the management of input and output:
* > : Redirect output - Send a command's output to a file, overwriting existing contents
** {{Command{ users > userlist }}}
** {{Command{ who > loggedin }}}
* {{Monospaced{>>}}} : Redirect output - Send a command's output to a file, appending to existing data
** {{Command{ who >> loggedin }}}
** {{Command{ (date ; who) >> loggedin }}}
* {{Monospaced{ < }}} : Redirect input - Take a command's input from a file
** {{Command{ tr  ' '  ,  <  userlist }}}
* Disable output by redirecting it to {{File{/dev/null}}}, the unix garbage can
** {{Command{ make > /dev/null }}}


!! Standard Error (STDERR)

Some commands use a separate data stream, STDERR, for displaying any error or diagnostic output.  Having this extra output on a separate stream allows us to handle it differently.  We can send STDOUT to one destination and STDERR to another. 

We can prefix our redirection symbols (''>'', ''>>'', or ''|'') with a ''2'' (the STDERR file descriptor) to send STDERR to a different destination.

For example, notice how the error message from the second command is discarded:

{{{
[root@shell ncs205]# id username
id: username: no such user

[root@shell ncs205]# id username 2> /dev/null

[root@shell ncs205]# id merantn 2> /dev/null
uid=7289(merantn) gid=100(users) groups=100(users),205(ncs205)

[root@shell ncs205]# id merantn 2> /dev/null 1> /dev/null
}}}


!! Command Execution
* Chaining commands (Pipelines):
** Workflows can be completed as a pipeline of simple tools
** Glue multiple commands together to perform complex tasks out of simple tools
** Send STDOUT of one command as STDIN to another with the  |  (pipe)  symbol
** First command must be able to send output to STDOUT and second command must be able to read input from STDIN
** Examples:
*** {{Command{ who | sort | less }}}
*** {{Command{ who | wc -l }}}
*** {{Command{ last | cut -d ' ' -f 1 | sort | uniq }}}
*** ''Does not work!  See yellow box above.'':  {{Command{ ls * | rm }}}
**** File manipulation utilities like rm do not work with STDIN and STDOUT
* Send to STDOUT and save to a file with the {{Command{tee}}} command
** {{Command{ df | grep mapper| tee fs }}}
** {{Command{ df | tee fs | grep mapper }}}
*Sequenced commands:  {{Command{ command1 ; command2 }}}
**No direct relationship between the commands
**Do not share input or output.  Simply combined together on the same line
** {{Command{ echo Today is `date` > Feb ; cal >> Feb }}}
*Grouped commands: {{Command{ (command1 ; command2) }}}
** {{Command{ (echo Today is `date` ; cal ) > Feb }}}
** Run in a sub-shell - Launch commands in a new shell (any new settings or shell variables are not sent back to parent shell)
*** Observe the current directory after running this command sequence: {{Command{ ( cd / ; ls ) ; pwd }}}


!! Chaining Commands with text filters:

Build flexible and customized solutions to solve a wide range of problems.
Unix filter tools are very useful for manipulating data
Filter definition:  any command that takes input one line at a time from STDIN, manipulates the input, and sends the result to STDOUT
To most effectively solve a problem, you must know the available tools.  Know the commands and be familiar with the options available.

When working with the filters to solve problems:
* Break the problem down into small parts
* Choose your tools
* Experiment
* Perfect and simplify your solution


!!! Core Filters:
* {{Command{cat}}} - concatenate one or multiple files
** {{Monospaced{-n}}} option - numbered lines
** create text files by redirecting output to a file
* {{Command{head}}} - display lines from the beginning of a file
** {{Monospaced{-n}}} - display first //n// lines
* {{Command{tail}}} - display lines from the end of a file
** {{Monospaced{-n}}} - display last //n// lines
** {{Monospaced{+n}}} - Begin display at line //n// 
** {{Monospaced{-f}}}  - do not stop at eof, continue displaying new lines.
* {{Command{grep}}} - pattern matching : //pattern// //files(s)//
** {{Command{grep //pattern// file1 file2 file3}}}
*** Example: {{Command{grep dog //file(s)//}}}
*** {{Command{w | grep ^d}}}
** {{Command{//command1// | grep //pattern//}}}
** Anchors: 
*** {{Monospaced{^}}} = begins with
*** {{Monospaced{$}}} = ends with
**Useful options:
*** {{Monospaced{-v}}} : Invert the match
*** {{Monospaced{-i}}} : Case insensitive
*** {{Monospaced{-l}}} : list only file names
*** {{Monospaced{-H}}} : list file name with matched pattern
**Examples:
*** {{Command{grep -v '^$' /etc/printcap}}}
*** {{Command{ls -l | grep ^d}}}
*** {{Command{grep init /etc/rc*}}}
*** {{Command{cp `grep -l init /etc/rc*` scripts/}}}
*** words containing the string //book//
*** lines containing dog at the end of the line
* {{Command{sort}}} - sort lines of text files
**sort passwd file
**Options:  
*** {{Monospaced{-n}}} : Numeric
*** {{Monospaced{-r}}} : Reverse
*** {{Monospaced{-k}}} : sort on field #
*** {{Monospaced{-t}}} : Specify delimiter (default whitespace)
** Examples:
*** {{Command{sort  /etc/passwd}}}
*** {{Command{sort -t : -k 5 /etc/passwd}}}
*** {{Command{sort -n -t : -k 3 /etc/passwd}}}
* {{Command{uniq}}}  - filter out repeated lines in a file
**Must be sorted before showing unique values
**{{Monospaced{-c}}} : Count number of matches
* {{Command{wc}}} - word, line, character, and byte count
** {{Monospaced{-w}}} = word count
** {{Monospaced{-l}}} = line count
* {{Command{cut}}} - cut out selected portions of each line of a file, either range of characters or delimited columns
** Two main usage options: 
*** By delimited columns:
**** {{Monospaced{-d}}} : Specifies the delimiter (defaults to tab)
**** {{Monospaced{-f}}} : Specifies the field(s)
*** Range of characters:
**** {{Monospaced{-c}}} : Extract character ranges
** Examples: 
*** Extract fields 2 through 3 from file data.txt, delimited by a semi-colon: {{Command{cut -d ';' -f 2-3 data.txt}}}
*** Extract characters 65 through end of line from the ~Fail2Ban log:  {{Command{cut -c 65- fail2ban.log}}}
* {{Command{strings}}} - Searching for strings in binary files
*Compare files
** {{Command{cmp}}} - compare two files
** {{Command{diff}}} - compare files line by line
** {{Command{comm}}} - select or reject lines common to two files


! Assignment

*  Complete [[Lab 14|labs/lab14.pdf]], [[Lab 15|labs/lab15.pdf]], and [[Lab 16|labs/lab16.pdf]]
** ''Note:'' Lab 16 is not due until next Wednesday.
! Review

Complete [[Lab 17|labs/lab17.pdf]] for some additional practice with the basic filters.


! Material

!! Read:
* Read Chapter 20, pp 299-307 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]].  
** Stop after the grey box at the top of 307.
** The book uses advanced regular expressions for some of its sed examples.  Don't worry about understanding what the regular expressions do.  Stick with the simple examples like the ones on pages 301 to top of 303.
*** The first regular expression is at the top of page 305 and looks like this:  @@s/\([0-9]\{2\}\)\/\([0-9]\{2\}\)\/\([0-9]\{4\}\)$/\3-\1-\2/@@

!! Watch:
* {{Command{tr}}}:
** How to Use tr, sed, and aspell: Linux Terminal 201 - https://www.youtube.com/watch?v=F7Brrn-L1Zg
** Mostly for tr, but there's some talk about sed too
** Hak5 has a lot of great content.  Check out their other videos.
* {{Command{awk}}}: 
** Learning awk - https://www.youtube.com/watch?v=9YOZmI-zWok
** This video goes into advanced usage at the 11:30 mark that we won't be covering in this class.  You can stop at that point if you'd like.
* {{Command{sed}}}:
** SED Tutorial Basic Substitution - https://www.youtube.com/watch?v=32waL1Z9XK0&list=PLcUid3OP_4OW-rwv_mBHzx9MmE5TxvvcQ&index=1
** SED Substitute Beginning and End of Line: https://www.youtube.com/watch?v=8T5azKqYAjc&list=PLcUid3OP_4OW-rwv_mBHzx9MmE5TxvvcQ&index=2
** SED Remove Lines When Match is Found: https://www.youtube.com/watch?v=37r5Ykdnlkk&list=PLcUid3OP_4OW-rwv_mBHzx9MmE5TxvvcQ&index=13
** The entire sed series has great content, but those three are the highlights


! Notes

!! More complex filters

The {{Command{tr}}}, {{Command{awk}}}, and {{Command{sed}}} commands are a little more complex than the others we've introduced, but all three are important tools to have in your toolbox.  {{Command{awk}}} is easier to work with than cut for extracting columns of text.  {{Command{sed}}} is especially useful for search and replace operations and extracting particular rows of text from a file.

* {{Command{tr}}} - Translate characters
**Works only on stdin, does not alter the file, only the data stream
**Two arguments for translating characters  (set1/from)  (set2/to)
**Input characters in ''//set1//'' are mapped to corresponding characters in ''//set2//''.
**If the length of the two sets are unequal:
***''//set1//'' larger than ''//set2//'': ''//set2//'' is extended to the length of set1 by repeating ''//set2//'''s last character as necessary.
***''//set2//'' larger than ''//set1//'': Excess characters in ''//set2//'' are ignored.
**Options:
*** {{Monospaced{-d}}} : delete  (one argument for which characters to delete)
*** {{Monospaced{-s}}} : Squeeze multiple consecutive occurrences of a character down into a single instance.
** Character classes:
*** Another way to identify groups of characters
*** Page 260 & 261 in //The Linux Command Line// 
*** {{Monospaced{[:digit:]}}}
*** {{Monospaced{ [:alpha:] [:lower:] [:upper:] }}}
*** {{Monospaced{[:space:] [:blank:]}}}
**Examples:
*** {{Command{tr '[:upper:]' '[:lower:]' < /etc/printcap}}}
*** {{Command{tr '[:upper:]' '*' < /etc/printcap}}}
*** {{Command{tr -s '[:upper:]' '*' < /etc/printcap}}}
** Special characters
*** On the unix command line, {{Monospaced{''\t''}}} will represent a tab and {{Monospaced{''\n''}}} will represent a newline.  {{Command{tr}}} supports using these for substitutions.

{{Warning{''Important note:''  Most students have trouble with the {{Command{tr}}} command and interpret its actions incorrectly.  {{Command{tr}}} stands for translate, and as such it translates ''characters'' individually.  It does ''NOT'' translate strings.  There is a big difference between the two.

{{Command{tr}}} will individually translate the characters from the first argument into the characters in the second argument according to their placement.  The first character in argument 1 will be translated into the first character in argument 2.  The translation will proceed for each character in the first argument to the corresponding position in the second argument.

{{Command{cat data.txt | tr one two}}}  does not convert the string //one// to the string //two// in the output of {{File{data.txt}}}.  It converts each ''o'' to ''t'', each ''n'' to ''w'', and each ''e'' to ''o''.  Each of those characters in the output of {{File{data.txt}}} is changed individually.

When completing labs involving {{Command{tr}}}, it is important that your responses indicate these translations are happening //character by character//.  Additionally, the translation does not occur in the //file// {{File{data.txt}}}.  Our source files are not modified by the filters.  It is important to indicate the translation is occurring in the //output// of the file {{File{data.txt}}}.

Providing a response which is less than clear on these important points will be considered incorrect.}}}

!! {{Command{''sed''}}} & {{Command{''awk''}}}

I like using {{Command{''awk''}}} instead of {{Command{''cut''}}}.  Everything {{Command{''cut''}}} can do {{Command{''awk''}}} can do better.  Often our delimiters are variable lengths of whitespace, such as several spaces or several tabs.  {{Command{''cut''}}} can only delimit on a single character, but {{Command{''awk''}}}'s default delimiter is whitespace, regardless how long it is.  {{Command{''awk''}}} can also use multiple characters as a delimiter at the same time.

There's an [[Oreilly book|https://www.oreilly.com/library/view/sed-awk/1565922255/]] for just these two commands.  They're pretty powerful, but we're only going to scratch the surface.  We'll mostly work with {{Command{''awk''}}} but {{Command{''sed''}}} is good to know too.  Both come in very handy.


* {{Command{awk}}}
** {{Command{awk}}} is a fully functional programming language written for processing text and numbers.
** {{Command{tr}}} works byte by byte (1 character at a time)
** {{Command{grep}}} works line by line
** {{Command{awk}}} works field by field
** Terminology:
*** Record = line of input
*** Field = a column of data, separated by a delimiter
** basic usage:  {{Command{awk [-F//delim//] '{ action ; action ; action }' }}}
*** default action is to print the entire record
*** {{Monospaced{ ''-F'' }}} = specify alternate field separator (default is whitespace)
*** Multiple delimiters can be used.  For example, the option {{Monospaced{ ''-F'[-:]' '' }}} will set the delimiter to be either a colon or a dash.
*** ''Note:'' {{Command{cut}}} uses a single character for a delimiter where {{Command{awk}}}'s default is any amount of whitespace.  This is especially handy if a sequence of spaces or tabs is used between columns, such as in the output of the {{Command{w}}} command.
** advanced usage:  {{Command{ awk [-F//delim//] [ -v var=value ] '//pattern// { action ; action ; action }' }}}
*** //pattern// is an optional way to specify which lines to operate on
*** {{Monospaced{ ''-v'' }}} = define a variable and its value to be used within awk.  ex:  {{Monospaced{ ''-v start=10'' }}}
** Useful awk variables:
*** {{Monospaced{ ''$0'' }}} - The entire line of text
*** {{Monospaced{ ''$//n//'' }}} - The //n//^^th^^ data field of the record
*** {{Monospaced{ ''NR'' }}} - record number
** Patterns can be (advanced use only, I will not give problems in labs or tests that require this):
*** Relational expressions  ( {{Monospaced{ ''<=, <, >, >=, ==, !='' }}} )
**** ex:  {{Monospaced{ ''$1 == $2'' }}}
*** Regular expressions /regex/
**** Must be enclosed in {{Monospaced{ ''/ /'' }}}
**** When specified, the regex must match somewhere on the line.  example: {{Monospaced{ ''/[0-9]+/'' }}}
**** Or use a pattern matching expression ( {{Monospaced{ '' ~, !~'' }}} ) to match regex to a specific field.  example:  {{Monospaced{ ''$1 ~ /^string/'' }}}
** Examples:
*** Show only the username and tty from the output of the {{Command{w}}} command: {{Command{w | awk '{print $1 " " $2}' }}}
**** Same output, but skip the first two header lines:  {{Command{w | awk ' NR > 2 {print $1 " " $2}' }}}
*** Set the delimiter to be the string {{Monospaced{ ''", "''}}} (comma then space), then invert the first and last names: {{Command{awk -F", " '{print $2, $1}' names }}}


* {{Command{sed}}}:  Stream editor  //commands// //file(s)//
**Works mainly on streams, but can also be used to modify files in place when used with the {{Monospaced{ -i }}} option.
*** Be sure you are clear about this in your labs.  A response that indicates a change or deletion is occurring in the file will not be correct.  By default, changes are happening to the output of the file.
**We use {{Command{sed}}} to change the text in a stream.
**For each line in the //file//, check to see if it is addressed. If so, perform //command//
**[address1[,address2]] command [options]
***Addresses can be line numbers:  start[,stop]
***simple patterns:  {{Monospaced{ /pattern/ }}}
***The pattern can contain our ^ and $ anchors
***or regular expressions:  {{Monospaced{ /regex/ }}}
***Defaults to all lines if none are addressed
**Most used sed commands
*** {{Monospaced{s}}} - substitute - {{Monospaced{s/find/replace/flags}}}
**** flags:
**** {{Monospaced{g}}} - all instances on the line
**** {{Monospaced{p}}} - print lines containing a substitution
*** {{Monospaced{d}}} - delete line
*** {{Monospaced{p}}} - print line
*** {{Monospaced{y}}} - translate characters on the line (similar to {{Command{tr}}} command)
**Options:
*** {{Monospaced{-n}}} : suppress normal behavior and only show lines addressed and given {{Monospaced{p}}} command.
**sed examples:
*** {{Command{sed 7p file1}}} - print line 7 twice (notice absence of {{Monospaced{-n}}} option)
*** {{Command{sed '7d' file1}}} - delete line 7 from the output
*** {{Command{sed '/pattern/d' file1}}} - delete all lines containing //pattern// from the output
****Pattern can contain ^ and $ anchors and [sets]
****[sets] examples:  [abc]  [aeiou]  [~A-Z]  [a-z]  [A-z]   [0-9]
*** {{Command{sed -n '1,6p' file1}}} - only print lines 1 through 6 (notice the inclusion of the {{Monospaced{-n}}} option)
*** {{Command{sed 's/Sam/Bob/' file1}}}  -  All lines with Sam changed to Bob  (just once)
*** {{Command{sed 's/Sam/Bob/g' file1}}}  -  All lines with Sam changed to Bob  (all matches on the line)
*** {{Command{sed 's/Sam/Bob/gp' file1}}}  -  All lines with Sam changed to Bob  (all matches on the line).  Notice the lack of the {{Monospaced{-n}}} option.
*** {{Command{sed -n 's/Sam/Bob/gp' file1}}}  -  All lines with Sam changed to Bob  (all matches), only printing lines where the substitution occurred
*** For addressing lines, {{Monospaced{$}}} = last line in the output

{{Note{''Note:'' Always put your awk & sed commands (the first argument), within single quotes, for example:  {{Command{sed -n '4,6p' file1.txt}}} }}}


! Assignment

!! Review lab:
* [[Lab 17|labs/lab17.pdf]]

!! Complete:
*  [[Lab 18|labs/lab18.pdf]], and [[Lab 19|labs/lab19.pdf]]
** Lab 19 is not due until Saturday.
! Material

* Read, [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]]:
** grep:  Chapter 6, pp 63 
** Anchors:  Chapter 19, pp 255


! Notes

!! Pattern Matching with {{Command{grep}}}:

The {{Command{grep}}} filter is one of the most useful; I use it almost daily.  It's worth spending a little more time working with and ensuring we're all on the same page with our terminology.  ''From here on, I'm going to start being a little more strict with wording, so be thorough with your explanations.''


!!! {{Command{grep}}} - Display all lines from a file or stream containing a specified pattern
* Usage: {{Monospaced{grep //pattern// [file1] [file2] [file3] [file//n//]}}}
* Search for //pattern// in each of the specified files
* Useful options:
** {{Monospaced{''-v''}}} : Invert the match; display lines which ''do not'' contain the specified pattern
** {{Monospaced{''-i''}}} : Case insensitive search
** {{Monospaced{''-l''}}} : list only names of files which contain a match
** {{Monospaced{''-H''}}} : include file name with matched pattern
* Examples:
** {{Command{grep dog data.txt}}} - Display all lines from the file ''data.txt'' containing the string ''dog''
** {{Command{grep ssh /etc/*.conf}}} - Display all lines from files ending with ''.conf'' within the directory ''/etc/'' containing the string ''ssh''

{{Note{
!!! ''Important Notes:''
!!!! 1)  The following all have distinct meanings.  Be sure to use them properly.
* ''Line'':  The entire line
* ''String'':  A sequence of characters
* ''Word'':  A sequence of characters with whitespace or punctuation on either side or at the beginning or end of a line.
* ''Characters'':  Individual characters, not necessarily together


!!!! 2)  By default, the grep filter will display all lines which match a particular pattern or string.  Be specific when describing its actions in the labs. 
For example, if you are asked to describe what the following command is doing:
<<<
{{Command{grep ab data.txt}}}
<<<
and your response is something vague and generic like "//finds ab in the file//" you will not receive full credit.  Be ''thorough and specific''!  What happens when a match is found?  Which file is being examined?  Where is the output going?  I've allowed vague descriptions in previous labs, but that must end as our familiarity with these tools is increasing.

A proper response will cover all points:

Display to ''STDOUT'' all ''lines'' containing the string ''ab'' from the file ''data.txt''


!!!! 3)  If multiple commands are chained together, don't just itemize what each command in the pipeline is doing.  Be sure to also describe its final outcome.  We must appreciate the big picture as well.
}}}


Chapter 6, pp 63 has more information on the grep command.


!!! Anchors:

When trying to match a pattern in a tool like grep, anchors allow us to specify where on the string a pattern must occur.  This is useful if we're trying to match something which appears at either the beginning or end of a line instead of somewhere in the middle.  

Anchors can be utilized with two anchor metacharacters:

* ^ = begins with
* $ = ends with
* Examples:
** {{Command{grep '^string' data.txt}}} - Display lines from the file ''data.txt''  beginning with ''string''
** {{Command{grep 'string$' data.txt}}} - Display lines from the file ''data.txt''  which end with ''string''

Chapter 19, pp 255 contains more information on anchors.



! Assignment

* Complete:
** Labs:  [[Lab 20|labs/lab20.pdf]] & [[Lab 21|labs/lab21.pdf]]

If you have time, peek ahead to the [[Week 6, Part 1]] page and check out lab 22.  Lab 22 is a little tricky, so some extra time to chat about it in Discord may be helpful.
! Review:

!! I/O Practice

Lab 22 is a practice lab for I/O and moving output from one command to another.  It will leverage material from the last two weeks to solve a real-world problem and is a good example of using these tools and concepts.  This lab will introduce the openssl command with a couple examples and then ask you to use it to return useful data.

!! Complete:
* Lab [[Lab 22|labs/lab22.pdf]]


! Material:

!! Quoting:
* Read Chapter 7, pp 75-79 (Quoting) in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]].
* Watch:  Linux shell quotes: https://www.youtube.com/watch?v=1Ukw0IjGKsI

This half of the week is lighter than the second half.  Getting a head start on [[Week 6, Part 2]] would be wise.


! Notes:

!! Escaping & Quoting:

!!! Quoting - ' " \

Some special characters, such as the space and most symbols, have a special meaning to the shell. Occasionally we need to use those special characters literally without allowing the shell to interpret their special meanings.

Quoting allows us to protect these special characters from the shell. It is necessary in order to use a metacharacter literally, to disable its special shell meaning.

For example, consider the scenario where you need to display the contents of a file which contains a space in the name.  The space has a special meaning to the shell; it is our argument separator.

If my file is named {{File{my notes.txt}}}, and I try to execute the command {{Command{cat my notes.txt}}} to display it, the space in the file name will cause cat to try to display the file {{File{my}}} and the file {{File{notes.txt}}}, neither of which actually exist.

I need to protect that special symbol, the space, from the shell to ensure the cat command gets it.  There are three ways I can do so:

* {{Command{cat "my notes.txt"}}}
* {{Command{cat 'my notes.txt'}}}
* {{Command{cat my\ notes.txt}}}

Each of the options work a little differently.  Knowing these differences allows you to choose the best method for the task.


!!!! Three ways to quote:

* Backslash ({{Monospaced{\}}}) - Changes the interpretation of the character that follows
** {{Monospaced{\}}} is the escape character, which will disable special meaning of a shell special character.
** Converts special characters into literal characters and literal characters into special characters
** n vs \n
*** The {{Monospaced{\}}} will //enable// the special meaning of a regular character.  
*** Newline - {{Monospaced{\n}}}
*** Tab - {{Monospaced{\t}}}
** {{Command{printf "Home is %s\n" $HOME}}}
** {{Monospaced{\}}} followed by return - suppress the special meaning of the return key
* Double Quote (weak) - Will remove the special meaning of //some// metacharacters
** {{Monospaced{"}}} quoting will still evaluate variable, command, and history substitution.
* Single Quote (strong) - Will remove the special meaning of //most// metacharacters
** {{Monospaced{'}}} is stronger then {{Monospaced{"}}}, which means it will protect more metacharacters from the shell
** {{Monospaced{'}}} quoting will only evaluate history substitution in //some// shells.  The single quote will not evaluate history substitution in {{Command{bash}}}
* You can alternate quotes to include the other type: {{Command{echo "Today's date is `date`"}}}

You can read about them in Chapter 7, pp 75-79 (Quoting) in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] or watch this video:  [[Linux shell quotes|https://www.youtube.com/watch?v=1Ukw0IjGKsI]].


! Assignment:

!! Complete:
* Labs:  [[Lab 22|labs/lab22.pdf]] & [[Lab 23|labs/lab23.pdf]]
! Material

* Read Chapter 10 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]].
* Watch:
** [[Linux Terminal 101: Controlling Processes|https://www.youtube.com/watch?v=XUhGdORXL54]]
** [[Linux Terminal 101: How to View Processes|https://www.youtube.com/watch?v=Udr-qE0NEO0]]

! Notes

We're going to take a break from filters and managing text for a moment to review some system utilities and concepts.  Our next material contains information on process management and job control.  This will become especially useful once we start shell scripting and managing systems.  This material will assist you with running multiple simultaneous tasks on the system and monitoring system resources to ensure your scripts are not impacting performance.

Read Chapter 10 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]].

!!! Note:
* In Chapter 10 they refer to the practice command {{Command{xlogo}}} which is not installed on the shell server.  Instead of using that command, run {{Command{less /var/log/messages}}} instead.  
* Don't get too bogged down on the Signals section, pages 117 through the top of page 120.  We'll revisit this later.

Watch: 
* [[Linux Terminal 101: Controlling Processes|https://www.youtube.com/watch?v=XUhGdORXL54]]
** Use a command like {{Command{less}}} or {{Command{tail -f}}} to test her examples.  We do not have {{Command{gedit}}} available.
* [[Linux Terminal 101: How to View Processes|https://www.youtube.com/watch?v=Udr-qE0NEO0]]


''Processes'' and ''Jobs'' have simple, fundamental differences:
 - Processes refer to all tasks currently running on the Linux system.  Every running task is considered a process and has a unique process ID.
 - Jobs are relative to your current login session.  They refer to current tasks you are running in that shell instance that may be running in the background, running in the foreground, or paused.  This presents an easy way to run multiple tasks at the same time from the same shell session or easily move between two tasks (eg: a text editor and a man page)


These are my notes from the last time I taught this class as a lecture.  Be sure to also read the chapter.

!! Processes
Everything on the system is represented by either a file or a process - something that physically exists or something that's running.
A process is a program that is executing
Files provide us with data, processes make things happen
The kernel manages processes - all processes are assigned a Process ID number (PID)
The kernel also maintains a process table to keep track of everything, indexed by that PID.

Processes are a mix of system and user processes.  In the process list, kernel process are listed within  [ ]
The kernel contains a scheduler to manage the workload and share system resources
The scheduler chooses a waiting process and grants it a short slice of time to run along with any needed resources (processor, memory, I/O, other devices, etc)
If the process is not completed within its time slice, it goes back into the scheduling list and waits for additional processing time
time slice = short interval a process is allowed to run for.  

Every process is created (forked) from another process
Killing the parent process will (in most cases) kill the child processes
When a process dies or completes, its resources are reallocated
init/systemd process, PID 1 - parent to all processes on the system
created early in the boot procedure to set up the kernel and complete booting functions

!! ps command
*ps - process status
*will show useful process information
*BSD options versus UNIX options
* These are some BSD options (also available in Linux)
**{{Command{ps}}} - Show processes for your current login session
**{{Command{ps -a}}} - Show all user processes
**{{Command{ps -u}}} - Display additional process information
***%CPU - percentage CPU usage
***%MEM - percentage memory usage
***VSZ - virtual size in Kbytes
***RSS - resident set size
***TT - control terminal name (tty//x////x//)
***STAT - symbolic process state
***TIME - accumulated CPU time, user + system
**{{Command{ps -x}}} - Display system processes
**{{Command{ps -aux}}} - Show extended information on all processes  - This is often the most useful way to use the command.
**{{Command{ps -U //username//}}}  - Display processes associated with //username//
**{{Command{ps -P //PID//}}}  - Display processes associated with process ID //PID//

*top - display and update information about the top cpu processes
**THR - 
**PRI - current priority of the process
**NICE - nice  amount  (in  the  range -20 to 20)
**SIZE - total size of the process (text, data, and stack) (in K)
**RES - current amount of  resident memory (in K)
**STATE - current process state (START, RUN, SLEEP, STOP, ZOMB,  WAIT, LOCK)
**C -  processor number on which  the  process  is  executing
**TIME - number of system and user cpu seconds that the process has used
**WCPU - weighted  cpu percentage

!! Killing Processes
*kill [-signal] //pid//
* Common Signals:
| !Signal Number | !Signal Abbreviation | !Description |
| 1 | HUP |Hangup (restart process, reload config)|
| 2 | INT |Interrupt (~CTRL-C)|
| 3 | QUIT |Quit|
| 9 | KILL |Immediate kill.  Not catchable or ignorable.|
| 15 | TERM |Request to gracefully terminate (default)|
SIGINFO = ~CTRL-T  (~FreeBSD Only)

*Killing large groups of processes
**{{Command{pkill}}} command

[>img[img/jobs.png]]
!! Jobs & job control

*Jobs - a command or set of commands entered in one command line.
*jobs are related to the user's session and are not global.
*STDIN is locked while a job is running in the foreground - it is only available to the current job until it completes.
*running background jobs allow the user to access these resources and have control of the shell.
*background jobs will still send their output to the screen and must be brought back to the foreground if they request input from the user
*a job may have one of three states - foreground, background, and stopped.

* append ''&'' to the command string to run it directly to the background
*~CTRL-Z - suspend a running foreground process
*Related commands:
** {{Command{jobs}}}
** {{Command{fg}}}
** {{Command{bg}}}
** {{Command{kill -STOP %//id//}}}
* Manipulate jobs with ''%'' and the job number
** Examples:  {{Command{fg %1}}} - resume the first background job in the foreground
** {{Command{bg %2}}} - resume the second job in the background
*currency flags: ''+'' and ''-''
** ''+'' most recently accessed job, default job if no arguments are specified.
** ''-'' second most recently accessed job, default job when ''+'' flagged job completes.

!! Two additional recommended videos:

* Kill command video: https://www.youtube.com/watch?v=fT-h45L9RAY
* Difference between processes and jobs: https://www.youtube.com/watch?v=eqtiw8S8GZw


! Assignment

* Complete [[Lab 24|labs/lab24.pdf]]
! Material

!! History Substitution:
* Chapter 8 - The entire chapter has good info, but pay particular attention to pages 85 (bottom), 86 & 88
* Watch: Linux History Explained: https://www.youtube.com/watch?v=MbXofShhMv8

!! Variable Substitution:
* Read: Chapter 7, pp 74-75 and Chapter 25, pp 377-378
*Watch:  Linux Shell Variables:  https://www.youtube.com/watch?v=3BZzFRPYU_I

!! Command Substitution:
* Read: Chapter 7, pp 74-75
* Watch: Command substitution using backticks: https://www.youtube.com/watch?v=VOOeXV4HYSA


! Notes

!! Shell Substitutions

Substitutions are transformations the shell performs on input before a command string is fully executed.  When the Unix shell encounters a substitution metacharacter, it will evaluate it to perform any substitutions before executing the full command string. These substitutions allow us to expand filenames, evaluate variables, recall previous commands, or use the result of one command as an argument to another. We already discussed filename substitution (file globbing). History substitution is very useful for recalling previous commands without having to retype it. Variable and command substitution are used extensively in shell scripting and have a useful place on the command line.

As you work with these substitutions, keep in mind the echo command can be used to preview the command string the shell will be executing after all substitutions are performed.  Simply start your command string with {{Command{echo}}} to test it.  We did this in Lab 23, #4 with the cat dog rabbit wombat question.


!!! History substitution

History substitution allows us to quickly recall previously executed commands. Previous commands are saved in a buffer which is written to the file ~/.bash_history upon logout. This allows history to be preserved across sessions and is useful for an administrator who needs to inspect activity of users on the system.

* Read:
** Chapter 8 - The entire chapter has good info, but pay particular attention to pages 85 (bottom), 86 & 88
*Watch:
**Linux History Explained:  https://www.youtube.com/watch?v=MbXofShhMv8


!!! Variable substitution

Variable substitution allows data to be stored for later use, much like any other programming language. The main application here is for shell configuration settings and for use in shell scripting. Variable substitution is not used as much as the other substitution forms when working directly on the command line.

* Read:
** Chapter 7, pp 74-75 and Chapter 25, pp 377-378
* Watch:
** Linux Shell Variables: https://www.youtube.com/watch?v=3BZzFRPYU_I


!!! Command substitution

Command substitution allows us to use the result of one command as an argument to another. Backticks or {{Command{$( )}}} are used to execute an inner command first. That inner command (including the backticks) is replaced by its output. The full command string is then executed.

''Important note:''  The backtick ''`'' and the single quote ''''' look rather similar.  Be sure to approach this section with an eye for detail so you don't confuse the two.

Consider this example. I often work remotely and need to remotely power on my home Windows PC to retrieve some files or continue working with them. The wake-on-LAN function built into many motherboards allows for remote wake-up by broadcasting a specially crafted packet containing the system's MAC address to the broadcast address of the local subnet. Unix utilities exist to facilitate this. Their syntax is usually {{Command{//command// //~MAC-address//}}}.

I log into my home unix fileserver from a remote location via SSH. I have my PC's MAC address saved in a text file within {{File{/tmp/}}}:

{{{
# I can see that my PC's MAC address is saved in the text file named win7
root@trillian:/tmp # cat win7
c7:62:00:a2:25:55

# Rather than copy and paste, command substitution is a faster way to get that MAC address added to the command line as an argument to the wake command.  
# The shell will first perform the substitution, replacing `cat win7` with the output of the cat command.  Next, the full command string will be executed.
root@trillian:/tmp # wake `cat win7`

# I can preview the full result of my substitution by prefixing the command string with echo to see what will really be executed by the shell
root@trillian:/tmp # echo wake `cat win7`
wake c7:62:00:a2:25:55
}}}

An even better way involves combining history and command substitution:

{{{
# Preview my file, make sure the MAC address looks good
root@trillian:/tmp # cat win7
c7:62:00:a2:25:55

# History substitution will be used first to replace !! with the last executed command (cat win7).  
# Next, command substitution will replace the backticks with the result of executing the enclosed command (the MAC address)
# Finally, the full wake command string with the MAC address added as an argument will wake up my Windows PC.
root@trillian:/tmp # wake `!!`
}}}

* Read:
** Chapter 7, pp 74-75
* Watch:
** Command substitution using backticks: 
*** https://www.youtube.com/watch?v=VOOeXV4HYSA


! Assignment

!! Complete:
*  Complete [[Lab 25|labs/lab25.pdf]] & [[Lab 26|labs/lab26.pdf]]
! Material

!! The {{Monospaced{vi}}} Editor

{{Monospaced{vi}}} is the standard Unix text editor.  Extremely powerful and available on every system.

!!! Read:
 - Chapter 12 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]]
 - Review notes on [[The vi Editor]] page

!! The {{Monospaced{nano}}} Editor

{{Monospaced{nano}}} is easier to use than {{Monospaced{vi}}}, but not nearly as powerful.


!!! Read
 - Chapter 11 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]] beginning on the bottom of page 136


!! Shell scripting

!!! Read:
 - Chapter 24 and 27 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]]

!!! Watch:
 - [[Bash scripting basics|https://www.youtube.com/watch?v=NSu4IWlOU7k]]
 - [[Exit Status|https://www.youtube.com/watch?v=Fghnnrbag-w]]
 - [[If statements 1|https://www.youtube.com/watch?v=elrbjYdL-8c]] & [[If statements 2|https://www.youtube.com/watch?v=XDi9W0V-ibA]]


! Notes

!! Unix text Editors

The two main Unix command-line text editors are {{Command{vi}}} and {{Command{nano}}}.  The {{Command{vi}}} editor is the standard and is universally available on every Unix system.  It's extremely powerful, but has a bit of a learning curve and takes some time to get used to it.  The {{Command{nano}}} editor isn't as universally available, but can be installed on most Unix systems, is easier to learn, but not nearly as powerful.

If you will be working with the Unix command line in the future, especially professionally, becoming familiar with {{Command{vi}}} will be worthwhile.  Otherwise {{Command{nano}}} will be sufficient for this course.  

I have material available on the {{Command{vi}}} editor on [[this page|The vi Editor]] if you would like to review it.  There is an extra credit lab available there as well.

Using one of the text editors will be necessary to complete the next material:  shell scripting.  Pick whichever one you'd like.


!! Basic Script Building

We don't have enough time to go deep into Shell Scripting, but it's an important topic to at least mention.  We'll spend this week on the highlights.  All of the chapters in Part 4 are worthwhile if you have the time and interest in further developing your shell scripting skills.  The [[Linux Command Line and Shell Scripting Bible|https://www.amazon.com/Linux-Command-Shell-Scripting-Bible/dp/111898384X]] is another good resource I've used for other classes.


Watch: [[Bash scripting basics|https://www.youtube.com/watch?v=NSu4IWlOU7k]]


!!! Outline
A shell script is an executable file that contains a list of shell commands
* It's an automated way to run a series of commands.
* Need to run a bunch of commands often?  Script it.
* Need to run a few complicated commands only occasionally?  Script it.

The file is interpreted by the shell specified, it is not compiled

There are [[two main shell families|img/shells.jpg]] - bourne and ~C-Shell
* We will be writing our scripts for the bourne/bash shell.
* The first line of the script contains the shebang, {{Command{#!}}}, specifying the path for the interpreter to use.  

A script could be as simple as a single command or a series of commands
eg: Instead of having to type out the find command to scan our home directory for old, large files, we can put it in a script.

{{{
#!/bin/sh
# ./oldcrud.sh
# Report large, stale files: anything under the home directory not accessed in
# over a year (52 weeks) and larger than 50MB, printed in ls -l style.
# NOTE(review): the 'w' (weeks) unit suffix on -atime is BSD find syntax; GNU
# find's -atime takes plain days only — confirm the target platform.

find ~ -atime +52w -size +50M -ls
}}}

- or -

{{{
#!/bin/sh
# ./oldcrud.sh
# This version accepts a command line argument to delete the files it finds.
# With -d as the first argument the matching files are deleted; otherwise they
# are only listed.
# NOTE(review): the 'w' (weeks) unit suffix on -atime is BSD find syntax; GNU
# find's -atime takes plain days only — confirm the target platform.

# $1 is the first command-line argument; quoting it guards against it being empty.
if [ "$1" = "-d" ]
then
	find ~ -atime +52w -size +50M -delete
else
	find ~ -atime +52w -size +50M -ls
fi
}}}

!!! Executing a shell script:
* Scripts must at least be readable in order to run.  
* A program not in your path must be executed by prefixing the filename with path information, eg:  {{Command{./script.sh}}}
** Execute permission is required when executing scripts in this fashion.
* Scripts can also be executed as arguments to an interpreter, eg:  {{Command{sh script.sh}}}
** If the file is prefixed with the name of the interpreter, only read permission is required.

Three options to run a script:
# Place in $PATH (needs read & execute)
# {{Command{./scriptname.sh}}} (needs read & execute)
# {{Command{sh scriptname.sh}}} -or- {{Command{bash scriptname.sh}}} (needs only read)


!!! Variables
User variables can be set with {{Monospaced{''=''}}}
{{Monospaced{variable=value}}}  (''No spacing!''  Adding a space will result in a syntax error)
and referenced with {{Monospaced{$variable}}} or {{Monospaced{${variable} }}}.  Useful for supplying text immediately after the variable expansion:  {{Monospaced{echo ${size}mb }}}
{{Monospaced{variable=$(command)}}} or {{Monospaced{variable=`command`}}}  can save output from a command to the variable, eg: {{Monospaced{files=$(ls | wc -l)}}}
 -- We will be using a lot of command substitution.
{{Command{unset variable_name}}} - remove a variable
The {{Command{read}}} command will accept input from the keyboard:  {{Command{read //variablename//}}}
 -- Use {{Command{read}}} with the ''-p'' option to supply a prompt.
{{Command{export //variable_name//}}}  - export a variable to the shell for future use
{{Command{readonly //variable_name//}}} - prevent variable from being changed

Example script.  Prompt for user input and then compute a value.
{{{
#!/bin/sh
# ./nums.sh
# Demonstrate two ways to prompt for keyboard input, then add the two numbers.

# Style 1: read -p supplies the prompt and saves the input in $number1
read -p "Enter a number: " number1
echo you entered $number1

# Style 2: echo -n prints the prompt without a trailing newline, then read
# collects the input into $number2
echo -n "Enter a number: " 
read number2
echo you entered $number2

# expr performs the integer addition and prints the result on its own line
echo
echo -n "Sum is: "
expr $number1 + $number2
}}}


{{Note{It's a best practice for variable names to describe what they contain.  Notice I used {{Monospaced{''$number1''}}} and {{Monospaced{''$number2''}}} for my variables above.  These variable names both describe what should be stored within them.  Using non-descriptive variable names such as {{Monospaced{''$a''}}} or {{Monospaced{''$var1''}}} will result in lost points.}}}


!!! Exit Status

Next we're going to begin to introduce three inter-related concepts: command exit status, the test command, and if statements in that order. We're going to use them in practice in the reverse order.

When constructing an if statement, most often we'll be using the test command to evaluate the conditions (eg, whether a file exists, or whether one number is greater than another). The test command will return an exit status to indicate whether that evaluation was true or false. The if command will then use that exit status to determine what to do.

Every command we execute on the system returns an exit status to indicate whether it ran successfully or not. The exit status is stored in the special shell variable {{Monospaced{''$?''}}}.

Exit status values fall within the range 0 - 255. An exit status of 0 always indicates success. An exit status greater than 0 indicates some form of failure. Having many possible values to indicate failure (any positive integer) allows the program to indicate either the type of failure or where the failure occurred.

Notice the difference between the two executions of the {{Command{id}}} command:

{{{
[user@shell ~]$ id root
uid=0(root) gid=0(root) groups=0(root)
[user@shell ~]$ echo $?
0

[user@shell ~]$ id root2
id: root2: no such user
[user@shell ~]$ echo $?
1
}}}

The first instance completed successfully, so we received an exit status of 0. The second instance returned an unknown user error and an exit status of 1.

Watch:  https://www.youtube.com/watch?v=Fghnnrbag-w


!!! Useful commands for shell scripting

!!!! {{Command{test}}} command:

The {{Command{test}}} command (also known as {{Command{[}}}), allows us to perform comparisons, check strings, or evaluate files on the system. It works by returning an exit status that an if statement checks to determine whether something completed true (successful, exit status of 0) or false (not successful, exit status > 0).
''Note:'' The test command has two names: {{Command{test}}} and {{Command{[}}} (square bracket). Both files exist on a unix system and you may see scripts written using either. {{Command{[}}} is the common way to represent the command. When the {{Command{[}}} is used, a closing ] must be placed at the end of the test expression. Remember: The {{Command{[}}} is an actual command! And like all commands, spaces must separate the command from its arguments.

With this example, we're first checking to see whether an item is a regular file. It fails (returns an exit status of 1) because it is not a regular file. Next we check to see whether the item is a directory. An exit status of 0 indicates success, confirming the item is a directory.

{{{
[user@shell ~]$ [ -f /tmp/lab23 ]
[user@shell ~]$ echo $?
1

[user@shell ~]$ [ -d /tmp/lab23 ]
[user@shell ~]$ echo $?
0
}}}

The {{Command{test}}} manpage will be a great resource for learning about the different comparisons or tests available.

{{Warning{''Warning:''  be sure to use quotes with string evaluation }}}


!!!! The {{Command{expr}}} command can evaluate an expression
perform integer math & comparisons
verify string input against a regular expression

:expr 5 + 5


!!! Control structures:

Shell scripts can utilize control structures common to all programming languages. This allows us to construct more complex scripts which can evaluate conditions or iterate over lists. The most basic of our control structures is the if statement. An if statement has three parts:
* the initial if test
* followed by one or more optional elif statements
* and ending with an optional else condition.

If statement synopsis:
{{{
if condition
then
   commands
elif condition
then
   commands
else
   commands
fi
}}}

If statement example:
{{{
#!/bin/sh
# ./exists.sh
# Classify the item named by the single command-line argument: regular file,
# directory, something else that exists, or nonexistent.

# Require exactly one command-line argument; otherwise print usage and exit
# with a failure status.
if [ $# -ne 1 ]
then
	echo give me a file
	exit 1
fi

# "$1" is quoted so a filename containing whitespace reaches test as a single
# argument instead of breaking the expression apart.
if [ -f "$1" ]
then
	echo "it's a regular file"
elif [ -d "$1" ]
then
	echo "it's a directory"
elif [ -e "$1" ]
then
	echo "don't know what this is"
else
	echo "it doesn't even exist"
fi



!!! Script Writing

When writing your scripts, the following header ''must'' be placed at the top of the file, immediately after the shebang:
{{{
#!/bin/sh
# File name:
# Author:
# Date Written:
# Assignment:
# Purpose:
# Description:
#
#
#
}}}

Tips and good habits to start developing now:  
* Comment your script with {{Monospaced{#}}}.  Comments throughout your script make it easier for others to understand what's going on.
* Long lines should be wrapped. Cut long lines at about column 60. (makes it easier to read and print)
* Using meaningful variable names makes it easier to understand their purpose.  Use of generic variable names (eg: var1) is bad form and will result in lost points.


! Assignment:

!! The vi Editor:
 - Complete [[Lab 30|labs/lab30.pdf]] & [[Lab 31|labs/lab31.pdf]] 
 - These labs are optional for additional vi practice and will be accepted for extra credit.  There is no firm due date, but please try to submit before the end of October.

!! Shell scripting:
- Complete [[Lab 32|labs/lab32.pdf]] & [[Lab 33|labs/lab33.pdf]]


//''Note''//: Labs 27 - 29 were skipped.
! Material

!! Read:
* Chapter 28 - Reading Keyboard Input
* Chapter 32 - Positional Parameters


! Notes

This portion will cover four main concepts:

 - Obtaining input from the user
 - Positional Parameters (obtaining input from command-line arguments)
 - for loops
 - Input Validation (optional)

!! Obtaining input from the user:

Chapter 28 covers the {{Command{read}}} command, along with a lot of advanced detail that is beyond the scope of this class.  

The {{Command{read}}} command will accept input from the keyboard and save it to the specified variable:  {{Command{read //variablename//}}}
 - Use {{Command{read}}} with the ''-p'' option to supply a prompt.

Examples: 

{{{
#!/bin/sh
# ./nums.sh
# Demonstrate prompting for keyboard input with read, then adding the numbers.

# Style 1: read -p supplies the prompt and saves the input in $number1
read -p "Enter a number: " number1
echo you entered $number1

# Style 2: echo -n prints the prompt without a trailing newline, then read
# collects the input into $number2
echo -n "Enter a number: " 
read number2
echo you entered $number2

# expr performs the integer addition and prints the result
echo
echo -n "Sum is: "
expr $number1 + $number2

# A single read can populate multiple variables; the input line is split on
# whitespace, one word per variable.
read -p "Enter two numbers: " number1 number2

echo
echo -n "Sum is: "
expr $number1 + $number2
}}}


!! Positional Parameters

The read command will allow us to prompt for input.  Positional parameters (variables) will allow our scripts to obtain input from command-line arguments.  Chapter 32 will discuss them in more detail.


Special shell variables:
| !Variable | !Description |
| $0 |Name of the script|
| $1 - $9 |Values of command line arguments 1 - 9|
| $# |Total number of command line arguments|
| $* |Value of all command line arguments|
| $@ |Value of all command line arguments; each quoted if specified as "$@"|
| $? |Exit status of most recent command|
| $$ |Process ID of current process|
| $! |PID of most recent background process|


Command line arguments can be passed to a shell script and stored in $1 through $9
{{{
#!/bin/sh
# ./vars.sh
echo $1 - $2 - $3
echo All command line arguments: $*
echo Number of command line arguments: $#
echo Name of the current command: $0
echo Exit status of the previous command: $?

[root@shell ~]$ sh vars.sh first orange third wolf
first - orange - third
All command line arguments: first orange third wolf
Number of command line arguments: 4
Name of the current command: vars.sh
Exit status of the previous command: 0
}}}


! Assignment

!! Complete
* Complete [[Lab 34|labs/lab34.pdf]] & [[Lab 35|labs/lab35.pdf]] 
* Complete [[Lab 36|labs/lab36.pdf]]
** ''Be sure to follow the [[Shell script submission requirements]] to submit your shell script labs''

{{Note{''Note:'' Lab 35 pro tip - avoid typing out long commands or scripts.  Typing out long commands manually is a great way to introduce typos and break things.  Use copy/paste instead.  If you haven't noticed yet, the [[Linux Shortcuts]] page covers how to copy/paste in putty/Linux.}}}
! Material

!! Read:
* Chapter 33 - For Loops
* Chapter 30 - Troubleshooting
** This is optional, but may be helpful.  Especially the first five pages in the chapter.
!! Watch:
* [[Bash for-loops 1|https://www.youtube.com/watch?v=sIYmF32Ic8s]] & [[Bash for-loops 2|https://www.youtube.com/watch?v=HLFenK13VDY]]

! Notes

!! For loops

The bash for-loop is a control structure which allows us to iterate through items in a list.  The list can be populated statically (from strings you define directly) or as a result of any form of shell substitution (variable, file, or command).  Within the for loop, your commands will then be executed for each item in the list. 

Watch:  [[Bash for-loops 1|https://www.youtube.com/watch?v=sIYmF32Ic8s]] & [[Bash for-loops 2|https://www.youtube.com/watch?v=HLFenK13VDY]]

For Loop synopsis:
 - //list// is a list of items to iterate over
 - //variable// is a variable to store the current list item in as we run through the commands within the for loop.   
{{{
for variable in list
do
   commands
done
}}}

For example, this small script will iterate over a list of PNG files in the directory to change their extension from .png to .jpg
{{{
#!/bin/sh
# ./renpng.sh
# Rename every *.png file in the current directory to the same name with a
# .jpg extension.

# List the PNG files in the directory.  We only care about the exit status, so
# the output streams are sent to /dev/null.  (Closing STDOUT with >&- would
# make ls itself fail with a write error even when PNG files exist, corrupting
# the exit status we are about to test.)
ls *.png > /dev/null 2>&1

# This is a short-circuited conditional to evaluate the exit status of the ls command.  If there are no PNG files in the current
# directory, the exit status from the ls command will be non-zero.  This operates like a truth table, executing the commands in the list
# until one returns a positive exit status.  If there are no files in the directory (exit status != 0), display an error message and exit.
# This basic test ensures our script exits gracefully if there are no files
[ $? -ne 0 ] && echo Error: No PNG files && exit 1

# Iterate over each png file in the directory, saving the file to operate on in the variable $png
for png in *.png
do
	# The basename command removes an extension from a filename.  We're stripping off the png extension so we can add .jpg later
        filename=$(basename "$png" .png)
        mv "$png" "${filename}.jpg"
        echo renaming $png to ${filename}.jpg
done
}}}


The {{Command{break}}} command executed within a loop will terminate it.   
The {{Command{continue}}} command will start the next iteration of a loop.
These are commonly used combined with if-statements to skip an item in the list or end the loop early.

For example, my script to collect your labs contains this at the top of the for-loop.  It will cause processing of a lab to stop if I already have it.

{{{
cd /opt/pub/ncs205/submit/
for file in ncs205-lab*.pdf
do
        hash=$(md5sum "$file" | cut -c 27-32)
        base=$(basename "$file" .pdf)
        [ -f $RSL_DIR/ncs205/collected/"$base"-v?-${hash}.pdf ] && continue

	# more processing commands follow
done
}}}


!! While loops

Here's a brief synopsis and example of a while loop.  We don't have time to cover them and none of our scripts will require them.  If-statements and for-loops are the two most useful to know.
 

While Loop:
{{{
while  condition
do
   commands
done
}}}

{{{
#!/bin/sh
# Create 100 png files

count=100
while [ $count -gt 0 ]
do
        string=$(printf "%04i" $count)
        dd if=/dev/urandom of=IMG_${string}.png bs=5k count=1 2>&-
        count=$(expr $count - 1)
done
}}}

The {{Command{break}}} command executed within a loop will terminate it.   
The {{Command{continue}}} command will start the next iteration of a loop.


!! Extra Commands:

* The {{Command{md5sum}}} command can be used to calculate file checksums
* The {{Command{stat}}} command can pull extra details about a file and allow for extra formatting options:
* The {{Command{date}}} command can display the current date and time in various formats.  Check its manpage for a list of format codes.

{{{
# Obtain a unix timestamp integer for the file's modification time:
[user@lab ~]$ stat -c '%Y' ncs205-lab10-browng.pdf
1478659277

# Convert a unix timestamp to a human-readable format:
[user@lab ~]$ date +"%m/%d/%y @ %H:%M" --date=@1478659277
11/05/16 @ 21:44

# Display the current date & time as a unix timestamp integer:
[user@lab ~]$ date +"%s"
1478659280   


# Use in a shell script with command substitution:
mtime=$(stat -c '%Y' $file)
mdate=$(date +"%m/%d/%y @ %H:%M" --date=@$mtime)

# Display them:
echo "$mtime / $mdate" 
1478659277 / 11/05/16 @ 21:44
}}}


!! Input Validation

We've demonstrated two ways to gather script input directly from the user:  as command line arguments and via the read command.

Basic error checking of user input is always preferred.  Never rely upon your users to enter input appropriately.  It is far better to catch any potential issues yourself instead of having the script run into a syntax error.  For example, if your script requires a command line argument but one is not provided, things may break down.  If you ask for a number, what will your program do if a user enters a string of letters instead?  Not properly validating input is also the basis for a lot of attacks, especially against web servers.

A basic test to ensure a command line argument was provided and exiting the script gracefully with an error message if it wasn't would be good.

Basic tests should be done any time you are gathering input from a user.  Some examples:

* Does a file actually exist?
* Is a username valid on the system?
* Is an IP address in the proper format?
* Is a number actually only composed of digits?

The test command can evaluate whether a file exists.  The id command can evaluate whether a user is valid on the system.  

The expr utility can be used to validate input in our scripts based on a regular expression.  File globbing is a way to concisely match a group of files based on wildcards.  Similarly, regular expressions are a concise way to represent strings using wildcards and operators.  Understanding and constructing regular expressions is beyond the scope of this course.  The ideas are similar to file globbing but the implementation is different.  If you're curious, regular expressions are discussed in the textbook in Chapter 19.  

We'll be using the {{Command{expr}}} utility to compare two strings.  Addition with {{Command{expr}}} is composed of two operands (our input) and an operator (the + symbol), eg:  expr 3 + 5

The {{Command{expr}}} utility can also be used for string comparisons.  When comparing strings with {{Command{expr}}}, the first operand is the string we are checking.  This can either be a static word or come from a variable.  The {{Command{expr}}} operator for string comparisons is a : (colon).  The second operand is a pattern resembling the string we are expecting.

For example, the following command performs a comparison between the string on the left and the pattern on the right:

{{{
expr 123 : '^[0-9]\{1,\}$'
}}}

The pattern {{Command{^[0-9]\{1,\}$}}} requires a string that contains 1 or more numbers. The anchors ^ and $ are used to specify that no other characters can be present in the string. The command above will return the number of characters matched by the pattern, in this case 3.

{{{
$ expr 123 : '^[0-9]\{1,\}$'
3
}}}


If I add something besides a number to my string, the pattern will not match so the character count will be 0

{{{
$ expr a123 : '^[0-9]\{1,\}$'
0
}}}

We can then evaluate the output of the expr command to determine whether we received the correct type of input. An example of this follows:

{{{
#!/bin/sh
# ./adder.sh
# Validate that both command-line arguments are unsigned integers, then print
# their sum.  Exits with status 1 on any invalid input.

if [ $# -ne 2 ]
then
        # Display proper script usage and terminate the script with an exit status of 1 if two command-line arguments are not provided.  
        echo "Usage: $0 integer1 integer2"
        exit 1
fi

# Test to make sure our arguments only contain digits.  Notice the use of anchors.  
# A positive number (the count of characters matched) will be saved to the variables if there is a match.
# The arguments are quoted so an argument containing whitespace cannot break the expr syntax.
match1=$( expr "$1" : '^[0-9]\{1,\}$' )
match2=$( expr "$2" : '^[0-9]\{1,\}$' )

# Make sure both matches contain positive numbers
if [ "$match1" -gt 0 -a "$match2" -gt 0 ]
then
        sum=$(expr "$1" + "$2")
        echo
        echo $1 + $2 = $sum
        echo
else
        echo "You did not enter 2 integers"
        echo "Usage: $0 integer1 integer2"
        exit 1
fi
}}}

! Assignment

* Complete [[Lab 37|labs/lab37.pdf]] & [[Lab 38|labs/lab38.pdf]] - Due Wednesday
* Complete [[Lab 39|labs/lab39.pdf]] & [[Lab 40|labs/lab40.pdf]] - Due Saturday

* An extra credit scripting lab is available - [[Lab A3|labs/labA3.pdf]]
** Follow the normal shell scripting process to submit it.  Use the uppercase ''A'' instead of a lab number in the file name you submit.
** There is no due date for this lab.  
! Material

* Read Chapter 19 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]].

This material is a little more difficult to understand than most of what we'll be working on.  Make use of the discussion boards if you're running into trouble!  We can also spin up another online meeting if anyone would like to chat.

We only have enough time to skim the surface.  Regular expressions are very handy tools, especially when combined with our filters or when you need to perform complex text manipulations in vi.  I'd like to at least introduce the topic.

If anyone would like to dive deeper into this topic, I recommend [[Mastering Regular Expressions|https://www.oreilly.com/library/view/mastering-regular-expressions/0596528124/]].  It's an excellent technical book.


! Regular Expression Intro

* What is a regular expression?
** Compact way of specifying a pattern of characters
** text is evaluated against the pattern to determine if there is a match.
*** We can then perform actions based on that test
** We can use a regular expression to examine the contents of an entire file, such as with grep
** Or we can examine just a string (or variable), such as with input validation
* What are they used for?
** pattern matching - instead of searching for a simple word, search for more complex patterns - 
** Similar to how file globbing works.  Only here, we're examining the content of the files and using the metacharacters a little differently
** Complex substitutions (search & replace)
*** Helpful in vi or a Windows text editor such as notepad++
** Input verification
*** Make sure the user is providing an appropriate input.  For example, if we prompt for an IP address, ensure what's provided is a valid IP address.
* How are they used?
** {{Command{grep}}} - basic regular expressions 
** {{Command{egrep}}} or {{Command{grep -E}}} - extended regular expressions
** {{Command{sed}}}
** {{Command{vi}}}
** {{Command{perl}}}
** any other programming language

What to match:  a combination of ''atoms'' and ''operators''
''atom'' = something we're trying to match
''operator'' = how we're trying to match it
 
''Atoms'' - Any regular character, or:
| !Symbol | !Meaning | !Escape | !Not supported by |
| {{Monospaced{'' [ ] ''}}} |Character Classes (match any one character listed) | | |
|~|Characters may be specified singly or in ranges| | |
| {{Monospaced{'' [^ ] ''}}} |Negated character class (match any one character not listed)| | |
| {{Monospaced{'' . ''}}} |Any single character| | |

''Operators'' - Modify what they follow
These operators act as quantifiers, specifying the number of times an atom is matched.

| !Symbol | !Meaning | !Escape | !Not supported by |
| {{Monospaced{'' ? ''}}} |Optional item.  Match 0 or 1. | | sed |
| {{Monospaced{'' * ''}}} |Repetition: 0 or more| | |
| {{Monospaced{'' + ''}}} |Repetition:  1 or more. | | sed |
| {{Monospaced{'' { } ''}}} |Repetition: Defined range of matches {//min//,//max//} or {//min//,} or {//exactly//}| * | |

| !Symbol | !Meaning | !Escape | !Not supported by |
| {{Monospaced{'' ( ) ''}}} |Grouping - next operator applies to whole group| | |
|~|Alternation (match any one of the sub-expressions)| | |
| {{Monospaced{'' {{{|}}} ''}}} |Or.  Match either expression it separates.  Use with ( )| | |

Grouping with the ''( )'' allow us to apply an operator to a group of atoms.  For example, to make the entire string ''abcde'' optional:  ''(abcde)?''.  The ''?'' will apply to the entire sequence of characters within the ''( )''.

{{Note{''Note:''  We use file globbing to identify groups of files and regular expressions to identify groups of characters.  File globbing is for file //names// and regular expressions are used for the file's //contents// or any other strings of characters.  }}}
{{Warning{''Warning:'' Regular expressions use many of the same metacharacters as file globbing, but they work differently here.  For example, if we want to list files which begin with the letter a, I would use the command {{Command{ls a*}}}.  But with regular expressions, if I want to find a string that begins with an a, I need to use the regular expression {{Monospaced{ ''^a.*'' }}}.  In file globbing, the {{Monospaced{'' * ''}}} stands alone.  In regular expressions it is a modifier and changes the behavior of what comes before it.  With regular expressions, the {{Monospaced{'' . ''}}} is used to specify any single character and the {{Monospaced{'' * ''}}} modifies it to match 0 or more occurrences.}}}


''Anchors'' can also be used to specify where our match must occur:
| !Symbol | !Meaning |
| ^ |Start of line|
| $ |End of line|
| \< |Beginning of word|
| \> |End of word|


Typically, when we are searching for a pattern, we are searching for a sub-string (a smaller string within a larger series of characters).  For example, if I want to display lines which contain //only// numbers, the command {{Command{egrep '[0-9]+' data.txt}}} will return the output:

12345
a1234
b1234c

But I want lines which contain //only// numbers.  I don't want to display the lines which also contain a letter.  The solution to prevent other characters from sneaking into your output is to use the anchors to ensure your regular expression is ''//completely matched//''.  This command will anchor the numbers to the beginning and end of the line so no other characters can be matched:  {{Command{egrep '^[0-9]+$' data.txt}}} 
 



[[Regular expression metacharacters]]
[[ASCII Chart|handouts/ascii-chart.gif]]

Regular expressions and file globbing have common metacharacters that have different meanings in the two contexts.  ''Be sure you know how the metacharacters differ.''


!! Examples

!!!! Match:
* grey with either spelling - {{Monospaced{'' gr[ea]y ''}}}
* color with American or British spelling - {{Monospaced{'' colou?r ''}}}
* variations of third street (3rd street or third street) - {{Monospaced{'' (3|thi)rd street''}}}
* The month June either full or abbreviated - {{Monospaced{'' June? ''}}}

* find words with 5 or more vowels in a row - {{Command{'' egrep '[aeiou]{5}' /usr/share/dict/words ''}}}

* Find words in the unix dictionary file that begin with book and end with y - {{Command{ grep '^book.*y$' /usr/share/dict/words }}} - Don't forget the anchors!
* Find words that are 4 characters beginning with a b and ending with a k - {{Command{ grep '^b..k$' /usr/share/dict/words }}}
* Find words in the dictionary file that begin with or end with the string book - {{Command{ egrep '(^book|book$)' /usr/share/dict/words }}}
* Match Kunsela ~C-Wing B&W printer queue names - {{Command{ grep '^c...lpr' /opt/pub/ncs205/data/filter_examples/printcap | cut -d '|' -f 1}}}
* Extract the mail sender and subject for the last 4 emails received - {{Command{egrep "^(From|Subject): " /var/mail/$USER | tail -n 8}}}

!!!! Input verification:
Completely match -
* a valid campus username - {{Monospaced{'' [a-z]{1,8}([0-9]{1,2})? ''}}} - 1 to 8 lowercase letters optionally followed by 1 or 2 numbers.
* a phone number - {{Monospaced{'' '^[0-9]{3}[-. ][0-9]{3}[-. ][0-9]{4}$' ''}}}
* time (12hr clock) -  {{Monospaced{'' '^([1-9]|1[0-2]):[0-5][0-9] [ap]m$' ''}}}
* a Dollar amount 

What is the difference between [ab]* and (a*|b*)


!!! More complex examples:

!!!! Search the ssh configuration files for all configuration directives that are enabled with a yes:  

Read this from bottom to top
{{{
egrep '^[^#]+ yes' /etc/ssh/ssh{,d}_config
       ^  ^ ^^---              ----
       |  | || ^                 ^--- Match both ssh_config and sshd_config
       |  | || |--------------------- Followed by the string yes
       |  | ||----------------------- There must be a space between the configuration item and its value
       |  | |------------------------ The + modifies the [^#] to allow for any length of characters here.
       |  |-------------------------- Followed by any character which is not a #.  We don't want comments.
       |----------------------------- The line begins 
}}}
This part is the regular expression that searches for the text:  {{Monospaced{'' ^[^#]+ yes ''}}}

This part is the file globbing pattern that identifies the files to search:   {{Monospaced{''/etc/ssh/ssh{,d}_config''}}}


!!!! Search all networking configuration files on the class shell server for an IP address:

{{Command{egrep '((1?[0-9]?[0-9]|2[0-4][0-9]|25[0-5])\.){3}(1?[0-9]?[0-9]|2[0-4][0-9]|25[0-5])' /etc/sysconfig/network-scripts/ifcfg-*}}}

It's important to understand how the metacharacters differ between file globbing and regular expressions.  In regular expressions, the {{Monospaced{'' ? ''}}} or {{Monospaced{'' * ''}}} modify the atom immediately before them.  For example, in the IP address above, the {{Monospaced{'' ? ''}}} in {{Monospaced{'' 1?[0-9]?[0-9] ''}}} means both the {{Monospaced{'' 1 ''}}} and the {{Monospaced{'' [0-9] ''}}} range are optional.  In file globbing, the {{Monospaced{'' ? ''}}} or {{Monospaced{'' * ''}}} stand alone and represent either one single character or 0 or more characters by themselves.

The regular expression to identify an IP address is rather complex.  We can't just do {{Monospaced{'' [0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3} ''}}}  if we're only trying to match IP addresses and call it a day.  This one would work to identify a proper IP like 192.168.0.1 but would also match an invalid IP address like 555.555.555.555.


To break this regex down piece by piece:
{{{
((1?[0-9]?[0-9]|2[0-4][0-9]|25[0-5])\.){3}(1?[0-9]?[0-9]|2[0-4][0-9]|25[0-5])
 ^ ^     ^  ^   ----------- -------^ ^  ^  ---------------------------------
 | |     |  |        ^         ^   | |  |                ^-- Repeat the first group again to identify a single octet without the dot at the end.
 | |     |  |        |         |   | |  |--- Repeat the previous group within the ( ) three times for the first three octets 
 |---------------------------------|-------- These ( ) are used for alternation with the |, eg: (This|or|that) will match one of those three words.
   |     |  |        |         |     |------ Escape the dot so it's actually just a dot and cannot represent any single character
   |     |  |        |         |------------ Match an octet from 250-255
   |     |  |        |---------------------- Match an octet from 200-249
   |     |  |------------------------------- Notice the lack of the ?.  We must have at least a single digit in each octet
   |     |---------------------------------- The ? makes the [0-9] optional.  This would allow for a two digit IP address
   |---------------------------------------- This optional 1 allows for octets in the 100-199 range
}}}


! Assignment

* Complete labs [[Lab B1|labs/labB1.pdf]], [[Lab B2|labs/labB2.pdf]], [[Lab B3|labs/labB3.pdf]], & [[Lab B4|labs/labB4.pdf]]
** These all are extra credit labs.
** Be sure to use an uppercase {{Monospaced{''B''}}} as part of the lab number
** We haven't done much with regular expressions and this is a complex topic.  Please make use of the discussion boards if you have any questions or run into any trouble.
! Material

!! Read:
* Chapter 11 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]]
* Chapter 26 in [[The Linux Command Line|http://www.merantn.net/reference/TLCL-19.01.pdf]]
** The goal from Ch 26 is to get an understanding for functions.  The whole thing can be summed up by the example on page 385 and the contents of the //Shell Functions In Your .bashrc File// box on page 391.

!! Watch:
* [[Customizing Your Terminal: .bash_profile and .bashrc files|https://www.youtube.com/watch?v=vDOVEDl2z84]]
* [[Customizing Your Terminal: Adding Color and Information to Your Prompt|https://www.youtube.com/watch?v=LXgXV7YmSiU]]
* [[Creating Aliases for Commands|https://www.youtube.com/watch?v=0liXeoADU6A]]


! Notes

!! Working with the shell

A user automatically executes a shell when they log into a Unix system.  A shell is a special type of program that receives and interprets commands.  This shell is what users interact with on the command line.  You are then logged out of the system when your shell terminates.

The shell you are using is specified as the last column in the {{File{/etc/passwd}}} file.  Bash is the standard default, but many others exist.

The different shells available on the system are usually listed in the file {{File{/etc/shells}}}.
* {{Command{/sbin/nologin}}} : A special shell used for disabled accounts which should not be able to log in.
** You'll see service accounts in {{File{/etc/passwd}}} with this shell.  Users should never be able to log into those service accounts
** A service account with a valid shell is a major red flag and a sign your system has been tampered with.


!!! Bash Customization

RTFM!  {{Command{man bash}}}

!!!! Shell initialization and configuration scripts - executed upon login
Your shell environment can be customized by the system administrator and by the user.  The sysadmin may have some site-specific changes to make.  For example, I change the default umask for everyone on our class shell server.  Each user may customize their shell environment either cosmetically, such as by changing the shell prompt, or functionally, such as by changing the PATH or adding command aliases.

The shell environment is customized through a series of script files.  They are executed in the following order so the admin can set defaults that the users can override with their own customizations.  These scripts work like any of the scripts we've been writing for this class.  Any commands entered will be executed when these scripts run at login or logout.

Interactive login shell execution sequence.  When you first log in to the system, the following are executed (if they exist):
* {{File{/etc/profile}}} contains general system defaults 
* All scripts in the directory {{File{/etc/profile.d/}}}
** Putting individual settings in their own files makes it easier to maintain the changes
** The file {{File{/etc/profile.d/umask.sh}}} sets our default umask
* {{File{~/.bash_profile}}} is controlled by the user for their custom changes
** Put things you want to run during a new login session in this file.  Items in this file will not be executed if a new shell instance is executed.
* {{File{~/.bashrc}}} is executed only in interactive shells.  This file may contain extra functions and aliases.
** Put settings (like aliases and prompt changes) in this file so they will be activated if a new shell session is run
* {{File{~/.profile}}} may exist instead of {{File{~/.bash_profile}}} on some systems
* //User disconnects//
* {{File{~/.bash_logout}}} will execute when the user logs out.

{{Command{ source //file// }}} (or {{Command{ . //file// }}})
<<<
Read and execute commands from //file// in the current shell environment.  Apply changes within an environment script file to the current login shell.
<<<

!!!! Example:
Suppose each time a user logs in, we want to display their last three logins to the screen.  The following would be added to either the site-wide {{File{/etc/profile.d/}}} directory or appended to their {{File{~/.bash_profile}}}.  We would choose {{File{/etc/profile.d/}}} if we didn't want the users to be able to remove it.  We would choose the user's {{File{~/.bash_profile}}} if we wanted users to be able to override it.  We would not put it in {{File{~/.bashrc}}} because we only want this information displayed when the users log in, not when they just run a new shell.

{{{
last -3 $USER
}}}


!!!! Default user dotfiles
The directory {{File{/etc/skel}}} contains default copies of  {{File{~/.bash_profile}}}, {{File{~/.bashrc}}}, and {{File{~/.bash_logout}}}.  These can be copied to the home directories of new users so they have defaults available for their accounts.


!!!! Other shell configuration files:
Readline library - A library for reading a line of input from the terminal 
* Configured by {{File{/etc/inputrc}}} and {{File{~/.inputrc}}}
* These files mostly control additional key bindings
* I like to enable the ~PageUp and ~PageDown keys on other systems for fast command recall.  They're not enabled by default on Debian.

{{File{/etc/~DIR_COLORS}}}
<<<
Configure directory listing colorization
<<<
Disable ls colorization on the ~VMs.  Sometimes color makes it hard to read the text
* Edit {{File{/etc/~DIR_COLORS}}}
* change {{Monospaced{ ''tty'' }}} to {{Monospaced{ ''none'' }}}

{{File{/etc/motd}}} - A ''m''essage ''o''f ''t''he ''d''ay to display to users after they log in


!!! Aliases

Command aliases provide a way to execute long command strings with fewer keystrokes.  Additional options and arguments can be added to an alias.  For example, running {{Command{ l. -l }}} will display all files which begin with a dot in long-listing format.  The {{Command{ l. }}} alias will be translated to {{Command{ ls -d .* }}} and then the {{Monospaced{ -l }}} option will be added.

Display currently defined aliases:  {{Command{alias}}}

Set an alias:  {{Command{alias name='long_cmd -abcd | next_command -efgh'}}}

Standard aliases
* {{Command{ll}}} - Aliased to {{Command{ ls -l }}} on most systems
* {{Command{l.}}} - Aliased to {{Command{ ls -d .* }}} - Display //only// files which begin with a dot.

Override aliases:
* The {{Command{ rm }}} command is usually aliased to {{Command{ rm -i }}} on most systems so you are prompted before deleting each file.  
* Prefix your command with a \ (backslash) to suppress this alias expansion and execute {{Command{ rm }}} normally:  {{Command{ \rm foo }}}

Remove an alias for the current login session:  {{Command{unalias //alias//}}}

{{Command{which}}} and {{Command{type}}}
* These commands will display how each argument would be interpreted if executed as a command
* Aliases will be translated to their actual commands so you know what is really being executed


!!! Core shell options:

Stored in the {{Monospaced{$SHELLOPTS}}} variable
Manipulated with the set command

Enable a shell option:  {{Command{set -o //option//}}}
Disable a shell option:  {{Command{set +o //option//}}}

Examples: 

Toggle command line input method between vi and emacs:
{{Command{set -o vi}}}
{{Command{set -o emacs}}}

Enable noclobber:
With noclobber enabled, an existing file will not be overwritten by redirecting STDOUT to a file 
{{Command{set -o noclobber}}}
{{Command{set +o noclobber}}}

!!! Extra shell options:

{{Command{shopt}}} - Display a list of available options
 ''-s'' to enable an option
 ''-u'' to disable an option

Examples:
* {{Command{ shopt -s cdspell }}} - minor errors in the spelling of a directory component in a cd command are corrected.
* {{Command{ shopt -s checkjobs }}} - lists the status of any stopped and running jobs before exiting an interactive shell.


!!! Environment & Shell variables

In bash, variables are defined on the command line with this syntax:  {{Command{variable=value}}}
By default all variables are local and will not be inherited by child processes

The {{Command{export}}} command will make a variable global and accessible to any child process
{{Command{export}}} can be used when defining a global variable.  eg:  {{Command{export foo=bar}}}
Or, can be used to elevate a currently defined variable to global.  eg:  {{Command{foo=bar ; export foo}}}

{{Command{set}}} will display all currently set variables

{{Command{unset}}} can be used to unset a variable


The shell environment can be manipulated through variables:

For example, the {{Monospaced{$PATH}}} and the prompt variable, {{Monospaced{$~PS1}}}:

The prompt:

* ~PS1 - Primary prompt string is stored in this variable
* Other secondary PS variables exist.
** See https://ss64.com/bash/syntax-prompt.html for more details.

Display your current prompt string: {{Command{ echo $~PS1 }}}

The last character in your prompt - {{Monospaced{ ''#'' }}} vs {{Monospaced{ ''$'' }}}
* {{Monospaced{ ''$'' }}} at the end of the prompt means the user is a regular, unprivileged user.
* {{Monospaced{ ''#'' }}} at the end of the prompt means the user is a superuser.  
* This tagging makes it easier to see your privilege level.

Customized prompts I like for this class.  This prompt makes it easier to see the full path to the current directory and show long command strings on the projector.  The second version adds color.
{{{
PS1='\n[\u@\h \w] :\n\$ '
PS1='\n[\e[1;31m\u\e[m@\e[1;33m\h\e[m \w] :\n\$ '
}}}
Changing the ~PS1 variable by running one of the above commands applies the change immediately to your login session.  It will be reset when a new shell executes.  Add the change to your {{File{~/.bashrc}}} to make it permanent.


!!! Functions:

Functions can provide a shortcut to more complicated command sequences.  They can be used in shell scripts or directly from the command line.

Append to your {{File{~/.bashrc}}}:
{{{
function bak() {
        # This function creates a backup in the current working directory of any single file passed as an argument.
        # Example: bak test.sh
        cp "$@" "$@".`date +%y%m%d:%H%M`.bak
}
}}}

After adding this function to your {{File{~/.bashrc}}}, activate the new version by running  {{Command{ . ~/.bashrc}}} or reloading the shell.


!!! History substitution:

* Your command history is saved in a buffer for the current login session
* By default, the buffer is appended to {{File{~/.bash_history}}} upon logout
* You can then display the current session's history buffer with the {{Command{history}}} command.

There are history configuration variables to change this behavior:
  - {{Command{set | grep HIST}}}
 

! Assignment

* An extra credit environment / scripting lab is available - [[Lab C1|labs/labC1.pdf]]
** Be sure to use an uppercase {{Monospaced{''C''}}} as part of the lab number
** There is no firm due date for this lab.  Please try to have it in by the end of November
@@ This is an extra credit lab, available if you would like to learn more about LVM.  The [[Lab E Instructions|labs/labE-instructions.pdf]] will walk you through different LVM configuration scenarios.  @@

<<tiddler [[Lab E - Logical Volume Manager]]>>
! Material
!! Read - Linux Administration
* Chapter 24 - NFS - Required
* Chapter 11 - Virtual File Systems - Optional, though there's some good info here


! Notes

!! Outline:  Other filesystem and mount types

The LVM lab had us working with an xfs filesystem on standard internal storage.  Other filesystem types exist to bring a great deal of flexibility to how we can access our data.

* NFS: Network file system - Mount filesystems from other servers.
** Allows for central file servers to provide data to other systems
** A common way to make home directories accessible to all systems in the network
* FUSE - Filesystem in Userspace
** A kernel module and user tools to allow users to mount various filesystems.
* bind / nullfs mounts
** Mount a directory from one location to another.  This is handy to have a directory appear in two locations on the filesystem.  
** {{File{/opt/pub/ncs205/queue}}} & {{File{/opt/pub/ncs205/graded}}} are bind mounts on the class shell server
* Cloud storage - outsourcing and offloading storage to cloud providers
** Mount an Amazon storage bucket or similar
** [[Backblaze B2|https://www.backblaze.com/b2/cloud-storage.html]] storage is a fast and low cost option I've been experimenting with lately for offsite backups
*** This is what I use for offsite backups of my personal servers & our class lab environment between semesters
* File-backed file system - Store an entire filesystem in a large file
** Makes transport or backup of the filesystems pretty simple since everything is contained in a single file.
** This also simplifies encryption
* Filesystem encryption, Linux Unified Key Setup (LUKS)
** Keep your files secure.  Especially handy for USB storage
** For example, securing financial data on your computer


!! Network file system (NFS)

Developed by Sun Microsystems in 1984 for diskless clients

NFS employs a client / server model
* The NFS server contains the files you wish to share
* The NFS clients mount shares from the NFS server
Exports list (list of shared directories) on the NFS server
* Stored in the file {{File{/etc/exports}}}
* The exports list contains the directories to share, who can access them, and what permissions are available
File locking
* A way to ensure a file isn't being modified by two users at the same time
root access 
* A means to quasi-block the root user from accessing the files on the volume.
* A good safety measure to prevent accidental deletion of an entire NFS filesystem
quota management
* Limit the amount of disk space certain users or groups of users can consume


!! NFS Versions:  

* v2:
** Stateless (over UDP)
*** Lightweight and faster at the expense of reliability
*** Fully deprecated 
* v3:
** 64-bit file sizes (files larger than 2gb)
** Still stateless (UDP) by default
** A TCP option was added
** Performance improvements
** Required auxiliary protocols to manage filesystem exporting (mountd), server status (statd), and file locking (lockd).
*** All coordinated by RPC & the portmapper - {{Command{rpcinfo -p}}}
*** Portmapper:
*** Difficult to work with through firewalls and NAT since many services and ports are used
** Force a downgrade to NFS version 3 mount:  {{Command{ mount -t nfs -o nfsvers=3  //host:filesystem mount_point// }}}
*** Can bypass some of the ~NFSv4 security measures or for troubleshooting
* v4: 
** Additional performance improvements
** Stateful - TCP only, runs on port 2049
*** Eliminates the need for those auxiliary protocols
*** Traverses firewalls and NAT much easier since it only uses 1 port
*** Easy to setup firewall option
** Improved security
** Mandates security and ACL
*** Kerberos support for authentication


!! Configuration

Server configuration:  {{File{/etc/exports}}}
* see {{Monospaced{exports}}} man page for options
* Each line contains a directory to share with other systems
* Usage:  {{Monospaced{//directory_to_share//  ~IPs_to_share_with(options)}}}
** Example:  {{Monospaced{/home  *(rw,root_squash)}}}
** {{Monospaced{root_squash}}} (default) vs. {{Monospaced{no_root_squash}}}
*** {{Monospaced{root_squash}}} - The root user on the client system does not have superuser privileges on the NFS volume.  This ''squash''es the superuser privileges so root is treated as a regular user and must obey the file & directory permissions that are set.
*** {{Monospaced{no_root_squash}}} - Allow the root user on the client system to have superuser privileges on the NFS volume.

nfs options (client side)
* Typically added when the filesystem is mounted or provided in {{File{/etc/fstab}}}
* see mount and nfs man pages
* nosuid for home directories
** suid programs are those which run as the user that owns them
** The {{Command{passwd}}} command is a suid program.  It's the only way a regular user would be able to modify a protected system file (eg: {{File{/etc/shadow}}})
** enable this option for a security improvement
* noauto - do not mount automatically on boot (/opt/backups filesystem)
** Add to the options section in the fstab file
Use the {{Command{ mount }}} command to show what is currently mounted on the system and the settings used to mount the filesystem
Display available NFS mounts offered by an NFS server:  {{Command{ showmount -e //~NFS_server// }}}

User id mapping:
* NFS clients map unknown users to nobody
* Big differences between NFS v3 and v4
** NFS v4 requires a separate service to properly map ~NFSv4 names (user@domain string) to local UID & GID
** May need to set the domain in {{File{ /etc/idmapd.conf }}} if DNS is not fully configured (forward and reverse)
* To properly map user ids:
** revert to NFS v3, use the option: {{Monospaced{''-o nfsvers=3''}}}
** or, start idmapd (with nfs service)
* To clear the idmapd cache: {{Command{ nfsidmap -c }}}
** This may be helpful while troubleshooting an initial NFS setup

!!! Our storage server setup

Packages to install:
* NFS Server: {{Monospaced{nfs-utils nfs4-acl-tools portmap}}}
* NFS Clients: {{Monospaced{nfs-utils }}}
Configure exports file on NFS server:
* {{Command{ printf "/home\t\t192.168.12.%d/29(rw,sync,root_squash,secure)\n" $ip >> /etc/exports }}}
** This command will write the configuration to your {{File{ /etc/exports }}} file.  Cat it out to display the contents.
** Replace $ip with the last octet of the first IP address of your range
Start services
* On the NFS server:
** {{Command{systemctl enable nfs-server}}}
** {{Command{systemctl start nfs-server}}}
** Tests: 
*** {{Command{ exportfs -rv }}}
*** {{Command{ showmount -e }}}
*** {{Command{ rpcinfo }}}
/%
*On Client
**systemctl enable netfs
**systemctl start netfs
%/

Attach NFS clients:
* This assumes you have DNS and the {{File{resolv.conf}}} file set up properly to use short hostnames instead of FQDN.
* Temporarily mount the NFS home directory to /mnt:  {{Command{ mount -t nfs4 files:/home /mnt }}}
** Copy any necessary files from it to the home directories in {{File{/home/}}}
** Unmount it from the temporary mount point once all files have been copied:  {{Command{umount /mnt}}}
* Unmount any existing local /home/ filesystem you may have, eg: from the LVM lab: {{Command{umount /home}}}
* Comment out the line for the LVM /home/ filesystem in the {{File{/etc/fstab}}} so it does not conflict with the NFS mount.
* Add the new NFS mount to fstab:
** {{Command{ printf "files:/home\t\t/home\t\t\tnfs4\tsoft,intr,rsize=8192,wsize=8192,nosuid\t0 0\n" >> /etc/fstab }}}
* Test fstab configuration.  Mount the new NFS home directory filesystem:  {{Command{ mount /home }}}
** It's important to ensure your fstab file is error free.  If it contains an error your system may not fully boot to a multiuser runlevel on next system start.


Test
* On NFS Server:
** {{Command{ showmount -e }}}
** {{Command{ nfsstat }}}
* On NFS Client:
** {{Command{ showmount  -e files }}}
** {{Command{ mount }}}

{{Warning{Warning:  The ''rpc-bind'' and ''mountd'' services must be added to the firewall on your files VM in order for the {{Command{showmount}}} command to function.}}}


! Assignment
<<tiddler [[Lab F1 - Monitoring disk usage with Nagios]]>>
----
<<tiddler [[Lab F2 - NFS]]>>
 @@ this page may need some edits.  It wasn't reviewed for the Fall semester. @@



! Material
!! Read:
* Linux Administration Chapter 9, pages 197-199 (cron) 
* Linux Administration Chapter 31 (Backups)


! Notes
!!Scheduled tasks

Tasks may be scheduled using these two services
* cron - run periodically at a specified interval
* at - run once at a scheduled date & time

!!! Examples of tasks to schedule:
* Log rotation
* SSL certificate renewal
* Daily reports
* Garbage collection
* Vulnerability checks
* System updates
* Source code updates
* mysqldump - database backups
** create a read-only mySQL account
** save backup to /opt/work/backups
* System backups

* Scheduling a one-time job for a more convenient time
** ie: something that may be bandwidth or CPU intensive.


!!! cron - run periodically
* crond
** A service that is installed with the OS and running by default
** Started on system boot 
** Permission granted via {{File{/etc/cron.{deny,allow&#125;}}} files
* System cron configuration:
** {{File{/etc/crontab}}} file
** {{File{/etc/cron.*}}} directories
* User cron configuration: 
** {{Command{crontab}}} command for scheduled user jobs
** {{Command{crontab}}} [-e|-l]
** User cron config files are stored in {{File{/var/spool/cron/}}} if you'd like to review them

!!!Crontab file format
*See {{File{/etc/crontab}}}
* Declare variables
* Output optionally sent to the owner via email (on by default) 
* Command execution fields:
** Time to execute command, (minute, hour, day, month, weekday) 
** User to run as (when running from system crontab)
** Command string to execute
* Special time nicknames:
** @reboot
** @daily
** Complete list in man page
* Special time formatting:
** */2 : Every 2 hours, months, days, etc
** 1-4 : From 1 to 4
** 1-6/2 : Every other between 1 and 6 
** 2,5 : 2 and 5

!!! Cron man pages:
* Check man page for the cron command
* Notice the ''See Also'' section at the bottom where crontab is listed in two different manual sections
* {{Command{man man}}}


!!! at - run once
* System dependent:
** ~FreeBSD - The atrun utility is executed via cron every 5 minutes to check for and run {{Command{at}}} scheduled tasks
** ~CentOS - atd - Daemon running in the background for processing tasks scheduled via the {{Command{at}}} utility
** Not installed or running by default (on ~CentOS)
** Must be set to start on boot and be manually started after installation, just like any other new service we add.
* {{Command{at}}} user command with time for task to execute specified as an argument
** flexible time formatting
*** at +15 minutes
*** at 4:15
*** at 4pm tomorrow
*** at 4pm October 15
* Display scheduled job with {{Command{at -c}}}
** Scheduled jobs stored in /var/spool/at files
** atq - display scheduled at jobs
** atrm - remove scheduled at job
** Can use {{File{/etc/at.{allow,deny&#125;}}} files to control access


!! Backups

Traditional backup mediums
* Tapes
* Optical (cd/dvd)
* Disk 
* Cloud
** S3 buckets, Google Drive, Backblaze, Nextcloud, etc.

Backup Goals:
* Protect from disk failure (catastrophic loss)
* Recover a previous version of a file (revision history)

Backup strategies
* What to back up
* Interval (how often)
* When to run (day/time)
* Full or incremental
** Full - All files
** Incremental - Files which have changed since the last full backup
* Destination: Onsite or offsite
* Security: Physical & electronic
** Don't let your backups be stolen
** Encrypt your backups so others cannot read them
* Testing
** Make sure any backup scheme works before you really need it
** Periodically test recovery from backup to ensure your strategy is sound and everything works

Standard UNIX Tools:
* tar - Create an archive of a directory structure, similar to zip in the Windows world
* rsync - Synchronize directories between two systems
* dump/restore - Create a single backup file of a filesystem or save a filesystem to tape
** These tools depend upon filesystem:
*** Standard {{Command{dump}}} & {{Command{restore}}} will work for ext filesystems
*** The  {{Command{xfsdump}}} & {{Command{xfsrestore}}} commands will be required for our xfs filesystems
* rsnapshot - A more intelligent way to synchronize directories while keeping old versions of files.  A good disk-to-disk backup system for Unix/Linux.

LVM Filesystem Snapshots:
* sync - flush filesystem buffers first
* Snapshot - record a filesystem at a point in time
** {{Command{lvcreate -s -L //size// -n testsnapshot /dev/Storage/work}}}
*** size can be fixed or variable.  See man page.  ( -l 50%FREE )
** Backups - use for backing up data
*** run dump against the snapshot device then delete it
** Testing: Use for tests & debugging.  Discard snapshot after use.
*** Mount the snapshot as the original mountpoint and merge/destroy when finished
*** Merge with: {{Command{lvconvert &dash;-merge //snapshot//}}}

Snapshot demo on the files VM to show how files can be recovered from snapshot if they are lost.
 - This is a useful way to take a backup of a system before making changes.  If something goes wrong, you can easily revert from the backup.
 - This assumes you set up the /home filesystem in the LVM lab
<<<

Create some data in the home volume and verify your new file:
{{Command{dd if=/dev/zero of=/home/zeros bs=10M count=3 }}}
{{Command{ls -l /home }}}

Capture the hash of the sample file.  We'll compare this hash to the file after we recover it from snapshot:
{{Command{ md5sum /home/zeros }}}

Check the amount of free space in the volume group.  Note the ~VFree value for the Storage VG
{{Command{vgs}}}

Create a snapshot of the home logical volume
{{Command{lvcreate &#045;-size 100M &#045;-snapshot &#045;-name home_ss_fall20 /dev/Storage/home }}}

Check the amount of free space after snapshot creation.  Note the ~VFree value for the Storage VG
{{Command{vgs}}}

Display logical volume information.  Note the Data% value for the snapshot.  If this reaches 100% the snapshot will be destroyed. 
Only changes will be written to the snapshot.
{{Command{lvs}}}

Display logical volume information, including snapshots associated with a LV
{{Command{lvdisplay /dev/Storage/home }}}

Display additional snapshot information:
{{Command{lvdisplay /dev/Storage/home_ss_fall20 }}}

Delete your file from the /home/ filesystem and verify it is no longer there:
{{Command{ rm /home/zeros }}}
{{Command{ls -l /home }}}

Mount the snapshot somewhere
{{Command{mount /dev/Storage/home_ss_fall20 /mnt }}}

''Note'':  You may receive a //wrong fs type// error from the mount command here.  If you do, run the next command to generate a new UUID for the snapshot filesystem:
{{Command{xfs_admin -U generate /dev/mapper/Storage-home_ss_fall20}}}

Just because space is shown as available, doesn't mean it really is.  We're still limited by the size of the snapshot logical volume
{{Command{df}}}

We can see the lost file is still on the snapshot:
{{Command{ls -l /mnt/ }}}

Recover your lost file from the snapshot and verify it was successfully recovered:
{{Command{ cp /mnt/zeros /home/ }}}
{{Command{ls -l /home/ }}}

Verify the hash of the recovered file to the hash you obtained above:
{{Command{ md5sum /home/zeros }}}

Unmount the snapshot:
{{Command{umount /mnt}}}

We can remove a snapshot we no longer need:
{{Command{lvremove /dev/Storage/home_ss_fall20 }}}
<<<
Other useful snapshot commands:
* Enlarge the snapshot:  {{Command{lvextend &#045;-size 200M /dev/Storage/home_ss_fall20 }}}
* Fully restore the snapshot to the filesystem, discarding all changes since the snapshot was taken:
** Unmount the filesystem:  {{Command{umount /home}}}
** Restore the snapshot:  {{Command{ lvconvert &#045;-merge /dev/Storage/home_ss_fall20 }}}


System backups with dump
* The {{Command{dump}}} and {{Command{restore}}} utilities are not installed by default.  They're in the ''dump'' package.
* sync - flush any open filesystem buffers
* Back up filesystems with {{Command{dump}}}
** Track backups with -u option
*** Saved to /etc/dumpdates
*** Access backup history with {{Command{dump -[wW]}}}
** Backup levels: full or incremental
*** full (level 0) : All files
*** incremental (levels 1 to 9) : Files changed since last level of a lesser or equal value
** Compression?  gzip or bzip2 available.
** Mark files to skip with nodump flags
*** Display a file's flags:  {{Command{lsattr /opt/src/zeros}}}
*** Add the nodump flag:  {{Command{chattr +d /opt/src/zeros}}}
** Examples:
*** Take a level 0 backup of /home, saving it to {{File{/opt/work/backup/home.200425.lvl0.dump}}}:  {{Command{dump -0auf /opt/work/backup/home.200425.lvl0.dump /home}}}
*** Take a level 1 backup of /home, compressing it and saving it to {{File{/opt/work/backup/home.200425.lvl0.dump.bz2}}}:  {{Command{dump -1auf - /home | bzip2 > /opt/work/backup/home.200425.lvl0.dump.bz2 }}}
**** The - after the -f option instructs the dump command to send its output to STDOUT.  Many unix commands will accept a - like this instead of a file name to utilize the STDOUT stream instead.
* Restore files with {{Command{restore}}}
** -x : extract specified files (or all)
** -r : rebuild filesystem (for incremental chaining)
** -i : interactive restore
* The {{Command{xfsdump}}} & {{Command{xfsrestore}}} tools work similarly but have a different set of options:
** These tools are available in the xfsdump package
** Usage:  {{Command{xfsdump -F -l level -f file filesystem}}} 
*** //level// is the backup level to take (0 through 9)
*** //file// is the file to save your backup to
*** //filesystem// is the filesystem you wish to back up
***  The -F option will suppress prompts for session and media labels.  These can be provided with the -L and -M options, respectively.  
*** The -o option may be necessary to overwrite an existing file if the -F option was provided.


You can also combine with pgp / gpg to encrypt the backup files.


A couple other backup tools worth mentioning
* tar
* dd - block level duplication of a device
* clonezilla
* bacula - Cross platform full backup solution
* ntfsclone - Capture a NTFS filesystem
* cloud services
** amazon cloud - (S3 or glacier storage)
** tarsnap - secure backups
** Backblaze - Cross platform carbonite clone

! Assignment

<<tiddler [[Lab G - Backups & Cron]]>>
Like most wikis, TiddlyWiki supports a range of simplified character formatting:
| !To get | !Type this |h
| ''Bold'' | {{{''Bold''}}} |
| ==Strikethrough== | {{{==Strikethrough==}}} |
| __Underline__ | {{{__Underline__}}} (that's two underline characters) |
| //Italic// | {{{//Italic//}}} |
| Superscript: 2^^3^^=8 | {{{2^^3^^=8}}} |
| Subscript: a~~ij~~ = -a~~ji~~ | {{{a~~ij~~ = -a~~ji~~}}} |
| @@highlight@@ | {{{@@highlight@@}}} |
| Tiddler Comments | {{{/%}}} text {{{%/}}}. |
| [[Make me a tiddler]] | {{{[[Make me a tiddler]]}}} |
| ~NoTiddler | {{{~NoTiddler}}} |
| {{{This is monotype}}} | {{{{{{This is monotype}}}}}} |

*sample:
|!th1111111111|!th2222222222|
|>| colspan |
| rowspan |left|
|~| right|
|bgcolor(#a0ffa0):colored| center |
|caption|c
For advanced effects, you can control the CSS style of a table by adding a row like this:
{{{
|cssClass|k
}}}


<<<
The highlight can also accept CSS syntax to directly style the text:
@@color:green;green coloured@@
@@background-color:#ff0000;color:#ffffff;red coloured@@
@@text-shadow:black 3px 3px 8px;font-size:18pt;display:block;margin:1em 1em 1em 1em;border:1px solid black;Access any CSS style@@
<<<
!!@@display:block;text-align:center;centered text@@

//For backwards compatibility, the following highlight syntax is also accepted://
{{{
@@bgcolor(#ff0000):color(#ffffff):red coloured@@
}}}
@@bgcolor(#ff0000):color(#ffffff):red coloured@@

/*{{{*/

@@color(yourcolorhere):colored text@@
@@color(fuchsia):colored text@@
@@bgcolor(yourcolorhere):your text here@@

[img[title|filename]]
[img[filename]]
[img[title|filename][link]]
[img[filename][link]]
[[text|url]]
[[Existing Tiddler Name|UglyTiddlerName]]

<<macro>>
<hr> = ----

*Entry One
**Sub-entry A
***Sub-sub-entry i
***Sub-sub-entry ii
**Sub-entry B
*Entry Two
*Entry Three
Use number signs (#'s) instead of asterisks for <OL type=1>

Tables:
|!Headings: add an exclamation point (!) right after the vertical bar.|!Heading2|!Heading3|
|Row 1, Column 1|Row 1, Column 2|Row 1, Column 3|
|>|>|Have one row span multiple columns by using a >|
|Have one column span multiple rows by using a ~|>| Use a space to right-align text in a cell|
|~|>| Enclose text in a cell with spaces to center it |
|>|>|bgcolor(green):Add color to a cell using bgcolor(yourcolorhere):|
|Add a caption by ending the table with a vertical bar followed by a c|c

!Header 1
!!Header 2
!!!Header 3
!!!!Header 4
!!!!!Header 5


Here's the code for a blockquote:
<<<
Here's the quoted text.
<<<

/*}}}*/

!Links
[[Calendar generator|http://zrenard.com/tiddlywiki/cal.php]]



Entities in HTML documents allow characters to be entered that can't easily be typed on an ordinary keyboard. They take the form of an ampersand (&), an identifying string, and a terminating semi-colon (;). There's a complete reference [[here|http://www.htmlhelp.com/reference/html40/entities/]]; some of the more common and useful ones are shown below. Also see [[Paul's Notepad|http://thepettersons.org/PaulsNotepad.html#GreekHtmlEntities%20HtmlEntitiesList%20LatinHtmlEntities%20MathHtmlEntities]] for a more complete list.

|>|>|>|>|>|>| !HTML Entities |
| &amp;nbsp; | &nbsp; | no-break space | &nbsp;&nbsp; | &amp;apos; | &apos; | single quote, apostrophe |
| &amp;ndash; | &ndash; | en dash |~| &amp;quot; | &quot; | quotation mark |
| &amp;mdash; | &mdash; | em dash |~| &amp;prime; | &prime; | prime; minutes; feet |
| &amp;hellip; | &hellip; |	horizontal ellipsis |~| &amp;Prime; | &Prime; | double prime; seconds; inches |
| &amp;copy; | &copy; | Copyright symbol |~| &amp;lsquo; | &lsquo; | left single quote |
| &amp;reg; | &reg; | Registered symbol |~| &amp;rsquo; | &rsquo; | right  single quote |
| &amp;trade; | &trade; | Trademark symbol |~| &amp;ldquo; | &ldquo; | left double quote |
| &amp;dagger; | &dagger; | dagger |~| &amp;rdquo; | &rdquo; | right double quote |
| &amp;Dagger; | &Dagger; | double dagger |~| &amp;laquo; | &laquo; | left angle quote |
| &amp;para; | &para; | paragraph sign |~| &amp;raquo; | &raquo; | right angle quote |
| &amp;sect; | &sect; | section sign |~| &amp;times; | &times; | multiplication symbol |
| &amp;uarr; | &uarr; | up arrow |~| &amp;darr; | &darr; | down arrow |
| &amp;larr; | &larr; | left arrow |~| &amp;rarr; | &rarr; | right arrow |
| &amp;lArr; | &lArr; | double left arrow |~| &amp;rArr; | &rArr; | double right arrow |
| &amp;harr; | &harr; | left right arrow |~| &amp;hArr; | &hArr; | double left right arrow |

The table below shows how accented characters can be built up by substituting a base character into the various accent entities in place of the underscore ('_'):

|>|>|>|>|>|>|>|>|>|>|>|>|>|>|>|>|>| !Accented Characters |
| grave accent | &amp;_grave; | &Agrave; | &agrave; | &Egrave; | &egrave; | &Igrave; | &igrave; | &Ograve; | &ograve; | &Ugrave; | &ugrave; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; |
| acute accent | &amp;_acute; | &Aacute; | &aacute; | &Eacute; | &eacute; | &Iacute; | &iacute; | &Oacute; | &oacute; | &Uacute; | &uacute; | &nbsp; | &nbsp; | &Yacute; | &yacute; | &nbsp; | &nbsp; |
| circumflex accent | &amp;_circ; | &Acirc; | &acirc; | &Ecirc; | &ecirc; | &Icirc; | &icirc; | &Ocirc; | &ocirc; | &Ucirc; | &ucirc; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; |
| umlaut mark | &amp;_uml; | &Auml; | &auml; |  &Euml; | &euml; | &Iuml; | &iuml; | &Ouml; | &ouml; | &Uuml; | &uuml; | &nbsp; | &nbsp; | &Yuml; | &yuml; | &nbsp; | &nbsp; |
| tilde | &amp;_tilde; | &Atilde; | &atilde; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &Otilde; | &otilde; | &nbsp; | &nbsp; | &Ntilde; | &ntilde; | &nbsp; | &nbsp; | &nbsp; | &nbsp; |
| ring | &amp;_ring; | &Aring; | &aring; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; |
| slash | &amp;_slash; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &Oslash; | &oslash; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; |
| cedilla | &amp;_cedil; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &nbsp; | &Ccedil; | &ccedil; |

<HTML><a href="http://checkettsweb.com/#%5B%5BCSS-Colors%20and%20Backgrounds%5D%5D%20%5B%5BCSS-Text%20and%20Fonts%5D%5D%20OldStyleSheet%20Rin%20%5B%5BTiddlyWiki%20Structure%5D%5D">CSS Info</a></html>

<<version>>
[[Plugins]]
[[Styles]]
[[TagglyTagging]]
[[systemConfig]]
[[systemTiddler]]
[[excludeSearch]]
[[excludeLists]]
! Material

This page will discuss two topics:

1. Authenticating to Unix systems with SSH keys
2. Terminal multiplexing with GNU {{Command{screen}}}

These are both optional, but good to know and will make working with our lab systems much easier.


!! 1. Authenticating to Unix systems with SSH keys

Two mechanisms exist for SSH authentication:  
# normal passwords 
# key pairs used in asymmetric encryption
A key pair contains a private key that you keep secure and a public key that is distributed to the systems you have permission to connect to.  The private key you have is used to establish your identity. The presence of your public key on a remote system is used to establish your authorization to access it.  Private keys should be secured with a passphrase to ensure they cannot be maliciously used if they are captured by an attacker.  SSH authentication with passphrase-protected key pairs is much safer than passwords, since now an attacker must also capture the private key file in order to impersonate you.  For this reason, it is common to minimally block password authentication to a server when logging in as root or ideally only allow key authentication for all users.  More sensitive systems should require key-based authentication as part of general system hardening.  

Forcing key-based authentication gives us multi-factor authentication (MFA) when the key is properly secured with a passphrase:
# Something you have (the private key)
# Something you know (the key's passphrase)


We begin by creating a SSH keypair on the class shell server.  

{{Command{cd ~/.ssh/}}}
<<<
Change to the ~/.ssh/ directory, the default location for a user's ssh configuration files.
<<<

{{Command{ssh-keygen -t ed25519 -f  ncs205 }}}
<<<
Create a SSH key pair using default settings, except for changing the key type to ed25519 and naming the key ncs205.  The algorithm and key size can also be adjusted via flags.  The remaining defaults are reasonable.  You will be prompted to set a passphrase.  Choose something secure which you can remember.  This [[xkcd cartoon|https://xkcd.com/936/]] may be helpful.  The more entropy the better.
<<<

{{Command{ssh-copy-id -i ncs205 root@192.168.12.''//x//''}}}
<<<
Copy your public key to each of your ~VMs.  It will be saved to the file {{File{~/.ssh/authorized_keys}}} on the remote system (your VM).  The administrator may have to add the key for you on systems you're not able to log into yet.
<<<

{{Command{ssh -l root 192.168.12.''//x//''}}}
<<<
Try to connect to your test VM.  You should be prompted for a password since our private key is not in the default location and was not specified on the command line.
<<<

{{Command{ssh -i ncs205 -l root 192.168.12.''//x//''}}}
<<<
You should now be prompted for your SSH passphrase instead of password.  If an available and authorized SSH key is found it will be offered for use instead of your password.  Authentication will fall back to regular password if key-based fails.
<<<

{{Command{exit}}}
<<<
Disconnect from your VM
<<<

Having to specify the username and key file to use for each login to your ~VMs can be eliminated by using a ssh client configuration.  Edit {{File{~/.ssh/config}}} on the shell server and set a default username and ssh key for class ~VMs

Edit the file {{File{~/.ssh/config}}} and add the following:
{{{
Host test
	HostName 192.168.12.x
        
Host www
	HostName 192.168.12.x

Host *
	IdentityFile ~/.ssh/ncs205
	User root
}}}
Be sure to change the x above to your actual IP address.  This addition will also eliminate the need for specifying full IP addresses for each connection.  You'll be able to then connect with just {{Command{ssh //hostname//}}} and the IP address, user, and key file will be added for you.  Add new ~VMs to the config as they are issued to you.


!!! SSH agent - Unlock your key once for multiple connections

The SSH agent is a keyring which your SSH private keys can be attached to.  Once set up, future connections will look to that key ring when an authentication request is made instead of prompting you for your SSH passphrase each time.  The idea is one authentication event for many remote connections.

{{Command{ssh-agent > ~/.ssh/env}}}
<<<
Create a SSH agent, saving the environment information to the specified file.  This environment must be imported in order to make use of the agent.
<<<

{{Command{eval `cat ~/.ssh/env`}}}
<<<
Import the environment settings into the current shell environment
<<<

{{Command{ssh-add ~/.ssh/ncs205}}}
<<<
Add your ncs205 private key to your ssh agent keyring.  You should be prompted for its passphrase.
<<<

Once the SSH agent is established you may communicate to your lab systems without being prompted to authenticate each time.  Notice the lack of passphrase prompts:

{{Commands{
[merantn@shell ~]$ ''ssh test''
Last login: Mon Oct 19 15:18:26 2020 from 192.168.12.10

[root@test ~]# ''exit''
logout
Connection to 192.168.12.24 closed.

[merantn@shell ~]$ ''ssh www''
Last login: Mon Oct 19 15:19:51 2020 from 192.168.12.10

[root@www ~]# ''exit''
logout
Connection to 192.168.12.25 closed.
}}}


!! 2. Terminal multiplexing with GNU screen

GNU {{Command{screen}}} is a very useful tool for those working with the command line on many systems from different locations on a daily basis.  From within {{Command{screen}}}, connections can be made to many systems.  The user can detach from the screen session, change physical locations, and reconnect to their screen session continuing work where they left off.  GNU {{Command{screen}}} and ssh agents make a great combination for connecting to multiple machines over the course of your work day.

This video might help get you started:  https://www.youtube.com/watch?v=Mw6QvsChxo4

{{Command{cp ~merantn/.screenrc ~/}}}
<<<
Copy this default screen configuration file to your home directory.  This will establish some baseline settings.
<<<

If you first run the steps in Section 1 to set up ssh-agent and then launch {{Command{screen}}} to start your screen instance, your SSH Agent will be established for all screen windows.  You thus will not need to authenticate to your ~VMs as you move between them.  You will only need to run the {{Command{screen}}} command without any options once.  It will stay active with your tasks running in the background until you either terminate it or the class shell server restarts.

Screen commands:
| !Key Sequence | !Action |
| ~CTRL-a , 0 |Switch to window 0|
| ~CTRL-a , 1 |Switch to window 1|
| ~CTRL-a , 2 |Switch to window 2|
| ~CTRL-a , //n// |Switch to window //n//|
| ~CTRL-a , c |Create a new screen window|
| ~CTRL-a , " |Display available screen windows|
| ~CTRL-a , ' |Switch to a screen window by number|
| ~CTRL-a , A |Title the current screen window|
| ~CTRL-a , ? |Display screen help|

With screen now running, enter these screen commands to get things set up:
* Create a new window:  {{Command{~CTRL-a, ~CTRL-c}}}
* Switch to window 1: {{Command{~CTRL-a, 1}}}
** Connect to your test VM with ssh
* Create a new window:  {{Command{~CTRL-a, ~CTRL-c}}}
* Switch to window 2: {{Command{~CTRL-a, 2}}}
** Connect to your www VM with ssh
* Switch to window 0:  {{Command{~CTRL-a, 0}}}
** Use this window to work on the class shell server
* Detach from screen (as if you're done working for the day):   {{Command{~CTRL-a, d}}}
* Reconnect to your screen session (as though you're coming back later to continue work):  {{Command{screen -dr}}}

Now, when you disconnect from the shell server, all of your tasks will stay running in the background.  Log in again and run   {{Command{screen -dr}}} to continue where you left off.  Create new windows inside of screen as you need them for new ~VMs or to run additional tasks concurrently. 

More screen commands:
| !Key Sequence | !Action |
| ~CTRL-a , &#124; |Split window vertical|
| ~CTRL-a , S |Split window horizontal|
| ~CTRL-a , TAB |Switch between split windows|
| ~CTRL-a , X |Close a split window|
|>|>|
| ~CTRL-a , d |Detach from screen|
| ~CTRL-a , :password |Set a password for your screen session|


{{Note{[[This video|SSH]] may be a helpful demonstration}}}


3. Defeating firewalls with SSH to access protected resources

See the [[Tunnels & Proxies with SSH]] page.


/%

!! 1. Authenticating to Unix systems with SSH keys

Two different sets of keys are used with SSH:  one for securing communication between the client and server and, optionally, a set to authenticate remote users.  

!!! SSH Host keys

* Public key crypto is used for encrypting communication between client and server
* Server keys are stored in the files {{File{/etc/ssh/ssh_host_*}}}
* Fingerprints for new systems are shown and stored in the user's {{File{~/.ssh/known_hosts}}} file.  This keeps a record of trusted systems.
** This file can leak identities of systems you are communicating with
** Hash your current known hosts file if you'd like to mask the systems: {{Command{ ssh-keygen -H }}}
* Fingerprints for known systems are compared on each login to identify MITM attacks
** The user is alerted if a mismatch is found
*** This is the warning you see if you connect to a new system for the first time or there's a server change when connecting to an existing system.
** The user should take steps to verify the host key has legitimately changed.  If this change is due to a MITM attack, the attacker could capture your credentials
** Display the fingerprint of a SSH public key: {{Command{ssh-keygen -lf  //file//.pub}}}

!!!! Demo:

{{Monospaced{
[merantn@shell ~]$ ''ssh head.ncs205.net''
The authenticity of host 'head.ncs205.net (192.168.12.15)' can't be established.
ECDSA key fingerprint is ~SHA256:bHKouQIItQNr5r1Im3tI0uk2ArpfYU1Yvop0SQhOLVY.
ECDSA key fingerprint is ~MD5:9f:0d:9c:2d:f6:2c:ef:9e:6a:bb:ab:e5:4b:c5:55:e4.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'head.ncs205.net' (ECDSA) to the list of known hosts.

# You can't log into this system, so press ~CTRL-C to abort:
merantn@head.ncs205.net's password:

# Here's the fingerprint of this system:
[merantn@shell ~]$ ''grep head ~/.ssh/known_hosts''
head.ncs205.net ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBhZIx/NElfvUL0nI/KwOotqk5Fypf01LQpn8YIe7FfXI8xnwEzESmqZTOiC791SrvOaoIxIFu9WW9xO7+BcgSw=

# Hash the hosts in the file:
[merantn@shell ~]$ ''ssh-keygen -H''
/home/merantn/.ssh/known_hosts updated.
Original contents retained as /home/merantn/.ssh/known_hosts.old
WARNING: /home/merantn/.ssh/known_hosts.old contains unhashed entries
Delete this file to ensure privacy of hostnames

Now a grep returns no results:
[merantn@shell ~]$ ''grep head ~/.ssh/known_hosts''
}}}

%/

! Additional Details

This video is a deep dive into SSH and has a lot of great info:  https://www.youtube.com/watch?v=fnkG9_jy2qc
Type the text for 'excludeLists'
Type the text for 'excludeSearch'
!!!! [[hack3 break-in]]
A VM named ''hack3'' has just been added for each of you.  It's presently powered on and assigned to the 7th IP address of your range via DHCP.

This VM has information which can be found and implants which can be exploited to gain further access.  All required knowledge is based on our class material.  Properly utilizing SSH will yield the first flag and an implant can be exploited to ultimately obtain root-level access.   

Access hack3 for a little CTF (capture the flag).  These CTF challenges are a fun way to demonstrate skill, creativity, and understanding of the material.
* This VM is currently running on the 7th IP address of your range
** Create an A record so host name hack3 points to this IP address
* The ultimate objective is to break into the system and gain full root privileges.
* Flags to capture are in the following files:
** {{File{flag1.txt}}}
** {{File{/root/flag2.txt}}}
** The "flag" to capture is the string of text located inside each of those files.  It will look something like this:  {{Monospaced{ ''flag{//sometext//}'' }}}

What's the highest number flag you can access?  Capturing both shows you have root-level access and have fully taken over the system.

The flags can only be obtained by interacting with services running on the VM.  
* Don't overthink it; the flags only require basic interaction with the system
* Everyone seems to first gravitate towards brute-force tactics. This is the path of the unskilled. These are actually rarely successful when targeting a specific system or account and will not help you here.

This challenge will primarily draw from material covered in [[Week 14, Part 1]] (Linux Firewalls), [[Week 13, Part 2]] (Access control and user management), [[Week 13, Part 1]] (Scheduled Tasks), and Section 1 of &nbsp;[[Working more efficiently with GNU screen & SSH keys]] (Authenticating to Unix systems with SSH keys)
* And will require a bit of creativity
* Linux Administration Chapter 23 also contains a lot of good info on SSH

Most system intrusions take advantage of misconfiguration to gain a higher level of access.  Developing an understanding of how things work is necessary for a defender to properly configure (and thus secure) their systems.  Understanding how things work also makes it easier for an attacker to exploit any misconfigurations.  

The first two flags will be found through basic system discovery & information disclosure.  Flag 2 will be accompanied by your key into the VM. The final flag and full system compromise will be obtained by exploiting a system misconfiguration.

The path to full system compromise is linear.  All flags will need to be obtained in order.  There is only one route to obtain the first three flags.  There are two different ways to obtain the fourth.  Can you find both misconfigurations which will grant root access?

!!!! Grading
The entire Final Exam will be worth 100 points.  Part 1 will account for 20 of those points.  The point breakdown for the flags and responses in Part 1 is:

* Flags 1 & 2: 5 points each
* Flag 3: 2 points
* Flag 4: 3 points
* Page 3 question: 5 points

Standard rules for lab assignments apply.  The deliverable PDF must contain your name at the top of the first page and must be properly submitted; it must be uploaded to the correct directory on the class shell server and given the correct filename.  These skills were part of the material this semester and are thus in scope for the Final exam content.  No points will be awarded for Part 1 if these requirements are not completed correctly.  The grading queue is visible and can be used as verification that the deliverable PDF was properly collected for evaluation.

!!!! Deliverable:
Complete and submit [[Final Exam Part 1|exam/final-part1.pdf]] with your steps and flag contents.  Upload the PDF to the class shell server to the directory {{File{/opt/pub/ncs205/submit/final/}}}.  The file name must be {{File{ncs205-final-//username//.pdf}}}.

@@ ''The Part 1 write-up will be due by EOD on Thursday, May 5'' @@

/%
Record your steps and the contents of the flags to your home directory on the class shell server: {{File{ ~/ncs205/labs/ncs205-lab56-//username//.txt }}}
 - File line #1 is your name
 - File line #2 is the output of the highest flag you were able to obtain
 - Then list your steps on the following lines

Be sure to chmod the file 600
%/
!!! [[hack3 break-in]]

A VM named {{Monospaced{''hack3''}}} has just been added for each of you.  It's presently powered on and assigned to the 7th IP address of your range via DHCP.  

You do not have console access and will not see it in your list of ~VMs in the Proxmox web UI.

This VM has information which can be found and vulnerabilities which can be exploited to gain root access to the system.  All required knowledge is based on our class material.  

There are flags to capture as your intrusion progresses to show you have increasingly gained access and show how far you were able to get.  

!!!! Scenario:
In professional attack organizations (eg: nation states), phases of the cyber kill chain may be completed by separate, more specialized teams.  One team may carry out reconnaissance of the target, another may perform the initial intrusion, who will hand off to another team to maintain persistence and perform data exfiltration.

An intrusion team who broke into this system left a basic implant for later use by the command & control team.  Notes can be found in Jimbo's home directory.  Examine their implant and utilize it to gain root privileges on the system.

jimbo's password can be found in your home directory on the class shell server in {{File{~/hack3.txt}}}.
* File format is {{Monospaced{ username }}} / {{Monospaced{ password }}}


!!!! Objective
Access {{Monospaced{''hack3''}}} for a little CTF (capture the flag).  These CTF challenges are a fun way to demonstrate skill, creativity, and understanding of the material.
* This VM is currently running on the 7th IP address of your range
** Create an {{Monospaced{A}}} record so host name {{Monospaced{''hack3''}}} points to this IP address
* The ultimate objective is to access the system and gain full root privileges.
* Flags to capture are in the following files:
** ''Flag 1'': Examine the contents of the home directory belonging to user {{Monospaced{jimbo}}} to find the first flag.
** ''Flag 2'': The previous file will also contain a hint for where you can find the second flag.
** ''Flag 3'': Examine the file {{File{/root/flag3.txt}}}
** The "flag" to capture is the string of text located inside each of those three files.  It will look something like this:  {{Monospaced{ @@ ''flag{//sometext//}'' @@ }}}

What's the highest number flag you can access?  Capturing all three shows you have root-level access and have fully taken over the system.
 - File format is {{Monospaced{ username }}} / {{Monospaced{ password }}}


!!! Grading
The entire Final Exam will be worth 100 points.  Part 1 will account for 20 of those points.  The point breakdown for the flags and responses in Part 1 is:

* Flags 1, 2, & 3: 5 points each
* Page 3 question: 5 points

Standard rules for lab assignments apply.  The deliverable PDF must contain your name at the top of the first page and must be properly submitted; it must be uploaded to the correct directory on the class shell server and given the correct filename.  These skills were part of the material this semester and are thus in scope for the Final exam content.  ''No points will be awarded for Part 1 if these requirements are not completed correctly.''  The grading queue is visible and can be used as verification that the deliverable PDF was properly collected for evaluation.

!!!! Deliverable:
Complete and submit [[Final Exam Part 1|exam/final-part1.pdf]] with your steps and flag contents.  Upload the PDF to the class shell server to the directory {{File{/opt/pub/ncs205/submit/final/}}}.  The file name must be {{File{ncs205-final-//username//.pdf}}}.

@@ ''The Part 1 write-up will be due by EOD on Thursday, December 15'' @@


!!! Continued work

If you enjoyed the puzzle and challenge, there's plenty more to try out:

* https://overthewire.org/wargames/ - Learn and practice security concepts through their online wargames.  Bandit fits into the material for this course.
* https://www.vulnhub.com/ - Vulnerable virtual machines created by the community that you can download, set up locally, and break into.  Or create and contribute one of your own.
* https://tryhackme.com/ - Hands-on security training through real-world scenarios
* https://www.hackthebox.com/ - Online security challenges and training
* In person ~CTFs at security conferences, like [[B-Sides Rochester|https://bsidesroc.com/]]

/%
Record your steps and the contents of the flags to your home directory on the class shell server: {{File{ ~/ncs205/labs/ncs205-lab56-//username//.txt }}}
 - File line #1 is your name
 - File line #2 is the output of the highest flag you were able to obtain
 - Then list your steps on the following lines

Be sure to chmod the file 600
%/
config.macros.listTags = { text: "Hello" };
// Macro: render an unordered list of links to every tiddler carrying a given tag.
//   params[0] = tag to match
//   params[1] = field to sort the results by (eg, title, modified, modifier or text)
config.macros.listTags.handler = function(place,macroName,params)
{
	// Fetch the matching tiddlers, already ordered by the requested sort field
	var taggedTiddlers = store.getTaggedTiddlers(params[0],params[1]);
	var list = createTiddlyElement(place,"ul",null,null,"");
	for(var i = 0; i < taggedTiddlers.length; i++)
	{
		var item = createTiddlyElement(list,"li",null,null,"");
		createTiddlyLink(item,taggedTiddlers[i].title,true);
	}
}
/***
|''Name:''|Plugin setDefaults|
|''Version:''|1.0.1 (2006-03-16)|
|''Source:''|http://tiddlywikitips.com/#%5B%5BPlugin%20setDefaults%5D%5D|
|''Author:''|Jim Barr (jim [at] barr [dot] net)|
|''Licence:''|[[BSD open source license]]|
|''TiddlyWiki:''|2.0|
|''Browser:''|Firefox 1.0.4+; Firefox 1.5; InternetExplorer 6.0|
!Description

These settings simply set "default" values for several system features and Plugins.
***/

/***
Standard settings:
***/
//{{{
// "Standard" TiddlyWiki core option overrides.  Setting them here (in a
// systemConfig plugin) makes them the wiki's defaults, instead of relying on
// each visitor's browser cookies.  The trailing comment on each line records
// the core default value for comparison.
config.options.chkRegExpSearch         = false;         // default false
config.options.chkCaseSensitiveSearch  = false;         // default false
config.options.chkAnimate              = false;          // default true
//config.options.txtUserName             = "Nick";    // default "YourName" (left commented out so each user keeps their own name)
config.options.chkSaveBackups          = false;          // default true
config.options.chkAutoSave             = false;          // default false
config.options.chkGenerateAnRssFeed    = false;         // default false
config.options.chkSaveEmptyTemplate    = false;         // default false
config.options.chkOpenInNewWindow      = true;          // default true
config.options.chkToggleLinks          = false;         // default false
config.options.chkHttpReadOnly         = true;         // default true
config.options.chkForceMinorUpdate     = false;         // default false
config.options.chkConfirmDelete        = true;          // default true
config.options.txtBackupFolder         = "";            // default ""
config.options.txtMainTab              = "tabTimeline"; // default "tabTimeline"
config.options.txtMoreTab              = "moreTabAll";  // default "moreTabAll"
config.options.txtMaxEditRows          = "30";          // default "30"
config.options.chkInsertTabs = true;    		// tab key inserts a tab character when editing a tiddler (instead of moving focus)

//}}}

/***
Custom Plugin settings:
***/
//{{{
// Options consumed by third-party plugins rather than the TiddlyWiki core.
// NOTE(review): these presumably require the corresponding plugins
// (SinglePageMode, search extensions, etc.) to be installed elsewhere in
// this wiki — harmless no-ops otherwise; confirm against the plugin list.
config.options.chkSinglePageMode       = false;          // default "true"
config.options.chkSearchTitlesFirst       = true;
config.options.chkSearchList           = true;           // default "false"
config.messages.messageClose.text      = "X";           // default "close"
// config.views.wikified.defaultText      = "";            // default "The tiddler '%0' doesn't yet exist. Double-click to create it"
config.options.chkStepWiseNavigationOn = true;           // default "false"
config.options.chkDisableAutoSelect       =true;
config.options.chkTextAreaExtensions    =true;
//}}}
Type the text for 'systemConfig'
Type the text for 'systemTiddler'