commit 7e493aeae4cd8d209de627536301fb3774587afc
parent 12aae4ebcdb2863a654c2723fb4956877763e64d
Author: pyratebeard <root@pyratebeard.net>
Date:   Mon, 12 Nov 2018 11:00:24 +0000

moved all of content from old wiki

Diffstat:
47 files changed, 1935 insertions(+), 4 deletions(-)
diff --git a/coding/git.md b/coding/git.md @@ -0,0 +1,16 @@ +[fork_and_branch_workflow](fork_and_branch_workflow) + +undo merge that hasn't been pushed +```zsh +git reset --merge HEAD~1 +``` + +roll back hard +``` +git reset --hard <commit/tag> +``` + + +## ref +- :1: https://www.atlassian.com/git/tutorials/making-a-pull-request +- :2: https://git-scm.com/book/en/v2/Git-Branching-Rebasing diff --git a/coding/svn.md b/coding/svn.md @@ -0,0 +1,14 @@ +# svn +[cheatsheet](https://www.cheatography.com/davechild/cheat-sheets/subversion/) +[cheatsheet2](https://www.abbeyworkshop.com/howto/misc/svn01/) + +svn checkout --username=dudley.burrows http://code.ward.ie/svn/support-tools/trunk support-tools + +svn status + +svn update /path + +# only need add for new directories or files +svn add /path|file + +svn commit -m "message" /path|file diff --git a/coding/vim.md b/coding/vim.md @@ -0,0 +1,94 @@ +# vim + +## toc +1. [copy](#copy contents to/from files) +2. [text wrap](#text wrap) +3. [spell](#spell) +4. [buffers](#buffers) + +## copy contents to/from files +to copy the contents into a file + ``` + :r <filename> + ``` +or + ``` + :"qY # yank out of first file + :"qP # put into second file + ``` +or read range of lines + ``` + :r! sed -n <n>,<m>p /path/to/file.md + ``` + +to copy the contents to a new file + ``` + :<n>,<m> w <filename> + ``` + where `<n>` and `<m>` are numbers or symbols that designate range of lines + + +## text wrap +(wrap to column) +wrap current line +``` +gqq +``` + +wrap entire file +``` +gqG +``` + +wrap paragraph using visual selection +``` +V}gq +``` + +also use visual or visual block with `gq` + +## spell +[ref_1](#ref#1) +check spelling +``` +Spell +``` + +move to word +``` +]s [s +``` + +and see results +``` +z= +``` + +turn off highlighting +``` +set nospell +``` + +## buffers +- buffer [faq](#ref#2) +- using vanilla vim [answer](#ref#3) +- buffers over tabs [answer](#ref#4) +- open buffer + ``` + :e <filename> + :new + ``` +- view buffers + ``` + :ls + ``` +- switch buffers + ``` + :buffer <num> + ``` + +## ref +- :1: https://www.linux.com/learn/using-spell-checking-vim +- :2: http://vim.wikia.com/wiki/Vim_buffer_FAQ +- :3: https://stackoverflow.com/questions/16082991/vim-switching-between-files-rapidly-using-vanilla-vim-no-plugins +- :4: https://stackoverflow.com/questions/26708822/why-do-vim-experts-prefer-buffers-over-tabs diff --git a/documentation/pandoc.md b/documentation/pandoc.md @@ -0,0 +1,9 @@ +# pandoc + +## curl webpages to man [ref 1](#ref#1) +``` +curl http://webpage.com | pandoc -s -f html -t man | man -l - +``` + +## ref +- :1: https://nixers.net/showthread.php?tid=1679&pid=15789&highlight=pandoc#pid15789 diff --git a/documentation/vimwiki.md b/documentation/vimwiki.md @@ -0,0 +1,115 @@ +# vimwiki + +1. 
[commands](#commands) + +## commands +`=` increase header +`-` decrease header +`C-space` check todo box +`glh` step down list +`gll` step up list +`\w\t` day diary in new tab +`\w\i` on diary index generate new links +`\wt` open wiki index in tab (only after mapping `nmap <Leader>wt <Plug>VimwikiTabnewLink`) +`\wi` open diary index + +syntax highlighting + +```bash +echo hello friend +``` + +pandoc -s -f markdown -t html -o uofu/azure_ad_requirements.html uofu/azure_ad_requirements.md && firefox uofu/azure_ad_requirements +.html + +anchors + use [title] (#anchor#subanchor) + i'm using :tags: under a #ref header to jump to links in page + - switch to [pandoc markdown][] + +## gollum +- install gollum + ``` + sudo gem install gollum + ``` + - requires 'icu' + ``` + sudo dnf install libicu-devel + ``` +- navigate to wiki dir and run gollum + ``` + gollum --css --h1-title + ``` + - using custom css file and setting first h1 header as page title + - see [gollum config][] docs for options +- look at [omnigollum][] for user auth +- [gollum reverse proxy][] + +## installing gollum on jump-test +https://github.com/gollum/gollum # use --bare + +- configure omnigollum https://github.com/arr2036/omnigollum +- using azuread https://github.com/AzureAD/omniauth-azure-activedirectory + +### steps +- to enable [git server][] + - install git (duh!) + - create 'git' user + ``` + adduser git + mkdir ~git/.ssh + chmod 700 ~git/.ssh + touch ~git/.ssh/authorized_keys + chmod 600 ~git/.ssh/authorized_keys + ``` +- mkdir 'wiki' dir + ``` + mkdir /wiki + chown git. /wiki + ``` +- create git repo - [getting git on server][] + ``` + su - git + cd /wiki + git init --bare --shared + ``` +- clone repo and initial commit (on local machine) + ``` + git clone git@gitserver:/wiki wiki + cd wiki/ + echo "# wiki" > README.md + git add README.md + git commit -m "Initial commit" + git push -u origin master + ``` +- install gollum + ``` + apt install ruby ruby-dev build-essential zlib1g-dev libicu-dev + gem install gollum + ``` + + ``` + yum group install "Development Tools" + yum install ruby ruby-devel libicu libicu-devel zlib zlib-devel + gem install gollum + ``` +- enable gollum as a service + ``` + vi /etc/systemd/system/gollum.service + + [Service] + ExecStart=/usr/local/bin/gollum --show-all "/wiki" + ``` +- allow traffic + ``` + iptables -I INPUT -p tcp --dport 4567 -j ACCEPT -m comment --comment "Allow access to wiki" + ``` + + +## ref +[pandoc markdown]: http://pandoc.org/MANUAL.html#pandocs-markdown +[gollum config]: https://github.com/gollum/gollum#configuration +[omnigollum]: https://github.com/arr2036/omnigollum/blob/master/config.rb.example +[gollum reverse proxy]: https://gist.github.com/spinpx/c46ea0b24157ca5f731f +[git server]: https://git-scm.com/book/en/v2/Git-on-the-Server-Setting-Up-the-Server +[getting git on server]: https://git-scm.com/book/en/v2/Git-on-the-Server-Getting-Git-on-a-Server diff --git a/index.md b/index.md @@ -1,14 +1,75 @@ # pyratewiki ## content -1. [technology](#technology) -2. [projects](#projects) +1. [coding](#coding) +2. [documentation](#documentation) 3. [misc](#misc) +4. [projects](#projects) +5. [technology](#technology) + 1. [authentication](#authentication) + 2. [cloud](#cloud) + 3. [communication](#communication) + 4. [databases](#databases) + 5. [encryption](#encryption) + 6. [linux](#linux) + 7. [monitoring](#monitoring) + 8. [networking](#networking) + 9. [virtualisation](#virtualisation) +6. 
[wardwiki](wiki1:index) + +## coding +- [git](coding/git) +- [svn](coding/svn) +- [vim](coding/vim) + +## documentation +- [pandoc](documentation/pandoc) +- [vimwiki](documentation/vimwiki) + +## misc +- [music](misc/music) +- [naming_schemes](misc/naming_schemes) +- [unicode_characters](misc/unicode_characters) +- +## projects +- [pyratenet](projects/pyratenet) +- [startpage](projects/startpage) ## technology +### authentication +- [ldap](technology/authentication/ldap) + +### cloud +- [azure](technology/cloud/azure) +- [docker](technology/cloud/docker) +- [openshift](technology/cloud/openshift) + +### communication +- [ftp](technology/communication/ftp) +- [mutt](technology/communication/mutt) + +### databases +- [mariadb](technology/databases/mariadb) +- [oracle](technology/databases/oracle) + +### encryption +- [gpg](technology/encryption/gpg) + ### linux +- [archlinux](technology/linux/archlinux/index) +- [audio_visual](technology/linux/audio_visual/index) +- [debian](technology/linux/debian/index) +- [general](technology/linux/general/index) +- [redhat](technology/linux/redhat/index) -## projects +### monitoring +- [snmp](technology/monitoring/snmp) + +### networking +- [subnet_cheatsheet](technology/networking/subnet_cheatsheet) +- [wifi_connect](technology/networking/wifi_connect) + +### virtualisation +- [esxi](technology/virtualisation/esxi) -## misc diff --git a/misc/music.md b/misc/music.md @@ -0,0 +1,15 @@ +# music + +- twilight force "flight of the sapphire dragon" +- old wolf "howl" +- old wolf "trail of tears" +- tarchon fist "metal detector" +- vinide "another dimension" +- odr "notte alcolica" +- baphomets blood "command of the inverted cross" +- twins crew "ghost of the seven seas" +- dave esser "in ewigkeit amen" +- when nothing remains "reunited in the grave" +- demons & wizards "the whistler" +- serenity "caught in a myth" +- majesty "die like kings" diff --git a/misc/naming_schemes.md b/misc/naming_schemes.md @@ -0,0 +1,36 @@ +# naming schemes + +## hackers +| name | media | device | +|-----------|-------------------|--------| +| grishenko | (boris) goldeneye | | + +- kaludis (frederick - die hard 4.0) +- salander (lisbeth - tgwtdt) +- nedry (dennis - jurassic park) +- bishop (martin - sneakers) +- flynn (kevin - tron) +- simon j (one point o) + +## heren istarion (order of wizards) + +| middle-earth name | colour | maia name | +|-------------------|---------------------|-----------| +| saruman | white (many colors) | curumo | +| gandalf | grey (white) | olórin | +| radagast | brown | aiwendil | +| morinehtar | blue | alatar | +| rómestámo | blue | pallando | + +## pirate ships +| name | device | +|----------------------|--------| +| black pearl | | +| queen anne's revenge | | +| ranger | | +| walrus | | +| flying dutchman | | +| troubadour | | +| venganza | | +| wicked wench | | +| jackdaw | | diff --git a/misc/unicode_characters.md b/misc/unicode_characters.md @@ -0,0 +1,4 @@ +# unicode characters + + * black skull and crossbones U+1f571 [link](https://unicode-table.com/en/1F571/) + * alchemical symbol for night U+1f76f [link](https://unicode-table.com/en/1F76F/) diff --git a/projects/pyratenet.md b/projects/pyratenet.md @@ -0,0 +1,50 @@ +# pyratenet + +- web +- git +- ftp +- wiki + +## choices +- one vps with docker +- multi vps + +### one vps with docker +look at using [jenkins ci/cd][] for auto deployment + +### multi vps +use [git hooks][] for deployment - see refs + +#### wiki +- using markdown files in git repo +- following 'makefile' to build html files + 
```c + MD_FILES=$(shell find . -name \*.md) + HTML_FILES=$(MD_FILES:.md=.html) + BUILD_HTML_FILES=$(HTML_FILES:%=build/%) + + all: $(BUILD_HTML_FILES) + + build/assets/%: assets/% + @mkdir -p $$(dirname $@) + cp $? $@ + + build/%.html: %.md + @mkdir -p $$(dirname $@) + pandoc -o $@ $< + + # run with `make deploy` + deploy: + rsync --recursive --human-readable --delete --info=progress2 \ + build/* user@host:dir + ``` + - run with `make deploy` + +## ref +[jenkins ci/cd]: https://blog.harveydelaney.com/jenkins-build-test-deploy-node-app/ +http://joemaller.com/990/a-web-focused-git-workflow/ +http://codesamplez.com/source-control/deployment-with-git-hook +https://www.digitalocean.com/community/tutorials/how-to-use-git-hooks-to-automate-development-and-deployment-tasks +[git hooks]: https://git-scm.com/book/en/v2/Customizing-Git-Git-Hooks +https://www.digitalocean.com/community/tutorials/how-to-set-up-automatic-deployment-with-git-with-a-vps +http://toroid.org/git-website-howto diff --git a/projects/startpage.md b/projects/startpage.md @@ -0,0 +1,7 @@ +# startpage + +## appearance +- comic + +## links to add +- stashmycomics diff --git a/technology/authentication/ldap.md b/technology/authentication/ldap.md @@ -0,0 +1,4 @@ +# ldap + +ldapsearch -v -x -LLL -h host -D "bind DN" -w password -b "OU=" +ldapsearch -v -x -LLL -h tristan.ward.ie:389 -D "CN=apache_svn_ro,OU=Automated System Accounts,OU=Ward Users,DC=ward,DC=ie" -w [password] -b "OU=Ward Users,dc=ward,dc=ie" diff --git a/technology/cloud/azure.md b/technology/cloud/azure.md @@ -0,0 +1,184 @@ +# azure +[aries](https://aries.ward.ie/x/YgFk) + +## toc + + +## ward accounts (keepass) + * dudley@wardazureward.onmicrosoft.com + - non-prod (uat/dev) + * dburrows@warddemo.onmicrosoft.com + - prod + +## personal accounts + * blahthing@icloud.com + * dudleyburrows@ward.ie + +## az cli +[docs](https://docs.microsoft.com/en-gb/cli/azure/get-started-with-azure-cli?view=azure-cli-latest) + +### useful cmds + - show list of resource groups + ``` + az group list --output table + ``` + - list resources in a resource group + ``` + az resource list -g <group_name> --output table + ``` + + +### create an Ubuntu vm called PP-WRD-TEST01 in the group P-WRD-RGRP on the PP-WRD-VNET vnet with no additional network security group, using username dburrows and public key for ssh +az vm create -g P-WRD-RGRP -n PP-WRD-TEST01 --vnet-name PP-WRG-VNET --nsg "" --image UbuntuLTS --ssh-key-value .ssh/dburrows.pub --admin-username dburrows + + + + +install the azure cli command `{{az}}` by running the following +``` +curl -L https://aka.ms/InstallAzureCLI | bash +``` + +once installed login in to your account with +``` +az login +``` + +to switch to a different account run +``` +az logout +``` + +then run the login command again. + +all the following steps _can_ be run from the portal cli as well as your local machine once you have installed {{az}}. 
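a handy trick when scripting any of the account commands below is the cli's built-in JMESPath filter (`--query`), which saves copying values out of the json by hand. a minimal sketch, assuming a reasonably recent `az`:
```
# print only the signed-in user name rather than the full json blob
az account show --query user.name --output tsv

# table of the subscriptions you can see, name and id only
az account list --query "[].{name:name, id:id}" --output table
```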
+ +### changing subscriptions + +check your subscriptions +``` +az account list --output table +``` + +show which subscription you're currently using +``` +az account show +``` + +then to change subscriptions run +``` +az account set --subscription "Ward MS Partner Account" +``` + +### show vm images +``` +az image list +``` + +### getting started + +here is a quick run through of spinning up a [centos](https://www.centos.org/) virtual machine + + - create resource group + ``` + az group create --name D-TST-RGRP --location northeurope + ``` + - create Network Security Group + ``` + az network nsg create --resource-group D-TST-RGRP --name D-TST-LAPP01 + ``` + - create a network rule in an existing security group + ``` + az network nsg rule create --resource-group D-TST-RGRP --nsg-name D-TST-NSGP --name allow-ward --description "Allow all traffic from Ward public range" --access Allow --protocol Tcp --direction Inbound --priority 102 --source-address-prefix "93.94.119.240/28" --source-port-range "*" --destination-address-prefix "*" --destination-port-range "*" + ``` + - create a virtual machine + ``` + az vm create -g D-TST-RGRP -n D-TST-LAPP01 --image CentOS --generate-ssh-keys + ``` + +once the VM is successfully created it will output some json. make note of the "publicIpAddress" value, and use this to {{ssh}} to the server. + +## advanced tools + +the following are a collection of tools which have been played around with. some of these tools may require escalated privileges which your account may not have. if you are unable to action anything and really desperately need to then speak to one of the azure admins. + +you can check your current role with the cli. first you need to make a note of the username for the subscription you're using +``` +az account show +{ + "environmentName": "AzureCloud", + "id": "b1447c5c-db40-4a01-9fc3-d56a740047b5", + "isDefault": true, + "name": "Ward MS Partner Account", + "state": "Enabled", + "tenantId": "89252e5d-2862-422f-8279-7d0a49e32e6a", + "user": { + "name": "dudley@wardazureward.onmicrosoft.com", + "type": "user" + } +} +``` +copy the value from {{"user": "name":}}, then run the following replacing {{<value>}} with the username (usually an email address) +``` +az role assignment list --assignee <value> +[ + { + "id": "/subscriptions/b1447c5c-db40-4a01-9fc3-d56a740047b5/providers/Microsoft.Authorization/roleAssignments/85f38488-d3f3-423b-afb7-77d8cadddd32", + "name": "85f38488-d3f3-423b-afb7-77d8cadddd32", + "properties": { + "principalId": "5ad99088-9605-4a83-843c-71bc51bf53f6", + "principalName": "dudley@wardazureward.onmicrosoft.com", + "roleDefinitionId": "/subscriptions/b1447c5c-db40-4a01-9fc3-d56a740047b5/providers/Microsoft.Authorization/roleDefinitions/b24988ac-6180-42a0-ab88-20f7382dd24c", + "roleDefinitionName": "Contributor", + "scope": "/subscriptions/b1447c5c-db40-4a01-9fc3-d56a740047b5" + }, + "type": "Microsoft.Authorization/roleAssignments" + } +] +``` +your current role is under {{"properties": "roleDefinitionName":}} + +## show all resources in your subscription +``` +az group list --output table +``` + +## deploy a kubernetes cluster + +we add the {{aks}} option to manage azure kubernetes services. 
Currently aks is only available in west europe +``` +az group create --name D-K8S-RGRP --location westeurope +az aks create --name D-K8S-KCLU --resource-group D-K8S-RGRP --generate-ssh-keys +az aks get-credentials --name D-K8S-KCLU --resource-group D-K8S-RGRP +az aks browse --name D-K8S-KCLU --resource-group D-K8S-RGRP +az aks show --resource-group dudley-container-demo-rg --name dudley-container-demo-clu --query "servicePrincipalProfile.clientId" --output tsv +``` + +## deploy webapp and enable for webhooks +``` +az group create --name webapp-rg -l northeurope +az appservice plan create -g webapp-rg -n webapp-srvplan --is-linux +az webapp create -g webapp-rg -p webapp-srvplan -n webapp -i pyratebeard/container-webhook-demo +az webapp deployment container config -n webapp -g webapp-rg --enable-cd true +az webapp deployment container show-cd-url -n D-TST-APP-SRV -g D-TST-APP-RG +``` + +run script tool on VMs (under 'Operation') + +## create vpn - [ref1][] +* virtual network +* virtual network gateway +* local network gateway +* public ip +* connection (under virtual network gateway) +* vpn not coming up in fortigate + * running network watcher troubleshooting + * need to add address space to connection +* connect through gateway to website (using peering?) + + +[auto tagging][] + +## ref +[ref1]: https://cookbook.fortinet.com/ipsec-vpn-microsoft-azure-54/ +[auto tagging]: https://gallery.technet.microsoft.com/scriptcenter/Automatically-Azure-fc5f1443 diff --git a/technology/cloud/docker.md b/technology/cloud/docker.md @@ -0,0 +1,10 @@ +# docker + +## pull files from inside container +``` +docker cp <container_id>:/path/to/file /path/to/save +``` + +## ref +- :1: https://stackoverflow.com/questions/44027873/how-to-create-a-new-docker-image-from-a-running-container-on-amazon +- :2: https://stackoverflow.com/questions/43699368/configure-docker-daemon-port-to-enable-docker-apis/43713435#43713435 diff --git a/technology/cloud/openshift.md b/technology/cloud/openshift.md @@ -0,0 +1,3 @@ +# openshift + +- 20180913: [red_hat_container_event](red_hat_container_event) diff --git a/technology/cloud/red_hat_container_event.md b/technology/cloud/red_hat_container_event.md @@ -0,0 +1,619 @@ +# rh container event (cloud-native roadshow) + +## ops + +- wifi: + - Ballsridge Hotel + - beourguest2018 +- https://redhat.qwiklab.com/focuses/191 +- lab details + - host master.674462327352.aws.testdrive.openshift.com + - user cloud-user + - pass qwikLABS-L64-23179 + - docs http://support.674462327352.aws.testdrive.openshift.com/ +- [feedback](http://bit.ly/CNRSFeedback) +- additional credits: testdrivetokens <at> redhat <dot> com + +- run cloudforms/oshift web console on oshift (app nodes) - reverse proxy for bastion + +## lab +- navigate to `https://redhat.qwiklab.com/focuses/191` and login + - user: dudley.burrows@ward.ie + - pass: reachfortheclouds +- select 'My Learning' then 'OpenShift for Ops Test Drive' +- Click 'Start Lab' in the top right. Once the lab has been spun up the connection details will appear in the left pane. +- The lab guide URL will also be shown. + + +## presentation + +### oshift overview +- hybrid scaling + - from on-prem to cloud in mins +- jenkins pipeline + - servicenow rest api to 'tick box' before continuing +- kubernetes +- oci compatible container runtime (docker) +- internal container repo in oshift (certified by rh) +- 10x workload density than vms --?? 
+- ownership boundaries + - dev + - container + - app + - os dependencies + - ops + - container host + - infra +- container image layers + - immutable images (kill and redeploy) +- base image patching + - oshift rebuilds all containers using image stream + - source to image build +- lightweight, oci-compliant container runtime (cri-o --??) + - rhel on node (host) and container + - pod = collection of containers + - smallest unit of management in oshift + - only oci-compliant are supported +- masters (3x) + - can lose all w/out effecting live traffic + - rest api (servicenow to do oshift activites) + - datastore + - desired / current state + - etcd db + - one per master + - sync'd across masters + - ansible playbook bundles instead of bakup (infra as code) + - orchestration and scheduling + - placement by policy + - health/scaling - autoscaling pods + - endpoints put in by devs + - readiness probe + - liveness probe +- infra nodes + - integrated container registry +- persistent storage + - glusterfs +- service layer +- routing layer + - expose services externally + +### container storage +- oshift persistent storage framework + - PersistentVolumeClaim + - submitted by dev + - StorageClass + - set up by ops + - Storage Backend + - PersistentVolmue + - mounted by pod + - bound to PersistentVolumeClaim +- glusterfs + - (app) node labelled as container native storage + - underlying storage: das, jbod + - scale-out linearly + - replicate sync and async + - [heketi](#ref#1) - restful glusterfs management + +- subscription licensing + - not required for master/infra + - only for 'worker' nodes (app nodes) + - based on number of vms or socket pairs + - spotfleets?? + - cloudforms to manage subscriptions? + +### lab +- environment + - master x1 + - infra x1 + - app x6 + - idm x1 (ldap auth) +- ssh into master node +- using ansible playbooks for installing oshift + - part of the `openshift-ansible` pkg +- installers config `/etc/ansible/hosts` [docs](#ref#2) + - general settings under `[OSEv3:vars]` +- top level runbook triggers install of cluster + - `/usr/share/ansible/openshift-ansible/playbooks/deploy_cluster.yml` +- requires 'super admin' account +- [cmds](#oc commands) +- [web_console](https://openshift.674462327352.aws.testdrive.openshift.com/console) +- [prometheus](#ref#3) + - cluster infra monitoring and alerting +- verify storage cluster + ``` + export HEKETI_CLI_SERVER=http://heketi-storage-storage.apps.674462327352.aws.testdrive.openshift.com + export HEKETI_CLI_USER=admin + export HEKETI_CLI_KEY=myS3cr3tpassw0rd + heketi-cli cluster list #shows internal uuid of cns cluster + heketi-cli topology info + ``` +- application management + - create new project (bucket) + - deploy new app (automatically created [service](#ref#6)) + - view service yaml + - scale app + - delete pod + - oshift redeploys in less than 10secs! 
+ - create route (expose service) +- [application probes](#ref#7) + - liveness probe + - readiness probe + - check endpoint health + ``` + curl mapit-app-management.apps.674462327352.aws.testdrive.openshift.com/health + ``` + - probe endpoint for liveness (set probe) + ``` + oc set probe dc/mapit --liveness --get-url=http://:8080/health --initial-delay-seconds=30 + ``` + - probe endpoint for readiness (set probe) + ``` + oc set probe dc/mapit --readiness --get-url=http://:8080/health --initial-delay-seconds=30 + ``` + - confirm + ``` + oc describe dc mapit + ``` + - 'Containers' section +- add storage to app + ``` + oc volume dc/mapit --add --name=mapit-storage -t pvc --claim-mode=ReadWriteMany --claim-size=1Gi --claim-name=mapit-storage --mount-path=/app-storage + ``` + - storage now available at `/app-storage` inside node (rsh log on) +- project request template, quota, limits + - view default template + - modify template + ``` + cat /opt/lab/support/project_request_template.yaml + ``` + - new sections: + - [ResourceQuota](#ref#8) + - [LimitRange](#ref#9) + - install new template + - modify 'master-config.yaml' section 'projectRequestTemplate' + ``` + sudo vim /etc/origin/master/master-config.yaml + ``` + - restart master + ``` + sudo systemctl restart atomic-openshift-master-api atomic-openshift-master-controllers + ``` +- groups + - [external auth providers](#ref#10) + - [role based access control](#ref#11) + - login as normal user + - no projects + - login as 'fancyuser' + - projects are shown + - create 3x new projects (lifecycle) + - ose-teamed-app edit dev and test, view prod + - ose-fancy-dev edit prod + - login is as teamed user to see 3x projects + - create app in prod - fails! + - prometheus + - login as fancyuser1 +- infrastructure management, metrics and logging + - extending cluster + - view app nodes + - uncomment '#scaleup_' in '/etc/ansible/hosts' + - use ansible to verify nodes are online + ``` + ansible new_nodes -m ping + ``` + - run playbook to extend cluster + ``` + ansible-playbook /usr/share/ansible/openshift-ansible/playbooks/openshift-node/scaleup.yml + ``` + - multi master ha setup [docs](#ref#12) + - container-native storage for infra + - required by registry, logging, metrics + - configure installer + ``` + sudo sed -i 's/#cnsinfra_//g' /etc/ansible/hosts + ``` + - install cns cluster for infra + ``` + ansible-playbook /usr/share/ansible/openshift-ansible/playbooks/openshift-glusterfs/config.yml + ``` + - regular file storage service (glusterfs) not supported for logging/metrics + - must use block storage (glusterblock) + - metrics + - based on [hawkular](#ref#13) in a cassandra db + - configure installer + ``` + sudo sed -i 's/#metrics_//g' /etc/ansible/hosts + sudo sed -i '/openshift_metrics_install_metrics=false/d' /etc/ansible/hosts + ``` + - run playbook to install metrics + ``` + ansible-playbook /usr/share/ansible/openshift-ansible/playbooks/openshift-metrics/config.yml + ``` + - logging + - using efk + - elasticsearch (centralplace) + - fluentd (consolidated) + - kibana (ui) + - configure installer + ``` + sudo sed -i 's/#logging_//g' /etc/ansible/hosts + sudo sed -i '/openshift_logging_install_logging=false/d' /etc/ansible/hosts + ``` + - run playbook to install logging + ``` + ansible-playbook /usr/share/ansible/openshift-ansible/playbooks/openshift-logging/config.yml + ``` + - multitenant networking + - [sdn](#ref#14) based on open vswitch + - execute creation script + ``` + bash /opt/lab/support/net-proj.sh + ``` + - get ip of pod b + ``` + bash 
/opt/lab/support/podbip.sh + ``` + - export pod b ip + ``` + export POD_B_IP=$(bash /opt/lab/support/podbip.sh) + ``` + - get name of pod in netproj-a project and export as var + ``` + oc get pods -n netproj-a + export POD_A_NAME=ose-1-zccsx + ``` + - execute `ping` in pod a try to reach pod b + ``` + oc exec -n netproj-a $POD_A_NAME -- ping -c1 -W1 $POD_B_IP + ``` + - fails because networks aren't connected + - join networks + ``` + oc get netnamespace + oc adm pod-network join-projects netproj-a --to=netproj-b + oc get netnamespace + ``` + - network ids of two projs now the same + - retest connectivity + ``` + oc exec -n netproj-a $POD_A_NAME -- ping -c1 -W1 $POD_B_IP + ``` + - isolate (unjoin) projects + ``` + oc adm pod-network isolate-projects netproj-a + ``` + - use ['NetworkPolicy'](#ref#15) for finer grain + - node maintenance + - mark node as 'non-schedulable' then drain all pods on node + - mark node02 as 'non-schedulable' + ``` + oc adm manage-node node02.internal.aws.testdrive.openshift.com --schedulable=false + ``` + - does not impact running pods + - drain pods on node02 (dryrun first) + - node now ready for maintenance (reboot etc) + - add node back into oshift + ``` + oc adm manage-node node02.internal.aws.testdrive.openshift.com --schedulable=true + ``` + - oshift registry with cns + - uses ephemeral storage in its pod + - restarts or redeployments cause container images lost + - add cns to registry + - add volume + ``` + oc volume dc/docker-registry --add --name=registry-storage -t pvc \ + --claim-mode=ReadWriteMany --claim-size=5Gi \ + --claim-name=registry-storage --claim-class=glusterfs-registry --overwrite + ``` + - verify deploymentconfig + ``` + oc get dc/docker-registry + ``` + - scale registry + ``` + oc scale dc/docker-registry --replicas=3 + ``` +- container-native storage concepts + - login as super admin in 'storage' + ``` + oc login -u system:admin -n storage + ``` + - view pods + ``` + oc get pods -n storage -o wide + ``` + - check service and route + ``` + oc get service,route + ``` + - perform health check on endpoint + ``` + curl -w "\n" http://heketi-storage-storage.apps.674462327352.aws.testdrive.openshift.com/hello + ``` + - login as 'fancyuser1' + ``` + oc login -u fancyuser1 -p openshift + ``` + - create new app + ``` + oc new-project my-database-app + ``` + - view template + ``` + oc get template/rails-pgsql-persistent -n openshift + ``` + - view pvc in template + ``` + oc get template/rails-pgsql-persistent -n openshift -o yaml | grep PersistentVolumeClaim -A8 + ``` + - specify storage size + ``` + oc new-app rails-pgsql-persistent -p VOLUME_CAPACITY=5Gi + ``` + - get route + ``` + oc get route + ``` + - explore underlying cns + - login as system admin + - select 'my-database-app' proj + ``` + oc project my-database-app + ``` + - view pvc + - export pvc name as var + ``` + export PGSQL_PV_NAME=$(oc get pvc/postgresql -o jsonpath="{.spec.volumeName}" -n my-database-app) + ``` + - describe pvc + ``` + oc describe pv $PGSQL_PV_NAME + ``` + - export glusterfs volume name + ``` + export PGSQL_GLUSTER_VOLUME=$(oc get pv $PGSQL_PV_NAME -o jsonpath='{.spec.glusterfs.path}') + ``` + - switch to storage project + ``` + oc project storage + ``` + - view glusterfs pods + ``` + oc get pods -o wide -l glusterfs=storage-pod + ``` + - store first glusterfs pod name and ip as vars + ``` + export FIRST_GLUSTER_POD=$(oc get pods -o jsonpath='{.items[0].metadata.name}' -l glusterfs=storage-pod) + export FIRST_GLUSTER_IP=$(oc get pods -o 
jsonpath='{.items[0].status.podIP}' -l glusterfs=storage-pod) + echo $FIRST_GLUSTER_POD + echo $FIRST_GLUSTER_IP + ``` + - query gluster pod for volumes (rsh) + ``` + oc rsh $FIRST_GLUSTER_POD gluster volume list + ``` + - query for topology + ``` + oc rsh $FIRST_GLUSTER_POD gluster volume info $PGSQL_GLUSTER_VOLUME + ``` + - export brick dir path + ``` + export PGSQL_GLUSTER_BRICK=$(echo -n $(oc rsh $FIRST_GLUSTER_POD gluster vol info $PGSQL_GLUSTER_VOLUME | grep $FIRST_GLUSTER_IP) | cut -d ':' -f 3 | tr -d $'\r' ) + echo $PGSQL_GLUSTER_BRICK + ``` + - look at brick dir + ``` + oc rsh $FIRST_GLUSTER_POD ls -ahl $PGSQL_GLUSTER_BRICK + ``` + - provide scalable, shared storage w/ cns + - deploy file uploader app + ``` + oc login -u fancyuser1 -p openshift + oc new-project my-shared-storage + oc new-app openshift/php:7.0~https://github.com/christianh814/openshift-php-upload-demo --name=file-uploader + ``` + - view logs to wait for app to be deployed + ``` + oc logs -f bc/file-uploader + ``` + - expose app via route + ``` + oc expose svc/file-uploader + ``` + - scale up for ha + ``` + oc scale --replicas=3 dc/file-uploader + ``` + - upload file to app + - view pods to find where file is located + ``` + oc rsh file-uploader-1-k2v0d ls -hl uploaded + oc rsh file-uploader-1-sz49r ls -hl uploaded + oc rsh file-uploader-1-xjg9f ls -hl uploaded + ``` + - create pvc + ``` + oc volume dc/file-uploader --add --name=my-shared-storage \ + -t pvc --claim-mode=ReadWriteMany --claim-size=1Gi \ + --claim-name=my-shared-storage --mount-path=/opt/app-root/src/uploaded + ``` + - refresh app (new nodes) + - upload new file + - view file across all nodes + - increase vol capacity + - fill up current cap + ``` + oc rsh file-uploader-2-jd22b dd if=/dev/zero of=uploaded/bigfile bs=1M count=1000 + oc rsh file-uploader-2-jd22b df -h /opt/app-root/src/uploaded + ``` + - edit pvc + ``` + oc edit pvc my-shared-storage + ``` + - edit storage size + - oshift updates on exit from vi + - confirm cap + ``` + oc rsh file-uploader-2-jd22b df -h /opt/app-root/src/uploaded + ``` + - providing block storage with cns + - block storage = iscsi lun + - view host running elasticsearch + ``` + oc get pod -l component=es -n logging -o wide + ``` + - view running iscsi session over ssh + ``` + ssh node05.internal.aws.testdrive.openshift.com sudo iscsiadm -m session + ``` +- exposed services + - look at [3scale](#ref#16) for protection + + + +### oc commands +| command | description | +|--------------------------------------------------|-------------------------------------------------| +| `oc login -u system:admin` | login to oshift | +| `oc get nodes` | list of nodes | +| `oc project <proj-name>` | change projects | +| `oc describe statefulset prometheus | describe 'StatefulSet'* | +| `oc describe daemonset prometheus-node-exporter` | ['node-exporter'](#ref#4) ['daemonset'](#ref#5) | +| `oc get routes` | show routes | +| `oc new-project <proj-name>` | create project | +| `oc new-app docker.io/repo/image` | deploy app | + + +*'StatefulSet' is a special kubernetes resource + - deals with containers that have various startup and other dependencies + - a daemonset is another special kubernetes resource. 
+ - it makes sure that specified containers are running on certain nodes + +- show pods + ``` + oc get pods + ``` +- pod information + ``` + oc describe pod <pod-name> + ``` +- show yaml output for pod + ``` + oc get pod <pod-name> -o yaml + ``` +- view pods on node + ``` + oc adm manage-node <node-name> --list-pods + ``` +- show services + ``` + oc get services + ``` +- service information + ``` + oc describe service <service-name> + ``` +- show yaml output for service + ``` + oc get service <service-name> -o yaml + ``` +- show deploymentconfig + ``` + oc get dc + ``` +- show replicationcontroller + ``` + oc get rc + ``` +- scale pods + ``` + oc scale --replicas=2 dc/<label> + ``` +- show endpoints for label + ``` + oc get endpoints <label> + ``` +- show router + ``` + oc describe dc router -n default + ``` +- set liveness probe + ``` + oc set probe dc/<label> --liveness --get-url=http://:8080/health --initial-delay-seconds=30 + ``` +- set readiness probe + ``` + oc set probe dc/<label> --readiness --get-url=http://:8080/health --initial-delay-seconds=30 + ``` +- log on to pod (remote shell) + ``` + oc rsh <pod-name> + ``` +- view default project request template + ``` + oc adm create-bootstrap-project-template + ``` +- create new default template + ``` + oc create -f /file/path/to/template.yaml -n default + ``` +- show quota for project + ``` + oc get quote -n <proj-name> + ``` +- show limitrange for project + ``` + oc get limitrange -n <proj-name> + ``` +- show groups (auth) + ``` + oc get groups + ``` +- execute `groupsync` + ``` + sudo oc adm groups sync --sync-config=/path/to/config.yaml --confirm + ``` +- show users + ``` + oc get user + ``` + - users created when first logged in +- show nodes + ``` + oc get nodes + ``` +- view nodes by label (i.e. 
app nodes) + ``` + oc get nodes -l region=apps + ``` +- show storageclass + ``` + oc get sc + ``` +- show persistentvolumeclaim + ``` + oc get pvc + ``` +- show network namespaces + ``` + oc get netnamespaces + ``` +- + +## ref +- :1: https://github.com/heketi/heketi +- :2: https://docs.openshift.com/container-platform/3.9/install_config/install/advanced_install.html#configuring-ansible +- :3: https://docs.openshift.com/container-platform/3.9/install_config/cluster_metrics.html#openshift-prometheus +- :4: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/ +- :5: https://docs.openshift.com/container-platform/3.9/dev_guide/daemonsets.html +- :6: https://docs.openshift.com/container-platform/3.9/architecture/core_concepts/pods_and_services.html#services +- :7: https://docs.openshift.com/container-platform/latest/dev_guide/application_health.html +- :8: https://docs.openshift.com/container-platform/3.9/admin_guide/quota.html +- :9: https://docs.openshift.com/container-platform/3.9/admin_guide/limits.html +- :10: https://docs.openshift.com/container-platform/3.9/install_config/configuring_authentication.html#LDAPPasswordIdentityProvider +- :11: https://docs.openshift.com/container-platform/3.9/admin_guide/manage_rbac.html#admin-guide-manage-rbac +- :12: https://docs.openshift.com/container-platform/3.9/architecture/infrastructure_components/kubernetes_infrastructure.html#high-availability-masters +- :13: http://www.hawkular.org/ +- :14: https://docs.openshift.com/container-platform/3.9/architecture/networking/sdn.html +- :15: https://docs.openshift.com/container-platform/3.9/admin_guide/managing_networking.html#admin-guide-networking-networkpolicy +- :16: https://www.3scale.net/ diff --git a/technology/communication/ftp.md b/technology/communication/ftp.md @@ -0,0 +1,12 @@ +# ftp + +``` +ftp ftp.pyratebeard.net +ftp> ls +ftp> cd funny/ +ftp> get funny_image_01.png +``` + +## guest login +- username: anonymous +- password: email diff --git a/technology/communication/mutt.md b/technology/communication/mutt.md @@ -0,0 +1,8 @@ +# mutt + +## regex +[site{34}-]wug[@uat] +[bB]ackup[bot, Report] +wug@|@noreply + +[wWuUgG]@|@noreply|winbk01|ROS Health|Logwatch|[bB]ackup[bot, Report] diff --git a/technology/databases/mariadb.md b/technology/databases/mariadb.md @@ -0,0 +1,23 @@ +# mariadb +[knowledgebase](https://mariadb.com/kb/en/) + +```bash +dnf install mariadb mariadb-server +systemctl start mariadb.service +/usr/bin/mysql_secure_installation +mysql -u root -p +``` + +```sql +show databases; +show tables; +show columns from table; +show columns from database.table; +``` + +## user +```sql +select user from USER; +create USER username; +drop USER if exists username; +``` diff --git a/technology/databases/oracle.md b/technology/databases/oracle.md @@ -0,0 +1,11 @@ +# oracle + +- show max number of connections allowed +```sql +select name,value from v$parameter where name = 'sessions'; +``` + +- show current active connections +```sql +select count(*) from v$session; +``` diff --git a/technology/encryption/gpg.md b/technology/encryption/gpg.md @@ -0,0 +1,22 @@ +# gpg + +## encrypt file +``` +gpg -c <filename> +``` + +## decrypt file +``` +gpg <filename>.gpg +``` + +## clearsign message +``` +gpg --default-key <key_id> -o <output_file> --clearsign <input_file> + +## ref +https://www.linuxbabe.com/security/verify-pgp-signature-software-downloads-linux +https://unix.stackexchange.com/questions/288933/unable-to-verify-the-kernel-signature-gpg-cant-check-signature-public-key-no + +## red 
hat verify +https://access.redhat.com/articles/3530471 diff --git a/technology/linux/archlinux/index.md b/technology/linux/archlinux/index.md diff --git a/technology/linux/archlinux/pacman.md b/technology/linux/archlinux/pacman.md @@ -0,0 +1,7 @@ +# pacman + +## find package which contains $filename +``` +pacman -Fy +pacman -Fs $filename +``` diff --git a/technology/linux/audio_visual/index.md b/technology/linux/audio_visual/index.md @@ -0,0 +1,4 @@ +# audio_visual + +- [mpd](mpd) +- [mpv](mpv) diff --git a/technology/linux/audio_visual/mpd.md b/technology/linux/audio_visual/mpd.md @@ -0,0 +1,5 @@ +# mpd + +## radio playlist sites +https://www.radionomy.com/ +http://www.radiosure.com/stations/ diff --git a/technology/linux/audio_visual/mpv.md b/technology/linux/audio_visual/mpv.md @@ -0,0 +1,5 @@ +# mpv + +```zsh +mpv --video-unscaled=no --geometry=579x326+98%+2% --ontop +``` diff --git a/technology/linux/debian/apt_dpkg.md b/technology/linux/debian/apt_dpkg.md @@ -0,0 +1,9 @@ +# apt / dpkg + +- if error code (1) on `apt-get upgrade` [ref_1](#ref#1) + ``` + sudo dpkg --configure -a + ``` + +## ref +- :1: https://itsfoss.com/dpkg-returned-an-error-code-1/ diff --git a/technology/linux/debian/index.md b/technology/linux/debian/index.md diff --git a/technology/linux/general/bash.md b/technology/linux/general/bash.md @@ -0,0 +1,27 @@ +# bash + +use parameter of previous command ([ref 1](#ref#1)) +``` +mkdir test +cd $_ +``` +or +``` +mkdir test +cd !$ +``` + +## `find` examples +``` +find . -type f -iname "*regex*" -exec rm -f {} \; +``` + +## when was user created [ref_2](#ref#2) +- if user has never logged in after account creation + ``` + ls -l /home/<user>/.bash_logout + ``` + +## ref +- :1: https://unix.stackexchange.com/questions/125385/combined-mkdir-and-cd +- :2: https://it.toolbox.com/question/how-to-find-out-when-a-user-is-created-in-linux-030612 diff --git a/technology/linux/general/bin.md b/technology/linux/general/bin.md @@ -0,0 +1,14 @@ + + + /bin (and /sbin) were intended for programs that needed to be on a small / partition before the larger /usr, etc. partitions were mounted. These days, it mostly serves as a standard location for key programs like /bin/sh, although the original intent may still be relevant for e.g. installations on small embedded devices. + + /sbin, as distinct from /bin, is for system management programs (not normally used by ordinary users) needed before /usr is mounted. + + /usr/bin is for distribution-managed normal user programs. + + There is a /usr/sbin with the same relationship to /usr/bin as /sbin has to /bin. + + /usr/local/bin is for normal user programs not managed by the distribution package manager, e.g. locally compiled packages. You should not install them into /usr/bin because future distribution upgrades may modify or delete them without warning. + + /usr/local/sbin, as you can probably guess at this point, is to /usr/local/bin as /usr/sbin to /usr/bin. 
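a quick way to check which of these directories a given command actually comes from, and the order the shell searches them in (a minimal sketch using bash builtins):
```
# every match for the command, in PATH order - a locally built copy in
# /usr/local/bin is normally found before the distro's /usr/bin one
type -a vim

# the search order itself
echo "$PATH"
```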
+ diff --git a/technology/linux/general/dmidecode.md b/technology/linux/general/dmidecode.md @@ -0,0 +1,9 @@ +# dmidecode + +## how many pci slots (ref_1)[#ref#1] +``` +dmidecode -t 9 | grep "System Slot Information" | wc -l +``` + +## ref +- :1: https://unix.stackexchange.com/questions/191314/can-i-see-the-number-of-pci-slots-with-a-command diff --git a/technology/linux/general/i3lock.md b/technology/linux/general/i3lock.md @@ -0,0 +1,9 @@ +# i3lock + +lockscreen commands (https://www.reddit.com/r/unixporn/comments/8z15f9/i3lock_with_pixeleffect/) + +```zsh +ICON="/home/pyratebeard/tmp/stop2.png" ; TMPBG="/tmp/lockscreen.jpg" ; RES=$(xrandr | grep 'current' | sed -E 's/.*current\s([0-9]+)\sx\s([0-9]+).*/\1x\2/') ; ffmpeg -f x11grab-video_size $RES -y -i $DISPLAY -vf frei0r=pixeliz0r=0.02:0.02 -vframes 1 $TMPBG -loglevel quiet +ICON="/home/pyratebeard/tmp/stop2.png" ; TMPBG="/tmp/lockscreen.jpg" ; RES=$(xrandr | grep 'current' | sed -E 's/.*current\s([0-9]+)\sx\s([0-9]+).*/\1x\2/') ; ffmpeg -f x11grab -video_size $RES -y -i $DISPLAY -i $ICON -filter_complex "boxblur=5:1,overlay=(main_w-overlay_w)/2:(main_h-overlay_h)/2" -vframes 1 $TMPBG -loglevel quiet +/home/pyratebeard/src/warez/i3lock-fancy-multimonitor/lock -p +``` diff --git a/technology/linux/general/index.md b/technology/linux/general/index.md @@ -0,0 +1,15 @@ +# linux general + +- [bash](bash) +- [bin](bin) +- [dmidecode](dmidecode) +- [i3lock](i3lock) +- [journalctl](journalctl) +- [man_pages](man_pages) +- [mpd](mpd) +- [mpv](mpv) +- [rtv](rtv) +- [ssh](ssh) +- [systemctl](systemctl) +- [xclip](xclip) +- [xdotool](xdotool) diff --git a/technology/linux/general/journalctl.md b/technology/linux/general/journalctl.md @@ -0,0 +1,19 @@ +# journalctl + +https://www.loggly.com/ultimate-guide/using-journalctl/ +https://www.digitalocean.com/community/tutorials/how-to-use-journalctl-to-view-and-manipulate-systemd-logs + +search by user +``` +journalctl _UID=<uid> +``` + +search by command +``` +journalctl /usr/bin/sudo +journalctl $(which sudo) +journalctl -t sudo +``` + `-t` show syslog identifier + + [systemctl](systemctl) diff --git a/technology/linux/general/man_pages.md b/technology/linux/general/man_pages.md @@ -0,0 +1,31 @@ +# man pages + +## sections + +taken from `man man`: +``` +MANUAL SECTIONS + The standard sections of the manual include: + + 1 User Commands + 2 System Calls + 3 C Library Functions + 4 Devices and Special Files + 5 File Formats and Conventions + 6 Games et. al. 
+ 7 Miscellanea + 8 System Administration tools and Daemons +``` +[unix stackexchange](https://unix.stackexchange.com/questions/3586/what-do-the-numbers-in-a-man-page-mean) + +## searching +[cyberciti.biz](https://www.cyberciti.biz/faq/howto-search-all-the-linux-unix-man-pages/) + +`apropos` command + +```bash +apropos "term" +apropos -s 1 "term" +``` + +`-s` indicates search only specific section diff --git a/technology/linux/general/rtv.md b/technology/linux/general/rtv.md @@ -0,0 +1,4 @@ +# rtv + +rtv --enable-media (to use mailcap, although it seems to default) +export RTV_BROWSER=qutebrowser diff --git a/technology/linux/general/ssh.md b/technology/linux/general/ssh.md @@ -0,0 +1,20 @@ +# ssh + +- tunnel through jump server [ref_2](#ref#2) + ``` + ssh -t L7070:localhost:7071 user@jumphost ssh -t -D7071 user@furtherhost + ``` + ``` + ssh -A -t -l user jump-host \ + -L 8080:localhost:8080 \ + ssh -A -t -l user webserver.dmz \ + -L 8080:localhost:8080 + ``` +- mordhaus tunnel command + ``` + ssh -4 -A -t -ldburrows -D7070 kali ssh -4 -A -t -lpyratebeard -p22666 -D7070 powerzonemetal.uk + ``` + +## ref +- :1: http://lackof.org/taggart/hacking/ssh/ +- :2: http://digitalcrunch.com/linux/how-to-use-an-ssh-tunnel-through-a-jump-host/ diff --git a/technology/linux/general/systemctl.md b/technology/linux/general/systemctl.md @@ -0,0 +1,21 @@ +# systemctl + +```bash +systemctl list-units [ --all | --type=service ] +``` + +- show all enabled and disabled services +```bash +systemctl list-unit-files +``` + +## create service file +``` +vi /etc/systemd/system/<name>.service +``` + +[journalctl](journalctl) + +## ref +- :1: https://www.digitalocean.com/community/tutorials/how-to-use-systemctl-to-manage-systemd-services-and-units +- https://www.digitalocean.com/community/tutorials/how-to-use-systemctl-to-manage-systemd-services-and-units diff --git a/technology/linux/general/xclip.md b/technology/linux/general/xclip.md @@ -0,0 +1,7 @@ +# xclip + +## copy image to clipboard +```bash +xclip -selection clipboard -t image/png -i <image>.png +``` +use `image/jpeg` or `image/jpg` for jpeg diff --git a/technology/linux/general/xdotool.md b/technology/linux/general/xdotool.md @@ -0,0 +1,6 @@ +# xdotool + +## turn of capslock +``` +xdotool key Caps_Lock +``` diff --git a/technology/linux/redhat/index.md b/technology/linux/redhat/index.md diff --git a/technology/linux/redhat/rpm.md b/technology/linux/redhat/rpm.md @@ -0,0 +1,76 @@ +# rpm + +## how to build rpm videos +- [urban pengiun](#ref#3) + - distributing new repo file + - create new 'build' account + - run `rpmdev-setuptree` + - creates 'rpmbuild' dir structure + - create SOURCES dir structure + ``` + cd ~/rpmbuild/SOURCES/ + mkdir tuprepo-1/etc/yum.repos.d + ``` + *'tuprepo-1' is name and version number* + - copy across repo file + ``` + cp /tmp/CentOS-Tup.repo !$ + ``` + *use '!$' for last arg* + - tar zip dir + ``` + tar -cvzf tuprepo-1.tar.gz tuprepo-1/ + ``` + - create spec file + ``` + cd ../SPECS/ + rpmdev-newspec tuprepo.spec + vi tuprepo.spec + ``` + - spec file details + ``` + Name: qradar_bak + Version: 1 + Release: 1%{?dist} + Summary: Pull backup data and configuration files for QRadar + + License: GPL + URL: https://www.ward.ie + Source0: qradar_bak-1.tgz + + BuildRoot: %{_tmppath}/%{name}-buildroot + + %description + Pull nightly data backups and weekly configuration files from QRadar master. 
+ Clean up is carried out weekly + + + %prep + %autosetup + + %install + mkdir -p "$RPM_BUILD_ROOT" + cp -R * "$RPM_BUILD_ROOT" + + %clean + rm -rf "$RPM_BUILD_ROOT" + + %files + %defattr(-,root,root,-) + /usr/local/bin/backup_pull + /usr/local/bin/cleanup + /usr/local/etc/config + + %changelog + * Mon Oct 22 2018 rpmbuild + ``` + - build rpm + ``` + cd $HOME + rpmbuild -v -bb rpmbuild/SPECS/tuprepo.spec + ``` + +## ref +- :1: https://docs.fedoraproject.org/en-US/quick-docs/creating-rpm-packages/index.html +- :2: https://rpm-packaging-guide.github.io/ +- :3: https://www.youtube.com/watch?v=364Plv6zuBU diff --git a/technology/monitoring/snmpwalk.md b/technology/monitoring/snmpwalk.md @@ -0,0 +1,5 @@ +# snmpwalk + +``` +snmpwalk -Os -c public -v 1 localhost system +``` diff --git a/technology/networking/index.md b/technology/networking/index.md @@ -0,0 +1,3 @@ +# networks + +- [subnet_cheatsheet](subnet_cheatsheet) diff --git a/technology/networking/subnet_cheatsheet.md b/technology/networking/subnet_cheatsheet.md @@ -0,0 +1,247 @@ +# subnet cheatsheet + +https://oav.net/mirrors/cidr.html + +Netmask Netmask (binary) CIDR Notes +_____________________________________________________________________________ +255.255.255.255 11111111.11111111.11111111.11111111 /32 Host (single addr) +255.255.255.254 11111111.11111111.11111111.11111110 /31 Unuseable +255.255.255.252 11111111.11111111.11111111.11111100 /30 2 useable +255.255.255.248 11111111.11111111.11111111.11111000 /29 6 useable +255.255.255.240 11111111.11111111.11111111.11110000 /28 14 useable +255.255.255.224 11111111.11111111.11111111.11100000 /27 30 useable +255.255.255.192 11111111.11111111.11111111.11000000 /26 62 useable +255.255.255.128 11111111.11111111.11111111.10000000 /25 126 useable +255.255.255.0 11111111.11111111.11111111.00000000 /24 "Class C" 254 useable + +255.255.254.0 11111111.11111111.11111110.00000000 /23 2 Class C's +255.255.252.0 11111111.11111111.11111100.00000000 /22 4 Class C's +255.255.248.0 11111111.11111111.11111000.00000000 /21 8 Class C's +255.255.240.0 11111111.11111111.11110000.00000000 /20 16 Class C's +255.255.224.0 11111111.11111111.11100000.00000000 /19 32 Class C's +255.255.192.0 11111111.11111111.11000000.00000000 /18 64 Class C's +255.255.128.0 11111111.11111111.10000000.00000000 /17 128 Class C's +255.255.0.0 11111111.11111111.00000000.00000000 /16 "Class B" + +255.254.0.0 11111111.11111110.00000000.00000000 /15 2 Class B's +255.252.0.0 11111111.11111100.00000000.00000000 /14 4 Class B's +255.248.0.0 11111111.11111000.00000000.00000000 /13 8 Class B's +255.240.0.0 11111111.11110000.00000000.00000000 /12 16 Class B's +255.224.0.0 11111111.11100000.00000000.00000000 /11 32 Class B's +255.192.0.0 11111111.11000000.00000000.00000000 /10 64 Class B's +255.128.0.0 11111111.10000000.00000000.00000000 /9 128 Class B's +255.0.0.0 11111111.00000000.00000000.00000000 /8 "Class A" + +254.0.0.0 11111110.00000000.00000000.00000000 /7 +252.0.0.0 11111100.00000000.00000000.00000000 /6 +248.0.0.0 11111000.00000000.00000000.00000000 /5 +240.0.0.0 11110000.00000000.00000000.00000000 /4 +224.0.0.0 11100000.00000000.00000000.00000000 /3 +192.0.0.0 11000000.00000000.00000000.00000000 /2 +128.0.0.0 10000000.00000000.00000000.00000000 /1 +0.0.0.0 00000000.00000000.00000000.00000000 /0 IP space + + Net Host Total +Net Addr Addr Addr Number +Class Range NetMask Bits Bits of hosts +---------------------------------------------------------- +A 0-127 255.0.0.0 8 24 16777216 (i.e. 114.0.0.0) +B 128-191 255.255.0.0 16 16 65536 (i.e. 
150.0.0.0) +C 192-254 255.255.255.0 24 8 256 (i.e. 199.0.0.0) +D 224-239 (multicast) +E 240-255 (reserved) +F 208-215 255.255.255.240 28 4 16 +G 216/8 ARIN - North America +G 217/8 RIPE NCC - Europe +G 218-219/8 APNIC +H 220-221 255.255.255.248 29 3 8 (reserved) +K 222-223 255.255.255.254 31 1 2 (reserved) +(ref: RFC1375 & http://www.iana.org/assignments/ipv4-address-space ) +( http://www.iana.org/numbers.htm ) +---------------------------------------------------------- + +The current list of special use prefixes: + 0.0.0.0/8 + 127.0.0.0/8 + 192.0.2.0/24 + 10.0.0.0/8 + 172.16.0.0/12 + 192.168.0.0/16 + 169.254.0.0/16 + all D/E space +(ref: RFC1918 http://www.rfc-editor.org/rfc/rfc1918.txt ) +( or ftp://ftp.isi.edu/in-notes/rfc1918.txt ) +(rfc search: http://www.rfc-editor.org/rfcsearch.html ) +( http://www.ietf.org/ietf/1id-abstracts.txt ) +( http://www.ietf.org/shadow.html ) + + +Martians: (updates at: www.iana.org/assignments/ipv4-address-space ) + no ip source-route + access-list 100 deny ip host 0.0.0.0 any + deny ip 0.0.0.0 0.255.255.255 any log ! antispoof + deny ip 0.0.0.0 0.255.255.255 0.0.0.0 255.255.255.255 ! antispoof + deny ip any 255.255.255.128 0.0.0.127 ! antispoof + deny ip host 0.0.0.0 any log ! antispoof + deny ip host [router intf] [router intf] ! antispoof + deny ip xxx.xxx.xxx.0 0.0.0.255 any log ! lan area + deny ip 0/8 0.255.255.255 any log ! IANA - Reserved + deny ip 1/8 0.255.255.255 any log ! IANA - Reserved + deny ip 2/8 0.255.255.255 any log ! IANA - Reserved + deny ip 5/8 0.255.255.255 any log ! IANA - Reserved + deny ip 7/8 0.255.255.255 any log ! IANA - Reserved + deny ip 10.0.0.0 0.255.255.255 any log ! IANA - Private Use + deny ip 23/8 0.255.255.255 any log ! IANA - Reserved + deny ip 27/8 0.255.255.255 any log ! IANA - Reserved + deny ip 31/8 0.255.255.255 any log ! IANA - Reserved + deny ip 36-37/8 0.255.255.255 any log ! IANA - Reserved + deny ip 39/8 0.255.255.255 any log ! IANA - Reserved + deny ip 41-42/8 0.255.255.255 any log ! IANA - Reserved + deny ip 50/8 0.255.255.255 any log ! IANA - Reserved + deny ip 58-60/8 0.255.255.255 any log ! IANA - Reserved + deny ip 69-79/8 0.255.255.255 any log ! IANA - Reserved + deny ip 82-95/8 0.255.255.255 any log ! IANA - Reserved + deny ip 96-126/8 0.255.255.255 any log ! IANA - Reserved + deny ip 127/8 0.255.255.255 any log ! IANA - Reserved + deny ip 169.254.0.0 0.0.255.255 any log ! link-local network + deny ip 172.16.0.0 0.15.255.255 any log ! reserved + deny ip 192.168.0.0 0.0.255.255 any log ! reserved + deny ip 192.0.2.0 0.0.0.255 any log ! test network + deny ip 197/8 0.255.255.255 any log ! IANA - Reserved + deny ip 220/8 0.255.255.255 any log ! IANA - Reserved + deny ip 222-223/8 0.255.255.255 any log ! IANA - Reserved + deny ip 224.0.0.0 31.255.255.255 any log ! multicast + deny ip 224.0.0.0 15.255.255.255 any log ! unless MBGP-learned routes + deny ip 224-239/8 0.255.255.255 any log ! IANA - Multicast + deny ip 240-255/8 0.255.255.255 any log ! IANA - Reserved + +filtered source addresses + 0/8 ! broadcast + 10/8 ! RFC 1918 private + 127/8 ! loopback + 169.254.0/16 ! link local + 172.16.0.0/12 ! RFC 1918 private + 192.0.2.0/24 ! TEST-NET + 192.168.0/16 ! RFC 1918 private + 224.0.0.0/4 ! class D multicast + 240.0.0.0/5 ! class E reserved + 248.0.0.0/5 ! reserved + 255.255.255.255/32 ! 
broadcast + +ARIN administrated blocks: (http://www.arin.net/regserv/IPStats.html) + 24.0.0.0/8 (portions of) + 63.0.0.0/8 + 64.0.0.0/8 + 65.0.0.0/8 + 66.0.0.0/8 + 196.0.0.0/8 + 198.0.0.0/8 + 199.0.0.0/8 + 200.0.0.0/8 + 204.0.0.0/8 + 205.0.0.0/8 + 206.0.0.0/8 + 207.0.0.0/8 + 208.0.0.0/8 + 209.0.0.0/8 + 216.0.0.0/8 +---------------------------------------------------------- + +well known ports: (rfc1700.txt) + www.iana.org/assignments/port-numbers + +protocol numbers: + www.iana.org/assignments/protocol-numbers + www.iana.org/numbers.htm + +ICMP(Types/Codes) + Testing Destination Reachability & Status + (0/0) Echo-Reply + (8/0) Echo + Unreachable Destinations + (3/0) Network Unreachable + (3/1) Host Unreachable + (3/2) Protocol Unreachable + (3/3) Port Unreachable + (3/4) Fragmentaion Needed and DF set (Pkt too big) + (3/5) Source Route Failed + (3/6) Network Unknown + (3/7) Host Unknown + (3/9) DOD Net Prohibited + (3/10) DOD Host Prohibited + (3/11) Net TOS Unreachable + (3/12) Host TOS Unreachable + (3/13) Administratively Prohibited + (3/14) Host Precedence Unreachable + (3/15) Precedence Unreachable + Flow Control + (4/0) Source-Quench [RFC 1016] + Route Change Requests from Gateways + (5/0) Redirect Datagrams for the Net + (5/1) Redirect Datagrams for the Host + (5/2) Redirect Datagrams for the TOS and Net + (5/3) Redirect Datagrams for the TOS and Host + Router + (6/-) Alternate-Address + (9/0) Router-Advertisement + (10/0) Router-Solicitation + Detecting Circular or Excessively Long Routes + (11/0) Time to Live Count Exceeded + (11/1) Fragment Reassembly Time Exceeded + Reporting Incorrect Datagram Headers + (12/0) Parameter-Problem + (12/1) Option Missing + (12/2) No Room for Option + Clock Synchronization and Transit Time Estimation + (13/0) Timestamp-Request + (14/0) Timestamp-Reply + Obtaining a Network Address (RARP Alternative) + (15/0) Information-Request + (16/0) Information-Reply + Obtaining a Subnet Mask [RFC 950] + (17/0) Address Mask-Request + (18/0) Address Mask-Reply + Other + (30/0) Traceroute + (31/0) Conversion-Error + (32/0) Mobile-Redirect + +Ref: [RFC 792] [RFC 896] [RFC 950] [RFC 1016] + www.cisco.com/univercd/cc/td/doc/product/lan/cat6000/sw_5_3/cofigide/qos.htm#19774 + + + +Decimal system Prefix's + Factor Exponent Prefix +--------------------------------------------------- + 1 000 000 000 000 000 000 000 000...10^24....yotta + 1 000 000 000 000 000 000 000...10^21....zetta + 1 000 000 000 000 000 000...10^18....exa + 1 000 000 000 000 000...10^15....peta + 1 000 000 000 000...10^12....tera + 1 000 000 000...10^9.....giga + 1 000 000...10^6.....mega + 1 000...10^3.....kilo + 100...10^2.....hecto + 10...10^1.....deka + 0.1...10^-1....deci + 0.01...10^-2....centi + 0.001...10^-3....milli + 0.000 001...10^-6....micro + 0.000 000 001...10^-9....nano + 0.000 000 000 001...10^-12...pico + 0.000 000 000 000 001...10^-15...femto + 0.000 000 000 000 000 001...10^-18...atto + 0.000 000 000 000 000 000 001...10^-21...zepto + 0.000 000 000 000 000 000 000 001...10^-24...yocto +--------------------------------------------------- + +Convert Fahrenheit <> Celsius: + Celsius = (Fahrenheit - 32) / 1.8 + Fahrenheit = (Celsius * 1.8) + 32 + + +last updated: 4jul02 + + + diff --git a/technology/networking/wifi_connect.md b/technology/networking/wifi_connect.md @@ -0,0 +1,10 @@ + +``` +ip link show wlp2s0 +ip link set wlp2s0 up +iw wlp2s0 link +sudo iw wlp2s0 scan | grep -i ssid +ps -ef | grep wpa_supplicant +sudo kill -9 <pid> +connect lib/doc/wifi/home ; sudo dhclient wlp2s0 
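# note: `connect` above appears to be a local helper script, not a standard tool.
# rough equivalent with plain wpa_supplicant, assuming lib/doc/wifi/home is a
# wpa_supplicant config (e.g. generated with wpa_passphrase):
#   sudo wpa_supplicant -B -i wlp2s0 -c lib/doc/wifi/home
#   sudo dhclient wlp2s0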
+``` diff --git a/technology/virtualisation/esxi.md b/technology/virtualisation/esxi.md @@ -0,0 +1,61 @@ +# esxi + +## list vms +``` +esxcli vm process list +``` + +``` +vim-cmd vmsvc/getallvms +``` + +## create vm +[steps taken from here](#ref#2) + +1. create vm folder + ``` + mkdir /vmfs/volumes/datastore1/hostname/ + ``` + +2. create hard disk + ``` + vmkfstools -c 32G -a lsilogic hostname.vmdk + ``` + * `-c` - createvirtualdisk + * `-a` - adaptertype [buslogic|lsilogic|ide|lsisas|pvscsi] + * `-d` - diskformat [zeroedthick|thin|eagerzeroedthick] + +3. create hostname.vmx file with following ([ref 3](#ref#3) for info on vmx files) + ``` + config.version = "8" + virtualHW.version= "7" + guestOS = "winnetenterprise-64" + memsize = "1024" + displayname = "VirtualCenter" + scsi0.present = "TRUE" + scsi0.virtualDev = "lsilogic" + scsi0:0.present = "TRUE" + scsi0:0.fileName = "VirtualCenter.vmdk" + ide1:0.present = "true" + ide1:0.deviceType = "cdrom-image" + ide1:0.filename = "/vmfs/volumes/4a68046d-2159a120-ebac-001a9253e68f/win2k3_x64.iso" + ide1:0.startConnected = "TRUE" + ethernet0.present= "true" + ethernet0.startConnected = "true" + ethernet0.virtualDev = "e1000" + ``` + +4. change permissions on vmx file + ``` + chmod 744 hostname.vmx + ``` + +5. add vm to inventory + ``` + vim-cmd solo/registervm /vmfs/volumes/datastore/hostname/hostname.vmx hostname + ``` + +## ref +- :1: https://pubs.vmware.com/vsphere-51/index.jsp?topic=%2Fcom.vmware.vsphere.solutions.doc%2FGUID-0A264828-3933-4F4F-82D7-B5006A90CDBA.html +- :2: http://vm-help.com/esx40i/manage_without_VI_client_1.php +- :3: http://sanbarrow.com/vmx.html
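
once the vm is registered it can be powered on and checked from the same shell. a sketch using `vim-cmd`; the vmid comes from the `getallvms` listing:
```
vim-cmd vmsvc/getallvms            # note the Vmid column for the new vm
vim-cmd vmsvc/power.on <vmid>
vim-cmd vmsvc/power.getstate <vmid>
```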