As microservice architecture is becoming mainstream, VMware is adopting an open source service mesh for further integration with its products. Right now this is only available for Cloud PKS.

For more info, please follow the link.

ONOS tutorial with mininet :Part 2

We have progressed through learning SDN essentials by installing ONOS and testing out a simple network topology with one switch and two hosts. Now it's time to take it to another level by adding routing in between.

Note: we won't be using any real or simulated router for this setup, as the intention here is simply to test a network topology with a router in between. The IP forwarding functionality in Linux — which is our base server for Mininet and ONOS — will be used for the routing purpose.

The initial setup and configuration involves creating a python code to create the required topology.


linuxrouter.py: Example network with Linux IP router
This example converts a Node into a router using IP forwarding
already built into Linux.
The example topology creates a router and three IP subnets:
    - (r0-eth1, IP:
    - (r0-eth2, IP:
    - (r0-eth3, IP:
Each subnet consists of a single host connected to
a single switch:
    r0-eth1 - s1-eth1 - h1-eth0 (IP:
    r0-eth2 - s2-eth1 - h2-eth0 (IP:
    r0-eth3 - s3-eth1 - h3-eth0 (IP:
The example relies on default routing entries that are
automatically created for each router interface, as well
as 'defaultRoute' parameters for the host interfaces.
Additional routes may be added to the router or hosts by
executing 'ip route' or 'route' commands on the router or hosts.

from mininet.topo import Topo
from mininet.net import Mininet
from mininet.node import Node,Controller, OVSKernelSwitch, RemoteController
from mininet.log import setLogLevel, info
from mininet.cli import CLI

class LinuxRouter( Node ):
    """A Mininet Node that routes IPv4 traffic between its interfaces.

    Routing is done by the Linux kernel itself: we simply enable
    net.ipv4.ip_forward while the node is alive and disable it again
    on teardown.
    """

    def config( self, **params ):
        # Standard Node configuration first, then turn on kernel
        # IP forwarding so packets are routed between interfaces.
        super( LinuxRouter, self ).config( **params )
        self.cmd( 'sysctl net.ipv4.ip_forward=1' )

    def terminate( self ):
        # Restore the kernel default before the node goes away.
        self.cmd( 'sysctl net.ipv4.ip_forward=0' )
        super( LinuxRouter, self ).terminate()

class NetworkTopo( Topo ):
    """A LinuxRouter connecting three IP subnets, one host per subnet.

        h1 ( - s1 - r0-eth1 (
        h2 ( - s2 - r0-eth2 (
        h3 ( - s3 - r0-eth3 (

    NOTE(fix): the published copy of this code had every IP literal
    stripped to an empty string, which makes addNode/addHost fail;
    the addresses below are restored from the canonical Mininet
    linuxrouter.py example and match the ping/route output shown
    later in this article.
    """

    def build( self, **_opts ):
        # Gateway address for subnet 1; assigned to r0-eth1 and used
        # as the hosts' default route target on that subnet.
        defaultIP = ''
        router = self.addNode( 'r0', cls=LinuxRouter, ip=defaultIP )

        s1, s2, s3 = [ self.addSwitch( s ) for s in ( 's1', 's2', 's3' ) ]

        # One router interface per subnet, each carrying that
        # subnet's gateway address.
        self.addLink( s1, router, intfName2='r0-eth1',
                      params2={ 'ip' : defaultIP } )  # for clarity
        self.addLink( s2, router, intfName2='r0-eth2',
                      params2={ 'ip' : '' } )
        self.addLink( s3, router, intfName2='r0-eth3',
                      params2={ 'ip' : '' } )

        # Each host default-routes via its subnet's router interface.
        h1 = self.addHost( 'h1', ip='',
                           defaultRoute='via' )
        h2 = self.addHost( 'h2', ip='',
                           defaultRoute='via' )
        h3 = self.addHost( 'h3', ip='',
                           defaultRoute='via' )

        for host, switch in [ (h1, s1), (h2, s2), (h3, s3) ]:
            self.addLink( host, switch )

def run():
    """Build the router topology, attach the remote ONOS controller,
    start the network and drop into the Mininet CLI.

    Bug fixes vs. the published version:
      * net.start() was never called, so switches never connected to
        the controller before the CLI opened.
      * net.stop() was never called, leaking veth pairs and OVS
        bridges on CLI exit.
      * the controller IP had been stripped to an empty string;
        restored to (ONOS runs on the same server).
    """
    topo = NetworkTopo()
    # Use the remote ONOS controller rather than Mininet's default.
    net = Mininet( topo=topo, controller=RemoteController,
                   switch=OVSKernelSwitch )
    net.addController( 'c1', controller=RemoteController, ip='' )
    net.start()
    info( '*** Routing Table on Router:\n' )
    info( net[ 'r0' ].cmd( 'route' ) )
    CLI( net )
    net.stop()

if __name__ == '__main__':
    setLogLevel( 'info' )
    # Bug fix: the published version set the log level but never
    # invoked run(), so the script did nothing.
    run()

This will create a topology

Running the python code will execute all the steps and create the above mentioned topology .
We have mentioned our ONOS controller installed in the same server as controller to the code.

root@master1:/home/sreejithkj52# python top1.py 
*** Creating network
*** Adding controller
*** Adding hosts:
h1 h2 h3 r0 
*** Adding switches:
s1 s2 s3 
*** Adding links:
(h1, s1) (h2, s2) (h3, s3) (s1, r0) (s2, r0) (s3, r0) 
*** Configuring hosts
h1 h2 h3 r0 
*** Starting controller
c0 c1 
*** Starting 3 switches
s1 s2 s3 ...
*** Routing Table on Router:
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface   U     0      0        0 r0-eth1   U     0      0        0 r0-eth2   U     0      0        0 r0-eth3
*** Starting CLI:

Topology view in ONOS

From the devices view we will be able to see the configured three switch and its details.

To view the hosts attached to the switches click on the host view section.

After the configuration, all the hosts will be reachable from each other.

mininet> h1 ping h3
PING ( 56(84) bytes of data.
64 bytes from icmp_seq=1 ttl=63 time=26.1 ms
64 bytes from icmp_seq=2 ttl=63 time=0.285 ms
mininet> h1 ping h2
PING ( 56(84) bytes of data.
64 bytes from icmp_seq=1 ttl=63 time=7.86 ms
64 bytes from icmp_seq=2 ttl=63 time=0.240 ms
mininet> h3 ping h2
PING ( 56(84) bytes of data.
64 bytes from icmp_seq=1 ttl=63 time=6.37 ms
64 bytes from icmp_seq=2 ttl=63 time=0.233 ms

Note the time delay for the first packet: this is the time required to contact the SDN controller and get the flows enforced. After the first packet, the flows will be populated in all SDN-enabled switches, so there is no need to contact the controller any more — further communication will happen directly.

mininet> h1 ifconfig
h1-eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet  netmask  broadcast
        inet6 fe80::ac12:bbff:fe99:8b0d  prefixlen 64  scopeid 0x20<link>
        ether ae:12:bb:99:8b:0d  txqueuelen 1000  (Ethernet)
        RX packets 1189  bytes 96316 (94.0 KiB)
        RX errors 0  dropped 1182  overruns 0  frame 0
        TX packets 21  bytes 1642 (1.6 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

We will continue doing further test by enabling a python webserver from h1 and accessing it through other hosts .

mininet> h1 python -m SimpleHTTPServer 80 &
mininet> h2 wget -O - h1
--2018-02-23 12:46:49--
Connecting to connected.
HTTP request sent, awaiting response... 200 OK
Length: 604 [text/html]
Saving to: ‘STDOUT’

-                     0%[                    ]       0  --.-KB/s               <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN"><html>
<title>Directory listing for /</title>
<h2>Directory listing for /</h2>
<li><a href=".bash_history">.bash_history</a>
<li><a href=".bash_logout">.bash_logout</a>
<li><a href=".bashrc">.bashrc</a>
<li><a href=".profile">.profile</a>
<li><a href=".ssh/">.ssh/</a>
<li><a href=".viminfo">.viminfo</a>
<li><a href="customtopo.py">customtopo.py</a>
<li><a href="gitpulltest/">gitpulltest/</a>
<li><a href="gitsync/">gitsync/</a>
<li><a href="playbooks/">playbooks/</a>
<li><a href="top1.py">top1.py</a>
-                   100%[===================>]     604  --.-KB/s    in 0s      

2018-02-23 12:46:49 (191 MB/s) - written to stdout [604/604]

Accessing from h3

mininet> h3 wget -O - h1
--2018-02-23 12:48:09--
Connecting to connected.
HTTP request sent, awaiting response... 200 OK
Length: 604 [text/html]
Saving to: ‘STDOUT’

-                     0%[                    ]       0  --.-KB/s               <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN"><html>
<title>Directory listing for /</title>
<h2>Directory listing for /</h2>
<li><a href=".bash_history">.bash_history</a>
<li><a href=".bash_logout">.bash_logout</a>
<li><a href=".bashrc">.bashrc</a>
<li><a href=".profile">.profile</a>
<li><a href=".ssh/">.ssh/</a>
<li><a href=".viminfo">.viminfo</a>
<li><a href="customtopo.py">customtopo.py</a>
<li><a href="gitpulltest/">gitpulltest/</a>
<li><a href="gitsync/">gitsync/</a>
<li><a href="playbooks/">playbooks/</a>
<li><a href="top1.py">top1.py</a>
-                   100%[===================>]     604  --.-KB/s    in 0s      

2018-02-23 12:48:09 (202 MB/s) - written to stdout [604/604]

This tutorial has demonstrated how easy is to setup a custom topology in mininet and connecting the same to ONOS controller.

ONOS tutorial with mininet : Part 1


ONOS is an SDN controller specifically designed for service providers. The intention is to create a software-defined network operating system that integrates all network applications and functions in a virtualized format. The current ONOS version is 1.12.0.


Mininet is a network emulator which can create virtual switches and hosts and connect them to SDN controllers. Mininet can be installed on your laptop, and complex networking solutions and topologies can be tested out with ease.


S1-Switch which will be used to connect two hosts

H1 -host 1

H2 -host 2

The topology we are attempting to create here is a single switch and two hosts connected .SDN controller ONOS will be controlling the traffic flows between the devices

ONOS Installation

root@master1: wget -c http://downloads.onosproject.org/release/onos-1.12.0.tar.gz
root@master1:tar xzf onos-1.12.0.tar.gz
root@master1: mv onos-1.12.0 onos
root@master1:/opt/onos/bin/onos-service start
root@master1:/opt# /opt/onos/bin/onos-service start
karaf: JAVA_HOME not set; results may vary
Welcome to Open Network Operating System (ONOS)!
     ____  _  ______  ____     
    / __ \/ |/ / __ \/ __/   
   / /_/ /    / /_/ /\ \     
Documentation: wiki.onosproject.org      
Tutorials:     tutorials.onosproject.org 
Mailing lists: lists.onosproject.org     

Come help out! Find out how at: contribute.onosproject.org 

Hit '<tab>' for a list of available commands
and '[cmd] --help' for help on a specific command.
Hit '<ctrl-d>' or type 'system:shutdown' or 'logout' to shutdown ONOS.

onos> app -s
onos> app download onos-appfwd
onos> feature:list | grep onos-app
onos> feature:install onos-apps-fwd
onos> list | grep onos-*
onos> app activate org.onosproject.openflow
onos> app -a -s

We can check the enabled applications in ONOS GUI

mininet configuration

root@master1:/home/sreejithkj52# sudo mn --controller remote,ip=
*** Creating network
*** Adding controller
*** Adding hosts:
h1 h2 
*** Adding switches:
*** Adding links:
(h1, s1) (h2, s1) 
*** Configuring hosts
h1 h2 
*** Starting controller
*** Starting 1 switches
s1 ...
*** Starting CLI:
mininet> h1 ping h2
PING ( 56(84) bytes of data.
64 bytes from icmp_seq=1 ttl=64 time=84.1 ms
64 bytes from icmp_seq=2 ttl=64 time=0.284 ms
64 bytes from icmp_seq=3 ttl=64 time=0.058 ms

Flows for the device

Creating private network in docker

Create a private network using docker network command

docker network create --subnet= kubenet

Assign the IP to the container using --ip

docker run --net kubenet --ip -it -d ubuntu

Building modern web applications-Part 2

The Nginx reverse proxy has been configured and the service is running after the successful configuration of nginx. You might get a "Bad Gateway" error if you try to connect to nginx using a web browser. This means packets are hitting the nginx reverse proxy, but since our backend server is not initialized and started yet, nginx returns a gateway error.

Checkpoint 1: Nginx web server configured

Configuring app server in google cloud

Here we will configure a node.js application as an app server for our testing.The application is simple and it will be listening on port 7555 for any incoming connections and will show a dialog box to create a new user, And when we click on “Create User”,a new user will be created in the mysql server which we will be using as a backend service.

Node.js app-****(this part has been taken from the link https://hackernoon.com/setting-up-node-js-with-a-database-part-1-3f2461bdd77f
Thanks to Robert Tod

Creating and initializing node.js app

Install Node.js
Install MySQL
Create a HTTP API for writing to the database
Create some HTML and JS to POST to the API
Use Knex migrations to create a user database schema

root@web01:~/tutorial_node_database# ls
index.js knexfile.js migrations node_modules package.json public store.js
root@web01:~/tutorial_node_database# cat index.js
const express = require(‘express’)
const bodyParser = require(‘body-parser’)
const store = require(‘./store’)
const app = express()
app.post(‘/createUser’, (req, res) => {
username: req.body.username,
password: req.body.password
.then(() => res.sendStatus(200))
app.listen(7555, () => {
console.log(‘Server running on http://localhost:7555’)

root@web01:~/tutorial_node_database# cat knexfile.js
module.exports = {
client: ‘mysql’,
connection: {
user: ‘root’,
password: ‘sree’,
database: ‘tutorial_node_database’
root@web01:~/tutorial_node_database# cat package.json
“name”: “tutorial_node_database”,
“version”: “1.0.0”,
“description”: “”,
“main”: “index.js”,
“scripts”: {
“test”: “echo \”Error: no test specified\” && exit 1″
“author”: “”,
“license”: “ISC”,
“dependencies”: {
“body-parser”: “^1.18.1”,
“express”: “^4.15.4”,
“knex”: “^0.13.0”,
“mysql”: “^2.14.1”
root@web01:~/tutorial_node_database# cat store.js
const knex = require(‘knex’)(require(‘./knexfile’))
module.exports = {
createUser ({ username, password }) {
console.log(`Add user ${username} with password ${password}`)
return knex(‘users’).insert({

root@web01:~/tutorial_node_database# cd public/
root@web01:~/tutorial_node_database/public# ls
app.js index.html
root@web01:~/tutorial_node_database/public# cat app.js
const CreateUser = document.querySelector(‘.CreateUser’)
CreateUser.addEventListener(‘submit’, (e) => {
const username = CreateUser.querySelector(‘.username’).value
const password = CreateUser.querySelector(‘.password’).value
post(‘/createUser’, { username, password })
function post (path, data) {
return window.fetch(path, {
method: ‘POST’,
headers: {
‘Accept’: ‘application/json’,
‘Content-Type’: ‘application/json’
body: JSON.stringify(data)

root@web01:~/tutorial_node_database/public# cat index.html
<!DOCTYPE html>
<title>Node database tutorial</title>
<form class=”CreateUser”>
<h1>Create a new user</h1>
<input type=”text” class=”username” placeholder=”username”>
<input type=”password” class=”password” placeholder=”password”>
<input type=”submit” value=”Create user”>
<script src=”/app.js”></script>

Configure the knexfile.js appropriately to connect to the database.
Use knex to create a new user

root@web01:~/tutorial_node_database# knex migrate:make new_user_for_node
Created Migration: /root/tutorial_node_database/migrations/20170924060137_new_user_for_node.js

Copy the below contents

exports.up = function (knex) {
return knex.schema.createTable(‘user’, function (t) {
t.timestamps(false, true)
exports.down = function (knex) {
return knex.schema.dropTableIfExists(‘user’)

Move to working directory and start node

root@e253b80241fc:/tutorial-node-database# node .
Server running on http://localhost:7555


Installing and configuring mysql server.
Create an instance in google cloud.

root@web01:/# sudo apt-get install mysql-server
root@web01:/# service mysql restart

We have configured three instances and the application will be accessible through nginx webserver
Testing the application
We can see from the live node console that user got added

root@e253b80241fc:/tutorial-node-database# node .
Server running on http://localhost:7555
Add user sree6 with password sree6

We check the DB server we can see the user got added.

mysql> show databases;
| Database |
| information_schema |
| mysql |
| performance_schema |
| sys |
| tutorial_node_database |
5 rows in set (0.04 sec)
mysql> use tutorial_node_database
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A
Database changed
mysql> select * from user;
| id | username | password | created_at | updated_at |
| 1 | sree6 | sree6 | 2017-09-24 11:51:37 | 2017-09-24 11:51:37 |
1 row in set (0.00 sec)
mysql> exit

Uploading code to GitHub

Initializing Github and adding a repository

git init
git add .
git commit -m “First commit”
git remote add origin remote repository URL
git remote -v
git push origin master
If not working
git fetch origin master
git push origin master --force

Building modern web applications-Part 1

Stage: 1 – Building a three-tier web application on Google Cloud


The intent of this tutorial is getting comfortable with the app dependency flows of modern applications in the cloud and how to migrate flawlessly to the latest container based and serverless technologies.

Application has always been the focal point in the enterprise datacenter.Even with all latest trending technologies which are gaining prominence on a daily basis , the end result expected is same from all tools and platforms.Building the better application and augment business strategies.

The three-tier app model has been around for a long time and it has served applications really well.

In this tutorial as part of stage -1.

Infrastructure setup has nginx configured as a reverse proxy,node.js application which will serve as an app server and a MySQL database server.

Data flow happens like this.

[Nginx(Ubuntu 16.04) —>node.js (Ubuntu 16.04)—>mysql(Ubuntu 16.04)].

Entire setup will run on Google Cloud Compute Engine.Create web server ubuntu vm using google cloud management interface.


or using a simple gcloud command line.

gcloud compute instances create example-instance-1 example-instance-2 example-instance-3 --zone us-central1-a

Configuring nginx as reverse proxy

root@web01:~#apt-get update
root@web01:~#apt-get install nginx

This will get nginx installed

root@webserver:~# service nginx status
● nginx.service -- A high-performance web server and a reverse proxy server
  Loaded: loaded (/lib/systemd/system/nginx.service; enabled; vendor preset: enabled)
  Active: active (running) since Sat 2017-09-23 11:30:28 UTC; 2min 23s ago
 Process: 1522 ExecStart=/usr/sbin/nginx -g daemon on; master_process on; (code=exited, status=0/SUCCESS)
 Process: 1398 ExecStartPre=/usr/sbin/nginx -t -q -g daemon on; master_process on; (code=exited, status=0/SUCCESS)
Main PID: 1542 (nginx)
   Tasks: 2
  Memory: 10.3M
     CPU: 27ms
  CGroup: /system.slice/nginx.service
          ├─1542 nginx: master process /usr/sbin/nginx -g daemon on; master_process on
          └─1545 nginx: worker process                           
Sep 23 11:30:27 webserver systemd[1]: Starting A high-performance web server and a reverse proxy server…
Sep 23 11:30:28 webserver systemd[1]: Started A high-performance web server and a reverse proxy server.

Create a reverseproxy.conf file

root@webserver:/etc/nginx/sites-available# touch reverseproxy.conf
root@webserver:/etc/nginx/sites-available# cat reverseproxy.conf
http {
   upstream backend {
       server;
       server backup;
   }
   server {
       listen 80;
       location / {
           proxy_pass http://backend;
       }
   }
}

Configuring a reverse proxy in nginx is quite simple: list the backend servers, with the port on which nginx should connect to them, in the configuration file.

Create a symlink in /etc/nginx/sites-enabled. This will make nginx read the configuration file and forward the traffic accordingly.

root@webserver:/etc/nginx/sites-available# ln -s /etc/nginx/sites-available/reverseproxy.conf /etc/nginx/sites-enabled/reverseproxy.conf
root@webserver:/etc/nginx/sites-available#service nginx restart