Add March 2021 VM based on Ubuntu 20.04 (#403)

* First draft of Ubuntu 20.04 Vagrantfile and scripts to install 2021-Mar
version of open source P4 development tools.

* Add more tracing output of what files have been installed at each step

* Don't do behavioral-model install_deps.sh before installing PI
This is an experiment to see whether the end result will be able to run
the tutorials' basic exercise using only Python3 on an Ubuntu 20.04 system.
Just before this commit, `vagrant up` resulted in a system that failed
to run the basic exercise, because python3 failed to import
google.grpc (if I recall correctly -- it may have been a different
google.<something> Python3 module name).
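
A quick way to reproduce or rule out that failure on a finished VM is to try importing the relevant Python packages directly. A minimal sketch (module names taken from the controller and exercise scripts in this commit; this helper script is not part of the repository):

```python
#!/usr/bin/env python3
# Check that the Python 3 packages the tutorials rely on import cleanly.
import importlib

for mod in ("grpc", "google.protobuf", "google.rpc", "scapy.all"):
    try:
        importlib.import_module(mod)
        print("OK:", mod)
    except ImportError as exc:
        print("MISSING:", mod, "-", exc)
```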

* Add missing patch file

* Fix copy and paste mistake

* Add missing patch file

* Change how protobuf Python3 module files are installed

* Correct a few desktop icon file names, and add clean.sh script

* Enhance clean.sh script, and add README for manual steps in creating a VM

* Changes to try to always use Python3, never Python2, in tutorials

* Update README steps for preparing a VM

* More additions to README on steps to create a single file VM image

* Add empty-disk-block zeroing to clean.sh script

* Also install PTF
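
For reference, one common way to install PTF from source at the time was roughly the following (a sketch; the VM's bootstrap script may use different options or a pinned commit):

```bash
git clone https://github.com/p4lang/ptf
cd ptf
sudo python3 setup.py install
```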

* Update versions of P4 dev tool source code to 2021-Apr-05
This includes a change to p4lang/PI that allows P4Runtime API clients
to send the shortest byte sequences necessary to encode integer
values, which I want for a PTF test that I have recently created.
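
For context, "shortest byte sequence" here means the client may encode a field value in the minimum number of big-endian bytes instead of padding it out to the field's full bit width. A hedged sketch of the idea (not the actual PI code):

```python
def encode_shortest(value: int) -> bytes:
    """Encode a non-negative integer in the fewest big-endian bytes (at least one)."""
    nbytes = max(1, (value.bit_length() + 7) // 8)
    return value.to_bytes(nbytes, byteorder="big")

assert encode_shortest(0) == b"\x00"
assert encode_shortest(1) == b"\x01"
assert encode_shortest(256) == b"\x01\x00"
```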

* Update README for 2021-Apr-05 version of VM image

* Resolve Python 3 compatibility issues

Most of the Python 2 to 3 code translation changes
were automated with the 2to3 tool.
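
For reference, the automated part of the conversion can be reproduced with an invocation like the one below (a sketch; the exact set of files converted is illustrative):

```bash
# -w rewrites the files in place, -n skips creating .bak backups.
2to3 -w -n utils/p4runtime_lib/*.py exercises/basic/*.py
```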

Signed-off-by: Radostin Stoyanov <rstoyanov@fedoraproject.org>

* Update commit SHAs for 4 p4lang repos to latest as of 2021-May-04

* Update Ubuntu 20.04 README.md for how I created 2021-May-04 version of VM

* mycontroller: Use Python 3 shebang line

Signed-off-by: Radostin Stoyanov <rstoyanov@fedoraproject.org>

* Update Ubuntu 20.04 README.md for how I created 2021-Jun-01 version of VM

* Update commit SHAs for 4 p4lang repos to latest as of 2021-Jul-07

* Update Ubuntu 20.04 README.md for how I created 2021-Jul-07 version of VM

* Update commit SHAs for 4 p4lang repos to latest as of 2021-Aug-01

* Update Ubuntu 20.04 README.md for how I created 2021-Aug-01 version of VM

* Update commit SHAs for 4 p4lang repos to latest as of 2021-Sep-07

* Update Ubuntu 20.04 README.md for how I created 2021-Sep-07 version of VM

Co-authored-by: Radostin Stoyanov <rstoyanov@fedoraproject.org>
Andy Fingerhut 2021-09-07 19:34:30 -07:00 committed by GitHub
parent 4914893445
commit c7f3139533
47 changed files with 1482 additions and 209 deletions


@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3
import sys
import struct
import os
@ -17,7 +17,7 @@ def get_if():
iface=i
break;
if not iface:
print "Cannot find eth0 interface"
print("Cannot find eth0 interface")
exit(1)
return iface
@ -35,16 +35,16 @@ class IPOption_MRI(IPOption):
length_from=lambda pkt:pkt.count*4) ]
def handle_pkt(pkt):
if TCP in pkt and pkt[TCP].dport == 1234:
print "got a packet"
print("got a packet")
pkt.show2()
# hexdump(pkt)
sys.stdout.flush()
def main():
ifaces = filter(lambda i: 'eth' in i, os.listdir('/sys/class/net/'))
ifaces = [i for i in os.listdir('/sys/class/net/') if 'eth' in i]
iface = ifaces[0]
print "sniffing on %s" % iface
print(("sniffing on %s" % iface))
sys.stdout.flush()
sniff(iface = iface,
prn = lambda x: handle_pkt(x))


@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3
import argparse
import sys
import socket
@ -17,20 +17,20 @@ def get_if():
iface=i
break;
if not iface:
print "Cannot find eth0 interface"
print("Cannot find eth0 interface")
exit(1)
return iface
def main():
if len(sys.argv)<3:
print 'pass 2 arguments: <destination> "<message>"'
print('pass 2 arguments: <destination> "<message>"')
exit(1)
addr = socket.gethostbyname(sys.argv[1])
iface = get_if()
print "sending on interface %s to %s" % (iface, str(addr))
print(("sending on interface %s to %s" % (iface, str(addr))))
pkt = Ether(src=get_if_hwaddr(iface), dst='ff:ff:ff:ff:ff:ff')
pkt = pkt /IP(dst=addr) / TCP(dport=1234, sport=random.randint(49152,65535)) / sys.argv[2]
pkt.show2()


@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3
import sys
import struct
import os
@ -18,13 +18,13 @@ def get_if():
iface=i
break;
if not iface:
print "Cannot find eth0 interface"
print("Cannot find eth0 interface")
exit(1)
return iface
def handle_pkt(pkt):
if MyTunnel in pkt or (TCP in pkt and pkt[TCP].dport == 1234):
print "got a packet"
print("got a packet")
pkt.show2()
# hexdump(pkt)
# print "len(pkt) = ", len(pkt)
@ -32,9 +32,9 @@ def handle_pkt(pkt):
def main():
ifaces = filter(lambda i: 'eth' in i, os.listdir('/sys/class/net/'))
ifaces = [i for i in os.listdir('/sys/class/net/') if 'eth' in i]
iface = ifaces[0]
print "sniffing on %s" % iface
print(("sniffing on %s" % iface))
sys.stdout.flush()
sniff(iface = iface,
prn = lambda x: handle_pkt(x))


@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3
import argparse
import sys
import socket
@ -19,7 +19,7 @@ def get_if():
iface=i
break;
if not iface:
print "Cannot find eth0 interface"
print("Cannot find eth0 interface")
exit(1)
return iface
@ -35,11 +35,11 @@ def main():
iface = get_if()
if (dst_id is not None):
print "sending on interface {} to dst_id {}".format(iface, str(dst_id))
print(("sending on interface {} to dst_id {}".format(iface, str(dst_id))))
pkt = Ether(src=get_if_hwaddr(iface), dst='ff:ff:ff:ff:ff:ff')
pkt = pkt / MyTunnel(dst_id=dst_id) / IP(dst=addr) / args.message
else:
print "sending on interface {} to IP addr {}".format(iface, str(addr))
print(("sending on interface {} to IP addr {}".format(iface, str(addr))))
pkt = Ether(src=get_if_hwaddr(iface), dst='ff:ff:ff:ff:ff:ff')
pkt = pkt / IP(dst=addr) / TCP(dport=1234, sport=random.randint(49152,65535)) / args.message


@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3
import argparse
import sys
@ -68,10 +68,10 @@ def main():
iface = 'eth0'
while True:
s = str(raw_input('> '))
s = str(eval(input('> ')))
if s == "quit":
break
print s
print(s)
try:
i,ts = p(s,0,[])
pkt = Ether(dst='00:04:00:00:00:00', type=0x1234) / P4calc(op=ts[1].value,
@ -84,13 +84,13 @@ def main():
if resp:
p4calc=resp[P4calc]
if p4calc:
print p4calc.result
print((p4calc.result))
else:
print "cannot find P4calc header in the packet"
print("cannot find P4calc header in the packet")
else:
print "Didn't receive response"
print("Didn't receive response")
except Exception as error:
print error
print(error)
if __name__ == '__main__':


@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3
import sys
import struct
@ -15,12 +15,12 @@ def get_if():
iface=i
break;
if not iface:
print "Cannot find eth0 interface"
print("Cannot find eth0 interface")
exit(1)
return iface
def handle_pkt(pkt):
print "got a packet"
print("got a packet")
pkt.show2()
# hexdump(pkt)
sys.stdout.flush()
@ -28,7 +28,7 @@ def handle_pkt(pkt):
def main():
iface = 'eth0'
print "sniffing on %s" % iface
print(("sniffing on %s" % iface))
sys.stdout.flush()
sniff(filter="udp and port 4321", iface = iface,
prn = lambda x: handle_pkt(x))


@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3
import argparse
import sys
@ -22,14 +22,14 @@ def get_if():
iface=i
break;
if not iface:
print "Cannot find eth0 interface"
print("Cannot find eth0 interface")
exit(1)
return iface
def main():
if len(sys.argv)<4:
print 'pass 2 arguments: <destination> "<message>" <duration>'
print('pass 2 arguments: <destination> "<message>" <duration>')
exit(1)
addr = socket.gethostbyname(sys.argv[1])


@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3
from probe_hdrs import *
@ -11,14 +11,14 @@ def expand(x):
def handle_pkt(pkt):
if ProbeData in pkt:
data_layers = [l for l in expand(pkt) if l.name=='ProbeData']
print ""
print("")
for sw in data_layers:
utilization = 0 if sw.cur_time == sw.last_time else 8.0*sw.byte_cnt/(sw.cur_time - sw.last_time)
print "Switch {} - Port {}: {} Mbps".format(sw.swid, sw.port, utilization)
print(("Switch {} - Port {}: {} Mbps".format(sw.swid, sw.port, utilization)))
def main():
iface = 'eth0'
print "sniffing on {}".format(iface)
print(("sniffing on {}".format(iface)))
sniff(iface = iface,
prn = lambda x: handle_pkt(x))


@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3
import sys
import time
from probe_hdrs import *


@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3
import sys
import struct
import os
@ -17,7 +17,7 @@ def get_if():
iface=i
break;
if not iface:
print "Cannot find eth0 interface"
print("Cannot find eth0 interface")
exit(1)
return iface
@ -34,16 +34,16 @@ class IPOption_MRI(IPOption):
IntField("", 0),
length_from=lambda pkt:pkt.count*4) ]
def handle_pkt(pkt):
print "got a packet"
print("got a packet")
pkt.show2()
# hexdump(pkt)
sys.stdout.flush()
def main():
ifaces = filter(lambda i: 'eth' in i, os.listdir('/sys/class/net/'))
ifaces = [i for i in os.listdir('/sys/class/net/') if 'eth' in i]
iface = ifaces[0]
print "sniffing on %s" % iface
print("sniffing on %s" % iface)
sys.stdout.flush()
sniff(filter="tcp", iface = iface,
prn = lambda x: handle_pkt(x))


@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3
import argparse
import sys
import socket
@ -17,20 +17,20 @@ def get_if():
iface=i
break;
if not iface:
print "Cannot find eth0 interface"
print("Cannot find eth0 interface")
exit(1)
return iface
def main():
if len(sys.argv)<3:
print 'pass 2 arguments: <destination> "<message>"'
print('pass 2 arguments: <destination> "<message>"')
exit(1)
addr = socket.gethostbyname(sys.argv[1])
iface = get_if()
print "sending on interface %s to %s" % (iface, str(addr))
print("sending on interface %s to %s" % (iface, str(addr)))
pkt = Ether(src=get_if_hwaddr(iface), dst='ff:ff:ff:ff:ff:ff')
pkt = pkt /IP(dst=addr) / TCP(dport=1234, sport=random.randint(49152,65535)) / sys.argv[2]
pkt.show2()


@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3
import sys
import struct
@ -16,7 +16,7 @@ def get_if():
iface=i
break;
if not iface:
print "Cannot find eth0 interface"
print("Cannot find eth0 interface")
exit(1)
return iface
@ -40,7 +40,7 @@ class IPOption_MRI(IPOption):
count_from=lambda pkt:(pkt.count*1)) ]
def handle_pkt(pkt):
print "got a packet"
print("got a packet")
pkt.show2()
# hexdump(pkt)
sys.stdout.flush()
@ -48,7 +48,7 @@ def handle_pkt(pkt):
def main():
iface = 'eth0'
print "sniffing on %s" % iface
print("sniffing on %s" % iface)
sys.stdout.flush()
sniff(filter="udp and port 4321", iface = iface,
prn = lambda x: handle_pkt(x))


@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3
import argparse
import sys
@ -22,7 +22,7 @@ def get_if():
iface=i
break;
if not iface:
print "Cannot find eth0 interface"
print("Cannot find eth0 interface")
exit(1)
return iface
@ -49,7 +49,7 @@ class IPOption_MRI(IPOption):
def main():
if len(sys.argv)<3:
print 'pass 2 arguments: <destination> "<message>"'
print('pass 2 arguments: <destination> "<message>"')
exit(1)
addr = socket.gethostbyname(sys.argv[1])


@ -1,4 +1,4 @@
#!/usr/bin/env python2
#!/usr/bin/env python3
import argparse
import grpc
import os
@ -49,7 +49,7 @@ def writeTunnelRules(p4info_helper, ingress_sw, egress_sw, tunnel_id,
"dst_id": tunnel_id,
})
ingress_sw.WriteTableEntry(table_entry)
print "Installed ingress tunnel rule on %s" % ingress_sw.name
print("Installed ingress tunnel rule on %s" % ingress_sw.name)
# 2) Tunnel Transit Rule
# The rule will need to be added to the myTunnel_exact table and match on
@ -69,7 +69,7 @@ def writeTunnelRules(p4info_helper, ingress_sw, egress_sw, tunnel_id,
# TODO build the transit rule
# TODO install the transit rule on the ingress switch
print "TODO Install transit tunnel rule"
print("TODO Install transit tunnel rule")
# 3) Tunnel Egress Rule
# For our simple topology, the host will always be located on the
@ -87,7 +87,7 @@ def writeTunnelRules(p4info_helper, ingress_sw, egress_sw, tunnel_id,
"port": SWITCH_TO_HOST_PORT
})
egress_sw.WriteTableEntry(table_entry)
print "Installed egress tunnel rule on %s" % egress_sw.name
print("Installed egress tunnel rule on %s" % egress_sw.name)
def readTableRules(p4info_helper, sw):
@ -97,14 +97,14 @@ def readTableRules(p4info_helper, sw):
:param p4info_helper: the P4Info helper
:param sw: the switch connection
"""
print '\n----- Reading tables rules for %s -----' % sw.name
print('\n----- Reading tables rules for %s -----' % sw.name)
for response in sw.ReadTableEntries():
for entity in response.entities:
entry = entity.table_entry
# TODO For extra credit, you can use the p4info_helper to translate
# the IDs in the entry to names
print entry
print '-----'
print(entry)
print('-----')
def printCounter(p4info_helper, sw, counter_name, index):
@ -121,10 +121,10 @@ def printCounter(p4info_helper, sw, counter_name, index):
for response in sw.ReadCounters(p4info_helper.get_counters_id(counter_name), index):
for entity in response.entities:
counter = entity.counter_entry
print "%s %s %d: %d packets (%d bytes)" % (
print("%s %s %d: %d packets (%d bytes)" % (
sw.name, counter_name, index,
counter.data.packet_count, counter.data.byte_count
)
))
def main(p4info_file_path, bmv2_file_path):
# Instantiate a P4Runtime helper from the p4info file
@ -153,10 +153,10 @@ def main(p4info_file_path, bmv2_file_path):
# Install the P4 program on the switches
s1.SetForwardingPipelineConfig(p4info=p4info_helper.p4info,
bmv2_json_file_path=bmv2_file_path)
print "Installed P4 Program using SetForwardingPipelineConfig on s1"
print("Installed P4 Program using SetForwardingPipelineConfig on s1")
s2.SetForwardingPipelineConfig(p4info=p4info_helper.p4info,
bmv2_json_file_path=bmv2_file_path)
print "Installed P4 Program using SetForwardingPipelineConfig on s2"
print("Installed P4 Program using SetForwardingPipelineConfig on s2")
# Write the rules that tunnel traffic from h1 to h2
writeTunnelRules(p4info_helper, ingress_sw=s1, egress_sw=s2, tunnel_id=100,
@ -173,14 +173,14 @@ def main(p4info_file_path, bmv2_file_path):
# Print the tunnel counters every 2 seconds
while True:
sleep(2)
print '\n----- Reading tunnel counters -----'
print('\n----- Reading tunnel counters -----')
printCounter(p4info_helper, s1, "MyIngress.ingressTunnelCounter", 100)
printCounter(p4info_helper, s2, "MyIngress.egressTunnelCounter", 100)
printCounter(p4info_helper, s2, "MyIngress.ingressTunnelCounter", 200)
printCounter(p4info_helper, s1, "MyIngress.egressTunnelCounter", 200)
except KeyboardInterrupt:
print " Shutting down."
print(" Shutting down.")
except grpc.RpcError as e:
printGrpcError(e)
@ -198,10 +198,10 @@ if __name__ == '__main__':
if not os.path.exists(args.p4info):
parser.print_help()
print "\np4info file not found: %s\nHave you run 'make'?" % args.p4info
print("\np4info file not found: %s\nHave you run 'make'?" % args.p4info)
parser.exit(1)
if not os.path.exists(args.bmv2_json):
parser.print_help()
print "\nBMv2 JSON file not found: %s\nHave you run 'make'?" % args.bmv2_json
print("\nBMv2 JSON file not found: %s\nHave you run 'make'?" % args.bmv2_json)
parser.exit(1)
main(args.p4info, args.bmv2_json)


@ -1,4 +1,4 @@
#!/usr/bin/env python2
#!/usr/bin/env python3
import argparse
import grpc
import os
@ -48,7 +48,7 @@ def writeTunnelRules(p4info_helper, ingress_sw, egress_sw, tunnel_id,
"dst_id": tunnel_id,
})
ingress_sw.WriteTableEntry(table_entry)
print "Installed ingress tunnel rule on %s" % ingress_sw.name
print("Installed ingress tunnel rule on %s" % ingress_sw.name)
# 2) Tunnel Transit Rule
# The rule will need to be added to the myTunnel_exact table and match on
@ -78,7 +78,7 @@ def writeTunnelRules(p4info_helper, ingress_sw, egress_sw, tunnel_id,
"port": SWITCH_TO_SWITCH_PORT
})
ingress_sw.WriteTableEntry(table_entry)
print "Installed transit tunnel rule on %s" % ingress_sw.name
print("Installed transit tunnel rule on %s" % ingress_sw.name)
# 3) Tunnel Egress Rule
# For our simple topology, the host will always be located on the
@ -96,7 +96,7 @@ def writeTunnelRules(p4info_helper, ingress_sw, egress_sw, tunnel_id,
"port": SWITCH_TO_HOST_PORT
})
egress_sw.WriteTableEntry(table_entry)
print "Installed egress tunnel rule on %s" % egress_sw.name
print("Installed egress tunnel rule on %s" % egress_sw.name)
def readTableRules(p4info_helper, sw):
@ -106,24 +106,24 @@ def readTableRules(p4info_helper, sw):
:param p4info_helper: the P4Info helper
:param sw: the switch connection
"""
print '\n----- Reading tables rules for %s -----' % sw.name
print('\n----- Reading tables rules for %s -----' % sw.name)
for response in sw.ReadTableEntries():
for entity in response.entities:
entry = entity.table_entry
# TODO For extra credit, you can use the p4info_helper to translate
# the IDs in the entry to names
table_name = p4info_helper.get_tables_name(entry.table_id)
print '%s: ' % table_name,
print('%s: ' % table_name, end=' ')
for m in entry.match:
print p4info_helper.get_match_field_name(table_name, m.field_id),
print '%r' % (p4info_helper.get_match_field_value(m),),
print(p4info_helper.get_match_field_name(table_name, m.field_id), end=' ')
print('%r' % (p4info_helper.get_match_field_value(m),), end=' ')
action = entry.action.action
action_name = p4info_helper.get_actions_name(action.action_id)
print '->', action_name,
print('->', action_name, end=' ')
for p in action.params:
print p4info_helper.get_action_param_name(action_name, p.param_id),
print '%r' % p.value,
print
print(p4info_helper.get_action_param_name(action_name, p.param_id), end=' ')
print('%r' % p.value, end=' ')
print()
def printCounter(p4info_helper, sw, counter_name, index):
"""
@ -139,17 +139,17 @@ def printCounter(p4info_helper, sw, counter_name, index):
for response in sw.ReadCounters(p4info_helper.get_counters_id(counter_name), index):
for entity in response.entities:
counter = entity.counter_entry
print "%s %s %d: %d packets (%d bytes)" % (
print("%s %s %d: %d packets (%d bytes)" % (
sw.name, counter_name, index,
counter.data.packet_count, counter.data.byte_count
)
))
def printGrpcError(e):
print "gRPC Error:", e.details(),
print("gRPC Error:", e.details(), end=' ')
status_code = e.code()
print "(%s)" % status_code.name,
print("(%s)" % status_code.name, end=' ')
traceback = sys.exc_info()[2]
print "[%s:%d]" % (traceback.tb_frame.f_code.co_filename, traceback.tb_lineno)
print("[%s:%d]" % (traceback.tb_frame.f_code.co_filename, traceback.tb_lineno))
def main(p4info_file_path, bmv2_file_path):
# Instantiate a P4Runtime helper from the p4info file
@ -178,10 +178,10 @@ def main(p4info_file_path, bmv2_file_path):
# Install the P4 program on the switches
s1.SetForwardingPipelineConfig(p4info=p4info_helper.p4info,
bmv2_json_file_path=bmv2_file_path)
print "Installed P4 Program using SetForwardingPipelineConfig on s1"
print("Installed P4 Program using SetForwardingPipelineConfig on s1")
s2.SetForwardingPipelineConfig(p4info=p4info_helper.p4info,
bmv2_json_file_path=bmv2_file_path)
print "Installed P4 Program using SetForwardingPipelineConfig on s2"
print("Installed P4 Program using SetForwardingPipelineConfig on s2")
# Write the rules that tunnel traffic from h1 to h2
writeTunnelRules(p4info_helper, ingress_sw=s1, egress_sw=s2, tunnel_id=100,
@ -198,14 +198,14 @@ def main(p4info_file_path, bmv2_file_path):
# Print the tunnel counters every 2 seconds
while True:
sleep(2)
print '\n----- Reading tunnel counters -----'
print('\n----- Reading tunnel counters -----')
printCounter(p4info_helper, s1, "MyIngress.ingressTunnelCounter", 100)
printCounter(p4info_helper, s2, "MyIngress.egressTunnelCounter", 100)
printCounter(p4info_helper, s2, "MyIngress.ingressTunnelCounter", 200)
printCounter(p4info_helper, s1, "MyIngress.egressTunnelCounter", 200)
except KeyboardInterrupt:
print " Shutting down."
print(" Shutting down.")
except grpc.RpcError as e:
printGrpcError(e)
@ -223,10 +223,10 @@ if __name__ == '__main__':
if not os.path.exists(args.p4info):
parser.print_help()
print "\np4info file not found: %s\nHave you run 'make'?" % args.p4info
print("\np4info file not found: %s\nHave you run 'make'?" % args.p4info)
parser.exit(1)
if not os.path.exists(args.bmv2_json):
parser.print_help()
print "\nBMv2 JSON file not found: %s\nHave you run 'make'?" % args.bmv2_json
print("\nBMv2 JSON file not found: %s\nHave you run 'make'?" % args.bmv2_json)
parser.exit(1)
main(args.p4info, args.bmv2_json)


@ -1,18 +1,18 @@
#!/usr/bin/env python
#!/usr/bin/env python3
import sys
from scapy.all import sniff, get_if_list
def handle_pkt(pkt):
print "got a packet"
print("got a packet")
pkt.show2()
sys.stdout.flush()
def main():
iface = 'eth0'
print "sniffing on %s" % iface
print("sniffing on %s" % iface)
sys.stdout.flush()
sniff(iface=iface, prn=lambda x: handle_pkt(x))


@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3
import argparse
import socket
@ -16,7 +16,7 @@ def get_if():
iface = i
break
if not iface:
print "Cannot find eth0 interface"
print("Cannot find eth0 interface")
exit(1)
return iface


@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3
import sys
import struct
@ -16,7 +16,7 @@ def get_if():
iface=i
break;
if not iface:
print "Cannot find eth0 interface"
print("Cannot find eth0 interface")
exit(1)
return iface
@ -33,7 +33,7 @@ class IPOption_MRI(IPOption):
IntField("", 0),
length_from=lambda pkt:pkt.count*4) ]
def handle_pkt(pkt):
print "got a packet"
print("got a packet")
pkt.show2()
# hexdump(pkt)
sys.stdout.flush()
@ -50,7 +50,7 @@ bind_layers(SourceRoute, SourceRoutingTail, bos=1)
def main():
iface = 'eth0'
print "sniffing on %s" % iface
print("sniffing on %s" % iface)
sys.stdout.flush()
sniff(filter="udp and port 4321", iface = iface,
prn = lambda x: handle_pkt(x))

View File

@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3
import argparse
import sys
import socket
@ -19,7 +19,7 @@ def get_if():
iface=i
break;
if not iface:
print "Cannot find eth0 interface"
print("Cannot find eth0 interface")
exit(1)
return iface
@ -34,20 +34,20 @@ bind_layers(SourceRoute, IP, bos=1)
def main():
if len(sys.argv)<2:
print 'pass 2 arguments: <destination>'
print('pass 2 arguments: <destination>')
exit(1)
addr = socket.gethostbyname(sys.argv[1])
iface = get_if()
print "sending on interface %s to %s" % (iface, str(addr))
print("sending on interface %s to %s" % (iface, str(addr)))
while True:
print
s = str(raw_input('Type space separated port nums '
print()
s = str(input('Type space separated port nums '
'(example: "2 3 2 2 1") or "q" to quit: '))
if s == "q":
break;
print
print()
i = 0
pkt = Ether(src=get_if_hwaddr(iface), dst='ff:ff:ff:ff:ff:ff');


@ -32,7 +32,7 @@ endif
all: run
run: build
sudo python $(RUN_SCRIPT) -t $(TOPO) $(run_args)
sudo python3 $(RUN_SCRIPT) -t $(TOPO) $(run_args)
stop:
sudo mn -c


@ -25,7 +25,7 @@ class AppController:
assert entries
if sw: thrift_port = sw.thrift_port
print '\n'.join(entries)
print('\n'.join(entries))
p = subprocess.Popen(['simple_switch_CLI', '--thrift-port', str(thrift_port)], stdin=subprocess.PIPE)
p.communicate(input='\n'.join(entries))
@ -33,8 +33,8 @@ class AppController:
if sw: thrift_port = sw.thrift_port
p = subprocess.Popen(['simple_switch_CLI', '--thrift-port', str(thrift_port)], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate(input="register_read %s %d" % (register, idx))
reg_val = filter(lambda l: ' %s[%d]' % (register, idx) in l, stdout.split('\n'))[0].split('= ', 1)[1]
return long(reg_val)
reg_val = [l for l in stdout.split('\n') if ' %s[%d]' % (register, idx) in l][0].split('= ', 1)[1]
return int(reg_val)
def start(self):
shortestpath = ShortestPath(self.links)
@ -54,7 +54,7 @@ class AppController:
for host_name in self.topo._host_links:
h = self.net.get(host_name)
for link in self.topo._host_links[host_name].values():
for link in list(self.topo._host_links[host_name].values()):
sw = link['sw']
#entries[sw].append('table_add send_frame rewrite_mac %d => %s' % (link['sw_port'], link['sw_mac']))
#entries[sw].append('table_add forward set_dmac %s => %s' % (link['host_ip'], link['host_mac']))
@ -70,7 +70,7 @@ class AppController:
h.setDefaultRoute("via %s" % link['sw_ip'])
for h in self.net.hosts:
h_link = self.topo._host_links[h.name].values()[0]
h_link = list(self.topo._host_links[h.name].values())[0]
for sw in self.net.switches:
path = shortestpath.get(sw.name, h.name, exclude=lambda n: n[0]=='h')
if not path: continue
@ -85,20 +85,20 @@ class AppController:
path = shortestpath.get(h.name, h2.name, exclude=lambda n: n[0]=='h')
if not path: continue
h_link = self.topo._host_links[h.name][path[1]]
h2_link = self.topo._host_links[h2.name].values()[0]
h2_link = list(self.topo._host_links[h2.name].values())[0]
h.cmd('ip route add %s via %s' % (h2_link['host_ip'], h_link['sw_ip']))
print "**********"
print "Configuring entries in p4 tables"
print("**********")
print("Configuring entries in p4 tables")
for sw_name in entries:
print
print "Configuring switch... %s" % sw_name
print()
print("Configuring switch... %s" % sw_name)
sw = self.net.get(sw_name)
if entries[sw_name]:
self.add_entries(sw=sw, entries=entries[sw_name])
print "Configuration complete."
print "**********"
print("Configuration complete.")
print("**********")
def stop(self):
pass


@ -6,9 +6,9 @@ class AppTopo(Topo):
log_dir="/tmp", bws={}, **opts):
Topo.__init__(self, **opts)
nodes = sum(map(list, zip(*links)), [])
host_names = sorted(list(set(filter(lambda n: n[0] == 'h', nodes))))
sw_names = sorted(list(set(filter(lambda n: n[0] == 's', nodes))))
nodes = sum(list(map(list, list(zip(*links)))), [])
host_names = sorted(list(set([n for n in nodes if n[0] == 'h'])))
sw_names = sorted(list(set([n for n in nodes if n[0] == 's'])))
sw_ports = dict([(sw, []) for sw in sw_names])
self._host_links = {}
@ -23,7 +23,7 @@ class AppTopo(Topo):
self.addHost(host_name)
self._host_links[host_name] = {}
host_links = filter(lambda l: l[0]==host_name or l[1]==host_name, links)
host_links = [l for l in links if l[0]==host_name or l[1]==host_name]
sw_idx = 0
for link in host_links:


@ -1,4 +1,4 @@
#!/usr/bin/env python2
#!/usr/bin/env python3
# Copyright 2013-present Barefoot Networks, Inc.
#
@ -84,7 +84,7 @@ def main():
conf = manifest['targets'][args.target]
params = conf['parameters'] if 'parameters' in conf else {}
os.environ.update(dict(map(lambda (k,v): (k, str(v)), params.iteritems())))
os.environ.update(dict([(k_v[0], str(k_v[1])) for k_v in iter(params.items())]))
def formatParams(s):
for param in params:
@ -124,7 +124,7 @@ def main():
latencies[host_name+other] = host['latency']
for l in latencies:
if isinstance(latencies[l], (str, unicode)):
if isinstance(latencies[l], str):
latencies[l] = formatParams(latencies[l])
else:
latencies[l] = str(latencies[l]) + "ms"
@ -160,7 +160,7 @@ def main():
if args.cli_message is not None:
with open(args.cli_message, 'r') as message_file:
print message_file.read()
print(message_file.read())
if args.cli or ('cli' in conf and conf['cli']):
CLI(net)
@ -176,16 +176,16 @@ def main():
return cmd
def _wait_for_exit(p, host):
print p.communicate()
print(p.communicate())
if p.returncode is None:
p.wait()
print p.communicate()
print(p.communicate())
return_codes.append(p.returncode)
if host_name in stdout_files:
stdout_files[host_name].flush()
stdout_files[host_name].close()
print '\n'.join(map(lambda (k,v): "%s: %s"%(k,v), params.iteritems())) + '\n'
print('\n'.join(["%s: %s"%(k_v1[0],k_v1[1]) for k_v1 in iter(params.items())]) + '\n')
for host_name in sorted(conf['hosts'].keys()):
host = conf['hosts'][host_name]
@ -195,7 +195,7 @@ def main():
stdout_filename = os.path.join(args.log_dir, h.name + '.stdout')
stdout_files[h.name] = open(stdout_filename, 'w')
cmd = formatCmd(host['cmd'])
print h.name, cmd
print(h.name, cmd)
p = h.popen(cmd, stdout=stdout_files[h.name], shell=True, preexec_fn=os.setpgrp)
if 'startup_sleep' in host: sleep(host['startup_sleep'])


@ -39,16 +39,16 @@ class P4Host(Host):
return r
def describe(self, sw_addr=None, sw_mac=None):
print "**********"
print "Network configuration for: %s" % self.name
print "Default interface: %s\t%s\t%s" %(
print("**********")
print("Network configuration for: %s" % self.name)
print("Default interface: %s\t%s\t%s" %(
self.defaultIntf().name,
self.defaultIntf().IP(),
self.defaultIntf().MAC()
)
))
if sw_addr is not None or sw_mac is not None:
print "Default route to switch: %s (%s)" % (sw_addr, sw_mac)
print "**********"
print("Default route to switch: %s (%s)" % (sw_addr, sw_mac))
print("**********")
class P4Switch(Switch):
"""P4 virtual switch"""
@ -113,7 +113,7 @@ class P4Switch(Switch):
"Start up a new P4 switch"
info("Starting P4 switch {}.\n".format(self.name))
args = [self.sw_path]
for port, intf in self.intfs.items():
for port, intf in list(self.intfs.items()):
if not intf.IP():
args.extend(['-i', str(port) + "@" + intf.name])
if self.pcap_dump:


@ -1,4 +1,4 @@
#!/usr/bin/env python2
#!/usr/bin/env python3
# Copyright 2013-present Barefoot Networks, Inc.
#
@ -64,11 +64,11 @@ class SingleSwitchTopo(Topo):
enable_debugger = False,
pcap_dump = pcap_dump)
for h in xrange(n):
for h in range(n):
host = self.addHost('h%d' % (h + 1),
ip = "10.0.%d.10/24" % h,
mac = '00:04:00:00:00:%02x' %h)
print "Adding host", str(host)
print("Adding host", str(host))
self.addLink(host, switch)
def main():
@ -88,11 +88,11 @@ def main():
net.start()
sw_mac = ["00:aa:bb:00:00:%02x" % n for n in xrange(num_hosts)]
sw_mac = ["00:aa:bb:00:00:%02x" % n for n in range(num_hosts)]
sw_addr = ["10.0.%d.1" % n for n in xrange(num_hosts)]
sw_addr = ["10.0.%d.1" % n for n in range(num_hosts)]
for n in xrange(num_hosts):
for n in range(num_hosts):
h = net.get('h%d' % (n + 1))
if mode == "l2":
h.setDefaultRoute("dev %s" % h.defaultIntf().name)
@ -100,30 +100,30 @@ def main():
h.setARP(sw_addr[n], sw_mac[n])
h.setDefaultRoute("dev %s via %s" % (h.defaultIntf().name, sw_addr[n]))
for n in xrange(num_hosts):
for n in range(num_hosts):
h = net.get('h%d' % (n + 1))
h.describe(sw_addr[n], sw_mac[n])
sleep(1)
if args.switch_config is not None:
print
print "Reading switch configuration script:", args.switch_config
print()
print("Reading switch configuration script:", args.switch_config)
with open(args.switch_config, 'r') as config_file:
switch_config = config_file.read()
print "Configuring switch..."
print("Configuring switch...")
proc = Popen(["simple_switch_CLI"], stdin=PIPE)
proc.communicate(input=switch_config)
print "Configuration complete."
print
print("Configuration complete.")
print()
print "Ready !"
print("Ready !")
if args.cli_message is not None:
with open(args.cli_message, 'r') as message_file:
print message_file.read()
print(message_file.read())
CLI( net )
net.stop()


@ -45,14 +45,14 @@ class P4Host(Host):
return r
def describe(self):
print "**********"
print self.name
print "default interface: %s\t%s\t%s" %(
print("**********")
print(self.name)
print("default interface: %s\t%s\t%s" %(
self.defaultIntf().name,
self.defaultIntf().IP(),
self.defaultIntf().MAC()
)
print "**********"
))
print("**********")
class P4Switch(Switch):
"""P4 virtual switch"""
@ -120,7 +120,7 @@ class P4Switch(Switch):
"Start up a new P4 switch"
info("Starting P4 switch {}.\n".format(self.name))
args = [self.sw_path]
for port, intf in self.intfs.items():
for port, intf in list(self.intfs.items()):
if not intf.IP():
args.extend(['-i', str(port) + "@" + intf.name])
if self.pcap_dump:


@ -1,4 +1,4 @@
#!/usr/bin/env python2
#!/usr/bin/env python3
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -13,7 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
from collections import OrderedDict
@ -76,7 +76,7 @@ def read_manifest(manifest_file):
elif 'default-target' in manifest:
chosen_target = manifest['default-target']
else:
chosen_target = manifest['targets'].keys()[0]
chosen_target = list(manifest['targets'].keys())[0]
if chosen_target not in manifest['targets']:
log_error('Target not found in manifest:', chosen_target)
@ -188,7 +188,7 @@ def run_mininet(manifest):
switch_args.append('--json "%s"' % output_file)
program = '"%s/mininet/single_switch_mininet.py"' % sys.path[0]
return run_command('python2 %s %s' % (program, ' '.join(switch_args)))
return run_command('python3 %s %s' % (program, ' '.join(switch_args)))
def run_multiswitch(manifest):
output_file = run_compile_bmv2(manifest)
@ -240,7 +240,7 @@ def run_multiswitch(manifest):
script_args.append('--cli-message "%s"' % message_file)
program = '"%s/mininet/multi_switch_mininet.py"' % sys.path[0]
return run_command('python2 %s %s' % (program, ' '.join(script_args)))
return run_command('python3 %s %s' % (program, ' '.join(script_args)))
def run_stf(manifest):
output_file = run_compile_bmv2(manifest)
@ -257,7 +257,7 @@ def run_stf(manifest):
stf_args.append(os.path.join(args.build_dir, stf_file))
program = '"%s/stf/bmv2stf.py"' % sys.path[0]
rv = run_command('python2 %s %s' % (program, ' '.join(stf_args)))
rv = run_command('python3 %s %s' % (program, ' '.join(stf_args)))
if rv != 0:
sys.exit(1)
return rv
@ -273,7 +273,7 @@ def run_custom(manifest):
log_error('No mininet program file provided.')
sys.exit(1)
program = manifest.target_config['program']
rv = run_command('%s python2 %s %s' % (python_path, program, ' '.join(script_args)))
rv = run_command('%s python3 %s %s' % (python_path, program, ' '.join(script_args)))
if rv != 0:
sys.exit(1)


@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
from switch import SwitchConnection
from .switch import SwitchConnection
from p4.tmp import p4config_pb2
@ -21,7 +21,7 @@ def buildDeviceConfig(bmv2_json_file_path=None):
device_config = p4config_pb2.P4DeviceConfig()
device_config.reassign = True
with open(bmv2_json_file_path) as f:
device_config.device_data = f.read()
device_config.device_data = f.read().encode('utf-8')
return device_config


@ -29,10 +29,10 @@ def matchesMac(mac_addr_string):
return mac_pattern.match(mac_addr_string) is not None
def encodeMac(mac_addr_string):
return mac_addr_string.replace(':', '').decode('hex')
return bytes.fromhex(mac_addr_string.replace(':', ''))
def decodeMac(encoded_mac_addr):
return ':'.join(s.encode('hex') for s in encoded_mac_addr)
return ':'.join(s.hex() for s in encoded_mac_addr)
ip_pattern = re.compile('^(\d{1,3}\.){3}(\d{1,3})$')
def matchesIPv4(ip_addr_string):
@ -52,10 +52,10 @@ def encodeNum(number, bitwidth):
num_str = '%x' % number
if number >= 2 ** bitwidth:
raise Exception("Number, %d, does not fit in %d bits" % (number, bitwidth))
return ('0' * (byte_len * 2 - len(num_str)) + num_str).decode('hex')
return bytes.fromhex('0' * (byte_len * 2 - len(num_str)) + num_str)
def decodeNum(encoded_number):
return int(encoded_number.encode('hex'), 16)
return int(encoded_number.hex(), 16)
def encode(x, bitwidth):
'Tries to infer the type of `x` and encode it'
@ -116,4 +116,4 @@ if __name__ == '__main__':
enc_num = encodeNum(num, 8)
raise Exception("expected exception")
except Exception as e:
print e
print(e)


@ -73,20 +73,20 @@ def parseGrpcErrorBinaryDetails(grpc_error):
# batch) in order to print error code + user-facing message. See P4Runtime
# documentation for more details on error-reporting.
def printGrpcError(grpc_error):
print "gRPC Error", grpc_error.details(),
print("gRPC Error", grpc_error.details(), end=' ')
status_code = grpc_error.code()
print "({})".format(status_code.name),
print("({})".format(status_code.name), end=' ')
traceback = sys.exc_info()[2]
print "[{}:{}]".format(
traceback.tb_frame.f_code.co_filename, traceback.tb_lineno)
print("[{}:{}]".format(
traceback.tb_frame.f_code.co_filename, traceback.tb_lineno))
if status_code != grpc.StatusCode.UNKNOWN:
return
p4_errors = parseGrpcErrorBinaryDetails(grpc_error)
if p4_errors is None:
return
print "Errors in batch:"
print("Errors in batch:")
for idx, p4_error in p4_errors:
code_name = code_pb2._CODE.values_by_number[
p4_error.canonical_code].name
print "\t* At index {}: {}, '{}'\n".format(
idx, code_name, p4_error.message)
print("\t* At index {}: {}, '{}'\n".format(
idx, code_name, p4_error.message))


@ -18,7 +18,7 @@ import google.protobuf.text_format
from p4.v1 import p4runtime_pb2
from p4.config.v1 import p4info_pb2
from convert import encode
from .convert import encode
class P4InfoHelper(object):
def __init__(self, p4_info_filepath):
@ -173,7 +173,7 @@ class P4InfoHelper(object):
if match_fields:
table_entry.match.extend([
self.get_match_field_pb(table_name, match_field_name, value)
for match_field_name, value in match_fields.iteritems()
for match_field_name, value in match_fields.items()
])
if default_action:
@ -185,7 +185,7 @@ class P4InfoHelper(object):
if action_params:
action.params.extend([
self.get_action_param_pb(action_name, field_name, value)
for field_name, value in action_params.iteritems()
for field_name, value in action_params.items()
])
return table_entry


@ -1,4 +1,4 @@
#!/usr/bin/env python2
#!/usr/bin/env python3
#
# Copyright 2017-present Open Networking Foundation
#
@ -19,15 +19,15 @@ import json
import os
import sys
import bmv2
import helper
from . import bmv2
from . import helper
def error(msg):
print >> sys.stderr, ' - ERROR! ' + msg
print(' - ERROR! ' + msg, file=sys.stderr)
def info(msg):
print >> sys.stdout, ' - ' + msg
print(' - ' + msg, file=sys.stdout)
class ConfException(Exception):
@ -165,16 +165,13 @@ def insertTableEntry(sw, flow, p4info_helper):
sw.WriteTableEntry(table_entry)
# object hook for josn library, use str instead of unicode object
# https://stackoverflow.com/questions/956867/how-to-get-string-objects-instead-of-unicode-from-json
def json_load_byteified(file_handle):
return _byteify(json.load(file_handle, object_hook=_byteify),
ignore_dicts=True)
return json.load(file_handle)
def _byteify(data, ignore_dicts=False):
# if this is a unicode string, return its string representation
if isinstance(data, unicode):
if isinstance(data, str):
return data.encode('utf-8')
# if this is a list of values, return list of byteified values
if isinstance(data, list):
@ -184,7 +181,7 @@ def _byteify(data, ignore_dicts=False):
if isinstance(data, dict) and not ignore_dicts:
return {
_byteify(key, ignore_dicts=True): _byteify(value, ignore_dicts=True)
for key, value in data.iteritems()
for key, value in data.items()
}
# if it's anything else, return it in its original form
return data


@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
from Queue import Queue
from queue import Queue
from abc import abstractmethod
from datetime import datetime
@ -63,7 +63,7 @@ class SwitchConnection(object):
request.arbitration.election_id.low = 1
if dry_run:
print "P4Runtime MasterArbitrationUpdate: ", request
print("P4Runtime MasterArbitrationUpdate: ", request)
else:
self.requests_stream.put(request)
for item in self.stream_msg_resp:
@ -81,7 +81,7 @@ class SwitchConnection(object):
request.action = p4runtime_pb2.SetForwardingPipelineConfigRequest.VERIFY_AND_COMMIT
if dry_run:
print "P4Runtime SetForwardingPipelineConfig:", request
print("P4Runtime SetForwardingPipelineConfig:", request)
else:
self.client_stub.SetForwardingPipelineConfig(request)
@ -96,7 +96,7 @@ class SwitchConnection(object):
update.type = p4runtime_pb2.Update.INSERT
update.entity.table_entry.CopyFrom(table_entry)
if dry_run:
print "P4Runtime Write:", request
print("P4Runtime Write:", request)
else:
self.client_stub.Write(request)
@ -110,7 +110,7 @@ class SwitchConnection(object):
else:
table_entry.table_id = 0
if dry_run:
print "P4Runtime Read:", request
print("P4Runtime Read:", request)
else:
for response in self.client_stub.Read(request):
yield response
@ -127,7 +127,7 @@ class SwitchConnection(object):
if index is not None:
counter_entry.index.index = index
if dry_run:
print "P4Runtime Read:", request
print("P4Runtime Read:", request)
else:
for response in self.client_stub.Read(request):
yield response
@ -141,7 +141,7 @@ class SwitchConnection(object):
update.type = p4runtime_pb2.Update.INSERT
update.entity.packet_replication_engine_entry.CopyFrom(pre_entry)
if dry_run:
print "P4Runtime Write:", request
print("P4Runtime Write:", request)
else:
self.client_stub.Write(request)


@ -100,7 +100,7 @@ class P4RuntimeSwitch(P4Switch):
def start(self, controllers):
info("Starting P4 switch {}.\n".format(self.name))
args = [self.sw_path]
for port, intf in self.intfs.items():
for port, intf in list(self.intfs.items()):
if not intf.IP():
args.extend(['-i', str(port) + "@" + intf.name])
if self.pcap_dump:


@ -1,4 +1,4 @@
#!/usr/bin/env python2
#!/usr/bin/env python3
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -45,7 +45,7 @@ def configureP4Switch(**switch_args):
P4RuntimeSwitch.__init__(self, *opts, **kwargs)
def describe(self):
print "%s -> gRPC port: %d" % (self.name, self.grpc_port)
print("%s -> gRPC port: %d" % (self.name, self.grpc_port))
return ConfiguredP4RuntimeSwitch
else:
@ -59,7 +59,7 @@ def configureP4Switch(**switch_args):
P4Switch.__init__(self, *opts, **kwargs)
def describe(self):
print "%s -> Thrift port: %d" % (self.name, self.thrift_port)
print("%s -> Thrift port: %d" % (self.name, self.thrift_port))
return ConfiguredP4Switch
@ -79,7 +79,7 @@ class ExerciseTopo(Topo):
else:
switch_links.append(link)
for sw, params in switches.iteritems():
for sw, params in switches.items():
if "program" in params:
switchClass = configureP4Switch(
sw_path=bmv2_exe,
@ -143,7 +143,7 @@ class ExerciseRunner:
def format_latency(self, l):
""" Helper method for parsing link latencies from the topology json. """
if isinstance(l, (str, unicode)):
if isinstance(l, str):
return l
else:
return str(l) + "ms"
@ -297,7 +297,7 @@ class ExerciseRunner:
P4Runtime, depending if any command or runtime JSON files were
provided for the switches.
"""
for sw_name, sw_dict in self.switches.iteritems():
for sw_name, sw_dict in self.switches.items():
if 'cli_input' in sw_dict:
self.program_switch_cli(sw_name, sw_dict)
if 'runtime_json' in sw_dict:
@ -306,7 +306,7 @@ class ExerciseRunner:
def program_hosts(self):
""" Execute any commands provided in the topology.json file on each Mininet host
"""
for host_name, host_info in self.hosts.items():
for host_name, host_info in list(self.hosts.items()):
h = self.net.get(host_name)
if "commands" in host_info:
for cmd in host_info["commands"]:

vm-ubuntu-20.04/README.md (new file, 189 lines)

@ -0,0 +1,189 @@
# Creating the VM
Start creating a brand new VM by running `vagrant up` in this
directory (install vagrant on your system if needed). It can take one
to several hours, depending upon the speed of your computer and
Internet connection.
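
For reference, a minimal host-side sequence might look like the following (assuming VirtualBox and Vagrant are already installed; the clone path is illustrative):

```bash
cd tutorials/vm-ubuntu-20.04   # adjust to wherever this repository is cloned
vagrant up                     # builds the VM; can take one to several hours
vagrant halt                   # shut the VM down cleanly when done
```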
Below are the steps taken to prepare a VM _after_ running `vagrant up`
on the host OS. Some of these could probably be automated, and changes
to the `vagrant up` scripts that do so are welcome. I did them manually
simply to avoid the experimentation and time required to automate them,
since I do not expect to create a new VM image very often (a couple of
times per year).
+ Log in as user p4 (password p4)
+ If asked, click "Upgrade" in the pop-up window asking whether you want
to upgrade the system. This will download the latest Linux kernel
version released for Ubuntu 20.04, and other updated packages.
+ Reboot the system.
+ Use `sudo apt purge <list of packages>` to remove older versions of
the Linux kernel, if the upgrade installed a newer one.
+ `sudo apt clean`
+ Log in as user p4 (password p4)
+ Start menu -> Preferences -> LXQt settings -> Monitor settings
+ Change resolution from initial 800x600 to 1024x768. Apply the changes.
+ Close monitor settings window
+ Note: For some reason I do not know, these settings seem to be
undone, even if I use the "Save" button. They are temporarily in
effect if I shut down the system and log back in, but then in a few
seconds it switches back to 800x600. Strange.
+ Start menu -> Preferences -> LXQt settings -> Desktop
+ In "Wallpaper mode" popup menu, choose "Center on the screen".
+ Click Apply button
+ Close "Desktop preferences" window
+ Several of the icons on the desktop have an exclamation mark on
them. If you try double-clicking those icons, it pops up a window
saying "This file 'Sublime Text' seems to be a desktop entry. What
do you want to do with it?" with buttons for "Open", "Execute", and
"Cancel". Clicking "Open" causes the file to be opened using the
Atom editor. Clicking "Execute" executes the associated command.
If you do a mouse middle click on one of these desktop icons, a
popup menu appears where the second-to-bottom choice is "Trust this
executable". Selecting that causes the exclamation mark to go away,
and future double-clicks of the icon execute the program without
first popping up a window to choose between Open/Execute/Cancel. I
did that for each of these desktop icons:
+ Sublime Text
+ Terminal
+ Wireshark
+ cd tutorials
+ `git remote add jafingerhut https://github.com/jafingerhut/tutorials`
+ `git pull jafingerhut`
+ `git checkout jafingerhut/add-2021-mar-vm-based-on-ubuntu-20.04`
+ The above commands change to a branch that includes changes for
using Python3, and hopefully removes all traces of using Python2.
This is relatively new as of March 2021, and there may be bugs
remaining to be found.
+ Log off
+ Log in as user vagrant (password vagrant)
+ Change monitor settings and wallpaper mode as described above for
user p4.
+ Open a terminal.
+ Run the command `./clean.sh`, which removes about 6 to 7 GBytes of
files created while building the projects.
+ Log off
# Notes on test results for the VM
## p4c testing results
Steps to run the p4c tests:
+ Log in as user vagrant (password vagrant)
+ In a new terminal, execute these commands:
```bash
# Compile p4c again from source, since the clean.sh step reduced disk
# space by deleting the p4c/build directory.
git clone https://github.com/jafingerhut/p4-guide
cd p4c
~/p4-guide/bin/build-p4c.sh
# Run the p4c tests
cd build
make -j2 check |& tee make-check-out.txt
```
As of 2021-09-07, the p4c compiler passes all but 61 of its included
tests.
The test named cpplint fails because Python2 is not installed on the
system. Omitting Python2 is intentional for this VM. The cpplint
test passes fine on other systems that have Python2 installed.
There are 60 failing tests whose names begin with 'ebpf' or 'ubpf'.
They work fine in the continuous integration tests of the
https://github.com/p4lang/p4c project, because the VM used to run
those tests has additional software installed to support them. Perhaps
future versions of this VM will enable the ebpf and ubpf back ends to
pass these tests as well; contributions of the needed changes to the
VM build scripts are welcome.
## Send ping packets in the solution to `basic` exercise of `p4lang/tutorials` repository
With the branch of the p4lang/tutorials repository included with this
VM, the following tests pass. More testing and/or bug fixes are
welcome here.
First log in as the user `p4` (password `p4`) and open a terminal
window.
```bash
$ cd tutorials/exercises/basic
$ cp solution/basic.p4 basic.p4
$ make run
```
If at the end of many lines of logging output you see a prompt
`mininet>`, you can try entering the command `h1 ping h2` to ping from
virtual host `h1` in the exercise to `h2`, and it should report a
successful ping every second. It will not stop on its own. You can
type Control-C to stop it and return to the `mininet>` prompt, and you
can type Control-D to exit from mininet and get back to the original
shell prompt. To ensure that any processes started by the above steps
are terminated, you can run this command:
```bash
$ make stop
```
# Creating a single file image of the VM
For the particular case of creating the VM named 'P4 Tutorial
2021-09-07' on September 7, 2021, these were the host OS details, in
case they turn out to matter to the finished VM image for some
reason:
+ macOS 10.14.6
+ VirtualBox 6.1.26 r145957
+ Vagrant 2.2.16
In the VirtualBox GUI interface:
+ Choose menu item File -> Export Appliance ...
+ Select the VM named 'P4 Tutorial 2021-09-07' and click Continue button
+ Format
+ I used: Open Virtualization Format 1.0
+ Other available options were:
+ Open Virtualization Format 0.9
+ Open Virtualization Format 2.0
+ Target file
+ I used: /Users/andy/Documents/P4 Tutorial 2021-09-07.ova
+ Mac Address Policy
+ I used: Include only NAT network adapter MAC addresses
+ Other available options were:
+ Include all network adapter MAC addresses
+ Strip all network adapter MAC addresses
+ Additionally
+ Write Manifest file: checked
+ Include ISO image files: unchecked
Clicked "Continue" button.
Virtual system settings:
+ Name: P4 Tutorial 2021-09-07
+ Product: I left this blank
+ Product-URL: I left this blank
+ Vendor: P4.org - P4 Language Consortium
+ Vendor-URL: https://p4.org
+ Version: 2021-09-07
+ Description:
```
Open source P4 development tools built from latest source code as of 2021-Sep-07 and packaged into an Ubuntu 20.04 Desktop Linux VM for the AMD64 architecture.
```
+ License
```
Open source code available hosted at https://github.com/p4lang is released under the Apache 2.0 license. Libraries it depends upon, such as Protobuf, Thrift, gRPC, Ubuntu Linux, etc. are released under their own licenses.
```
Clicked "Export" button.
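
The same export can also be done from the host command line. A hedged equivalent of the GUI steps above (worth double-checking the option spellings against `VBoxManage export --help` for your VirtualBox version):

```bash
# OVF 1.0 output, write a manifest, keep only the NAT adapter MAC address.
VBoxManage export "P4 Tutorial 2021-09-07" \
    --output "P4 Tutorial 2021-09-07.ova" \
    --ovf10 --options manifest,nomacsbutnat \
    --vsys 0 \
    --vendor "P4.org - P4 Language Consortium" \
    --vendorurl "https://p4.org" \
    --version "2021-09-07"
```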

vm-ubuntu-20.04/Vagrantfile (new vendored file, 34 lines)

@ -0,0 +1,34 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
Vagrant.configure(2) do |config|
config.vm.box = "bento/ubuntu-20.04"
config.vm.define "p4-tutorial" do |tutorial|
end
config.vm.provider "virtualbox" do |vb|
vb.name = "P4 Tutorial" + Time.now.strftime(" %Y-%m-%d")
vb.gui = true
vb.memory = 2048
vb.cpus = 2
vb.customize ["modifyvm", :id, "--cableconnected1", "on"]
vb.customize ["storageattach", :id,
"--storagectl", "IDE Controller",
"--port", "0", "--device", "0",
"--type", "dvddrive",
"--medium", "emptydrive"]
vb.customize ["modifyvm", :id, "--vram", "32"]
end
config.vm.synced_folder '.', '/vagrant', disabled: true
config.vm.hostname = "p4"
config.vm.provision "file", source: "p4-logo.png", destination: "/home/vagrant/p4-logo.png"
config.vm.provision "file", source: "p4_16-mode.el", destination: "/home/vagrant/p4_16-mode.el"
config.vm.provision "file", source: "p4.vim", destination: "/home/vagrant/p4.vim"
config.vm.provision "file", source: "py3localpath.py", destination: "/home/vagrant/py3localpath.py"
config.vm.provision "file", source: "patches/disable-Wno-error-and-other-small-changes.diff", destination: "/home/vagrant/patches/disable-Wno-error-and-other-small-changes.diff"
config.vm.provision "file", source: "patches/behavioral-model-use-correct-libssl-pkg.patch", destination: "/home/vagrant/patches/behavioral-model-use-correct-libssl-pkg.patch"
config.vm.provision "file", source: "patches/mininet-dont-install-python2.patch", destination: "/home/vagrant/patches/mininet-dont-install-python2.patch"
config.vm.provision "file", source: "clean.sh", destination: "/home/vagrant/clean.sh"
config.vm.provision "shell", path: "root-bootstrap.sh"
config.vm.provision "shell", privileged: false, path: "user-bootstrap.sh"
end

vm-ubuntu-20.04/clean.sh (new executable file, 50 lines)

@ -0,0 +1,50 @@
#! /bin/bash
# To reduce disk space used by the virtual machine, delete many build
# files created during execution of root-bootstrap.sh and
# user-bootstrap.sh scripts.
# This script is _not_ automatically run during creation of the VM, so
# that if anything goes wrong during the build, all of the resulting
# files are left behind for examination.
DF1_BEFORE=`df -h .`
DF2_BEFORE=`df -BM .`
cd protobuf
make clean
cd ..
cd grpc
make clean
cd ..
cd behavioral-model
make clean
cd ..
cd p4c
/bin/rm -fr build
cd ..
/bin/rm usr-local-*.txt pip3-list-2b-*.txt
sudo apt autoremove
sudo apt clean
# Zero out unused disk blocks. Results in significantly smaller VM
# image files.
echo "Writing zeros to unused disk blocks (be patient) ..."
FNAME=`mktemp --tmpdir big-empty-zero-file-XXXXXXXX`
dd if=/dev/zero of=${FNAME} bs=4096k
/bin/rm -f ${FNAME}
echo "Disk usage before running this script:"
echo "$DF1_BEFORE"
echo "$DF2_BEFORE"
echo ""
echo "Disk usage after running this script:"
df -h .
df -BM .
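
On the host side, after the zero-fill above has run inside the guest and the VM is powered off, the virtual disk can optionally be compacted as well, provided it is a dynamically allocated VDI; a hedged example (the disk path is hypothetical):

```bash
VBoxManage modifymedium disk "$HOME/VirtualBox VMs/P4 Tutorial/P4 Tutorial.vdi" --compact
```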

vm-ubuntu-20.04/p4-logo.png (new binary file, 5.2 KiB; contents not shown)

vm-ubuntu-20.04/p4.vim (new file, 133 lines)

@ -0,0 +1,133 @@
" Vim syntax file
" Language: P4_16
" Maintainer: Antonin Bas, Barefoot Networks Inc
" Latest Revision: 5 August 2014
" Updated By: Gyanesh Patra, Unicamp University
" Latest Revision: 12 April 2016
" Updated Again By: Robert MacDavid, Princeton University
" Latest Revision: 12 June 2017
if version < 600
syntax clear
elseif exists("b:current_syntax")
finish
endif
" Use case sensitive matching of keywords
syn case match
syn keyword p4ObjectKeyword action apply control default
syn keyword p4ObjectKeyword enum extern exit
syn keyword p4ObjectKeyword header header_union
syn keyword p4ObjectKeyword match_kind
syn keyword p4ObjectKeyword package parser
syn keyword p4ObjectKeyword state struct switch size
syn keyword p4ObjectKeyword table transition tuple typedef
syn keyword p4ObjectKeyword verify
" Tables
syn keyword p4ObjectAttributeKeyword key actions default_action entries
syn keyword p4ObjectAttributeKeyword implementation
" Counters and meters
syn keyword p4ObjectAttributeKeyword counters meters
" Var Attributes
syn keyword p4ObjectKeyword const in out inout
syn keyword p4Annotation @name @tableonly @defaultonly
syn keyword p4Annotation @globalname @atomic @hidden
syn keyword p4MatchTypeKeyword exact ternary lpm range
syn keyword p4TODO contained FIXME TODO
syn match p4Comment '\/\/.*' contains=p4TODO
syn region p4BlockComment start='\/\*' end='\*\/' contains=p4TODO keepend
syn match p4Preprocessor '#(include|define|undef|if|ifdef) .*$'
syn match p4Preprocessor '#(if|ifdef|ifndef|elif|else) .*$'
syn match p4Preprocessor '#(endif|defined|line|file) .*$'
syn match p4Preprocessor '#(error|warning) .*$'
syn keyword p4Type bit bool int varbit void error
" Integer Literals
syn match p4Int '[0-9][0-9_]*'
syn match p4Indentifier '[A-Za-z_][A-Za-z0-9_]*'
syn match p4HexadecimalInt '0[Xx][0-9a-fA-F]\+'
syn match p4DecimalInt '0[dD][0-9_]\+'
syn match p4OctalInt '0[oO][0-7_]\+'
syn match p4BinaryInt '0[bB][01_]\+'
syn region p4SizedType start='(bit|int|varbit)\<' end='\>'
syn match p4UserType '[A-Za-z_][A-Za-z0-9_]*[_][t]\W'
syn keyword p4Operators and or not &&& mask
" Header Methods
syn keyword p4Primitive isValid setValid setInvalid
" Table Methods
syn keyword p4Primitive hit action_run
" Packet_in methods
syn keyword p4Primitive extract lookahead advance length
" Packet_out methods
syn keyword p4Primitive emit
" Known parser states
syn keyword p4Primitive accept reject
" Misc
syn keyword p4Primitive NoAction
syn keyword p4Conditional if else select
syn keyword p4Statement return
" Don't Care
syn keyword p4Constant _
" Error
syn keyword p4Constant NoError PacketTooShort NoMatch StackOutOfBounds
syn keyword p4Constant OverwritingHeader HeaderTooShort ParserTiimeout
" Boolean
syn keyword p4Boolean false true
""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
" Apply highlight groups to syntax groups defined above
" For version 5.7 and earlier: only when not done already
" For version 5.8 and later: only when an item doesn't have highlighting yet
if version >= 508 || !exists("did_p4_syntax_inits")
if version <= 508
let did_p4_syntax_inits = 1
command -nargs=+ HiLink hi link <args>
else
command -nargs=+ HiLink hi def link <args>
endif
HiLink p4ObjectKeyword Repeat
HiLink p4UserType Type
HiLink p4ObjectAttributeKeyword Keyword
HiLink p4TypeAttribute StorageClass
HiLink p4Annotation Special
HiLink p4MatchTypeKeyword Keyword
HiLink p4TODO Todo
HiLink p4Comment Comment
HiLink p4BlockComment Comment
HiLink p4Preprocessor PreProc
HiLink p4SizedType Type
HiLink p4Type Type
HiLink p4DecimalInt Number
HiLink p4HexadecimalInt Number
HiLink p4OctalInt Number
HiLink p4BinaryInt Number
HiLink p4Int Number
HiLink p4Operators Operator
HiLink p4Primitive Function
HiLink p4Conditional Conditional
HiLink p4Statement Statement
HiLink p4Constant Constant
HiLink p4Boolean Boolean
delcommand HiLink
endif
let b:current_syntax = "p4"


@ -0,0 +1,222 @@
;;; p4_16-mode.el --- Support for the P4_16 programming language
;; Copyright (C) 2016- Barefoot Networks
;; Author: Vladimir Gurevich <vladimir.gurevich@barefootnetworks.com>
;; Maintainer: Vladimir Gurevich <vladimir.gurevich@barefootnetworks.com>
;; Created: 15 April 2017
;; Version: 0.2
;; Keywords: languages p4_16
;; Homepage: http://p4.org
;; This file is not part of GNU Emacs.
;; This file is free software…
;; This mode has preliminary support for P4_16. It covers the core language,
;; but it is not clear yet how we can highlight the identifiers defined
;; for a particular architecture. Core library definitions are included
;; Placeholder for user customization code
(defvar p4_16-mode-hook nil)
;; Define the keymap (for now it is pretty much default)
(defvar p4_16-mode-map
(let ((map (make-keymap)))
(define-key map "\C-j" 'newline-and-indent)
map)
"Keymap for P4_16 major mode")
;; Syntactic highlighting
;; Main keywords (declarations and operators)
(setq p4_16-keywords
'("action" "apply"
"control"
"default"
"else" "enum" "extern" "exit"
"header" "header_union"
"if"
"match_kind"
"package" "parser"
"return"
"select" "state" "struct" "switch"
"table" "transition" "tuple" "typedef"
"verify"
))
(setq p4_16-annotations
'("@name" "@metadata" "@alias"
))
(setq p4_16-attributes
'("const" "in" "inout" "out"
;; Tables
"key" "actions" "default_action" "entries" "implementation"
"counters" "meters"
))
(setq p4_16-variables
'("packet_in" "packet_out"
))
(setq p4_16-operations
'("&&&" ".." "++" "?" ":"))
(setq p4_16-constants
'(
;;; Don't care
"_"
;;; bool
"false" "true"
;;; error
"NoError" "PacketTooShort" "NoMatch" "StackOutOfBounds"
"OverwritingHeader" "HeaderTooShort" "ParserTimeout"
;;; match_kind
"exact" "ternary" "lpm" "range"
;;; We can add constants for supported architectures here
))
(setq p4_16-types
'("bit" "bool" "int" "varbit" "void" "error"
))
(setq p4_16-primitives
'(
;;; Header methods
"isValid" "setValid" "setInvalid"
;;; Table Methods
"hit" "action_run"
;;; packet_in methods
"extract" "lookahead" "advance" "length"
;;; packet_out methods
"emit"
;;; Known parser states
"accept" "reject"
;;; misc
"NoAction"
))
(setq p4_16-cpp
'("#include"
"#define" "#undef"
"#if" "#ifdef" "#ifndef"
"#elif" "#else"
"#endif"
"defined"
"#line" "#file"))
(setq p4_16-cppwarn
'("#error" "#warning"))
;; Build optimized regexps from the lists above
(setq p4_16-keywords-regexp (regexp-opt p4_16-keywords 'words))
(setq p4_16-annotations-regexp (regexp-opt p4_16-annotations 1))
(setq p4_16-attributes-regexp (regexp-opt p4_16-attributes 'words))
(setq p4_16-variables-regexp (regexp-opt p4_16-variables 'words))
(setq p4_16-operations-regexp (regexp-opt p4_16-operations 'words))
(setq p4_16-constants-regexp (regexp-opt p4_16-constants 'words))
(setq p4_16-types-regexp (regexp-opt p4_16-types 'words))
(setq p4_16-primitives-regexp (regexp-opt p4_16-primitives 'words))
(setq p4_16-cpp-regexp (regexp-opt p4_16-cpp 1))
(setq p4_16-cppwarn-regexp (regexp-opt p4_16-cppwarn 1))
;; create the list for font-lock.
;; each category of keyword is given a particular face
(defconst p4_16-font-lock-keywords
(list
(cons p4_16-cpp-regexp font-lock-preprocessor-face)
(cons p4_16-cppwarn-regexp font-lock-warning-face)
(cons p4_16-types-regexp font-lock-type-face)
(cons p4_16-constants-regexp font-lock-constant-face)
(cons p4_16-attributes-regexp font-lock-builtin-face)
(cons p4_16-variables-regexp font-lock-variable-name-face)
;;; This is a special case to distinguish the method from the keyword
(cons "\\.apply" font-lock-function-name-face)
(cons p4_16-primitives-regexp font-lock-function-name-face)
(cons p4_16-operations-regexp font-lock-builtin-face)
(cons p4_16-keywords-regexp font-lock-keyword-face)
(cons p4_16-annotations-regexp font-lock-keyword-face)
(cons "\\(\\w*_t +\\)" font-lock-type-face)
(cons "[^A-Z_][A-Z] " font-lock-type-face) ;; Total hack for templates
(cons "<[A-Z, ]*>" font-lock-type-face)
(cons "\\(<[^>]+>\\)" font-lock-string-face)
(cons "\\([^_A-Za-z]\\([0-9]+w\\)?0x[0-9A-Fa-f]+\\)" font-lock-constant-face)
(cons "\\([^_A-Za-z]\\([0-9]+w\\)?0b[01]+\\)" font-lock-constant-face)
(cons "\\([^_A-Za-z][+-]?\\([0-9]+w\\)?[0-9]+\\)" font-lock-constant-face)
;;(cons "\\(\\w*\\)" font-lock-variable-name-face)
)
"Default Highlighting Expressions for P4_16")
(defvar p4_16-mode-syntax-table
(let ((st (make-syntax-table)))
(modify-syntax-entry ?_ "w" st)
(modify-syntax-entry ?/ ". 124b" st)
(modify-syntax-entry ?* ". 23" st)
(modify-syntax-entry ?\n "> b" st)
st)
"Syntax table for p4_16-mode")
;;; Indentation
(defvar p4_16-indent-offset 4
"Indentation offset for `p4_16-mode'.")
(defun p4_16-indent-line ()
"Indent current line for any balanced-paren mode."
(interactive)
(let ((indent-col 0)
(indentation-increasers "[{(]")
(indentation-decreasers "[})]")
)
(save-excursion
(beginning-of-line)
(condition-case nil
(while t
(backward-up-list 1)
(when (looking-at indentation-increasers)
(setq indent-col (+ indent-col p4_16-indent-offset))))
(error nil)))
(save-excursion
(back-to-indentation)
(when (and (looking-at indentation-decreasers)
(>= indent-col p4_16-indent-offset))
(setq indent-col (- indent-col p4_16-indent-offset))))
(indent-line-to indent-col)))
;;; Imenu support
(require 'imenu)
(setq p4_16-imenu-generic-expression
'(
("Controls" "^ *control +\\([A-Za-z0-9_]*\\)" 1)
("Externs" "^ *extern +\\([A-Za-z0-9_]*\\) *\\([A-Za-z0-9_]*\\)" 2)
("Tables" "^ *table +\\([A-Za-z0-9_]*\\)" 1)
("Actions" "^ *action +\\([A-Za-z0-9_]*\\)" 1)
("Parsers" "^ *parser +\\([A-Za-z0-9_]*\\)" 1)
("Parser States" "^ *state +\\([A-Za-z0-9_]*\\)" 1)
("Headers" "^ *header +\\([A-Za-z0-9_]*\\)" 1)
("Header Unions" "^ *header_union +\\([A-Za-z0-9_]*\\)" 1)
("Structs" "^ *struct +\\([A-Za-z0-9_]*\\)" 1)
))
;;; Cscope Support
(require 'xcscope)
;; Put everything together
(defun p4_16-mode ()
"Major mode for editing P4_16 programs"
(interactive)
(kill-all-local-variables)
(set-syntax-table p4_16-mode-syntax-table)
(use-local-map p4_16-mode-map)
(set (make-local-variable 'font-lock-defaults) '(p4_16-font-lock-keywords))
(set (make-local-variable 'indent-line-function) 'p4_16-indent-line)
(setq major-mode 'p4_16-mode)
(setq mode-name "P4_16")
(setq imenu-generic-expression p4_16-imenu-generic-expression)
(imenu-add-to-menubar "P4_16")
(cscope-minor-mode)
(run-hooks 'p4_16-mode-hook)
)
;; The most important line
(provide 'p4_16-mode)


@ -0,0 +1,18 @@
diff --git a/install_deps.sh b/install_deps.sh
index 7acf14d..bdd1d03 100755
--- a/install_deps.sh
+++ b/install_deps.sh
@@ -1,7 +1,10 @@
#!/bin/bash
set -e
ubuntu_release=`lsb_release -s -r`
-if [[ "${ubuntu_release}" > "18" ]]
+if [[ "${ubuntu_release}" > "19" ]]
+then
+ LIBSSL_DEV="libssl-dev"
+elif [[ "${ubuntu_release}" > "18" ]]
then
# This older package libssl1.0-dev enables compiling Thrift 0.9.2
# on Ubuntu 18.04. Package libssl-dev exists, but Thrift 0.9.2
diff --git a/travis/install-thrift.sh b/travis/install-thrift.sh
index 2ea4177..c17de69 100644


@ -0,0 +1,78 @@
diff --git a/Makefile b/Makefile
index 736583fd93..472d91337d 100644
--- a/Makefile
+++ b/Makefile
@@ -300,37 +300,37 @@ else
TMPOUT = `mktemp /tmp/test-out-XXXXXX`
endif
-CHECK_NO_CXX14_COMPAT_WORKS_CMD = $(CC) -std=c++11 -Werror -Wno-c++14-compat -o $(TMPOUT) -c test/build/no-c++14-compat.cc
+CHECK_NO_CXX14_COMPAT_WORKS_CMD = $(CC) -std=c++11 -Wno-c++14-compat -o $(TMPOUT) -c test/build/no-c++14-compat.cc
HAS_WORKING_NO_CXX14_COMPAT = $(shell $(CHECK_NO_CXX14_COMPAT_WORKS_CMD) 2> /dev/null && echo true || echo false)
ifeq ($(HAS_WORKING_NO_CXX14_COMPAT),true)
W_NO_CXX14_COMPAT=-Wno-c++14-compat
endif
-CHECK_SHADOW_WORKS_CMD = $(CC) -std=c99 -Werror -Wshadow -o $(TMPOUT) -c test/build/shadow.c
+CHECK_SHADOW_WORKS_CMD = $(CC) -std=c99 -Wshadow -o $(TMPOUT) -c test/build/shadow.c
HAS_WORKING_SHADOW = $(shell $(CHECK_SHADOW_WORKS_CMD) 2> /dev/null && echo true || echo false)
ifeq ($(HAS_WORKING_SHADOW),true)
W_SHADOW=-Wshadow
NO_W_SHADOW=-Wno-shadow
endif
-CHECK_EXTRA_SEMI_WORKS_CMD = $(CC) -std=c99 -Werror -Wextra-semi -o $(TMPOUT) -c test/build/extra-semi.c
+CHECK_EXTRA_SEMI_WORKS_CMD = $(CC) -std=c99 -Wextra-semi -o $(TMPOUT) -c test/build/extra-semi.c
HAS_WORKING_EXTRA_SEMI = $(shell $(CHECK_EXTRA_SEMI_WORKS_CMD) 2> /dev/null && echo true || echo false)
ifeq ($(HAS_WORKING_EXTRA_SEMI),true)
W_EXTRA_SEMI=-Wextra-semi
NO_W_EXTRA_SEMI=-Wno-extra-semi
endif
-CHECK_NO_SHIFT_NEGATIVE_VALUE_WORKS_CMD = $(CC) -std=c99 -Werror -Wno-shift-negative-value -o $(TMPOUT) -c test/build/no-shift-negative-value.c
+CHECK_NO_SHIFT_NEGATIVE_VALUE_WORKS_CMD = $(CC) -std=c99 -Wno-shift-negative-value -o $(TMPOUT) -c test/build/no-shift-negative-value.c
HAS_WORKING_NO_SHIFT_NEGATIVE_VALUE = $(shell $(CHECK_NO_SHIFT_NEGATIVE_VALUE_WORKS_CMD) 2> /dev/null && echo true || echo false)
ifeq ($(HAS_WORKING_NO_SHIFT_NEGATIVE_VALUE),true)
W_NO_SHIFT_NEGATIVE_VALUE=-Wno-shift-negative-value
NO_W_NO_SHIFT_NEGATIVE_VALUE=-Wshift-negative-value
endif
-CHECK_NO_UNUSED_BUT_SET_VARIABLE_WORKS_CMD = $(CC) -std=c99 -Werror -Wno-unused-but-set-variable -o $(TMPOUT) -c test/build/no-unused-but-set-variable.c
+CHECK_NO_UNUSED_BUT_SET_VARIABLE_WORKS_CMD = $(CC) -std=c99 -Wno-unused-but-set-variable -o $(TMPOUT) -c test/build/no-unused-but-set-variable.c
HAS_WORKING_NO_UNUSED_BUT_SET_VARIABLE = $(shell $(CHECK_NO_UNUSED_BUT_SET_VARIABLE_WORKS_CMD) 2> /dev/null && echo true || echo false)
ifeq ($(HAS_WORKING_NO_UNUSED_BUT_SET_VARIABLE),true)
W_NO_UNUSED_BUT_SET_VARIABLE=-Wno-unused-but-set-variable
NO_W_NO_UNUSED_BUT_SET_VARIABLE=-Wunused-but-set-variable
endif
-CHECK_NO_MAYBE_UNINITIALIZED_WORKS_CMD = $(CC) -std=c99 -Werror -Wno-maybe-uninitialized -o $(TMPOUT) -c test/build/no-maybe-uninitialized.c
+CHECK_NO_MAYBE_UNINITIALIZED_WORKS_CMD = $(CC) -std=c99 -Wno-maybe-uninitialized -o $(TMPOUT) -c test/build/no-maybe-uninitialized.c
HAS_WORKING_NO_MAYBE_UNINITIALIZED = $(shell $(CHECK_NO_MAYBE_UNINITIALIZED_WORKS_CMD) 2> /dev/null && echo true || echo false)
ifeq ($(HAS_WORKING_NO_MAYBE_UNINITIALIZED),true)
W_NO_MAYBE_UNINITIALIZED=-Wno-maybe-uninitialized
@@ -353,7 +353,7 @@ ifeq ($(SYSTEM),Darwin)
CXXFLAGS += -stdlib=libc++
endif
CXXFLAGS += -Wnon-virtual-dtor
-CPPFLAGS += -g -Wall -Wextra -Werror -Wno-long-long -Wno-unused-parameter -DOSATOMIC_USE_INLINED=1 -Wno-deprecated-declarations -Ithird_party/nanopb -DPB_FIELD_32BIT
+CPPFLAGS += -g -Wall -Wextra -Wno-long-long -Wno-unused-parameter -DOSATOMIC_USE_INLINED=1 -Wno-deprecated-declarations -Ithird_party/nanopb -DPB_FIELD_32BIT
COREFLAGS += -fno-rtti -fno-exceptions
LDFLAGS += -g
@@ -10180,7 +10180,7 @@ PUBLIC_HEADERS_C += \
LIBARES_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(LIBARES_SRC))))
$(LIBARES_OBJS): CPPFLAGS += -Ithird_party/cares -Ithird_party/cares/cares -fvisibility=hidden -D_GNU_SOURCE $(if $(subst Darwin,,$(SYSTEM)),,-Ithird_party/cares/config_darwin) $(if $(subst FreeBSD,,$(SYSTEM)),,-Ithird_party/cares/config_freebsd) $(if $(subst Linux,,$(SYSTEM)),,-Ithird_party/cares/config_linux) $(if $(subst OpenBSD,,$(SYSTEM)),,-Ithird_party/cares/config_openbsd) -DWIN32_LEAN_AND_MEAN -D_HAS_EXCEPTIONS=0 -DNOMINMAX $(if $(subst MINGW32,,$(SYSTEM)),-DHAVE_CONFIG_H,)
-$(LIBARES_OBJS): CFLAGS += -Wno-sign-conversion $(if $(subst Darwin,,$(SYSTEM)),,-Wno-shorten-64-to-32) $(if $(subst MINGW32,,$(SYSTEM)),-Wno-invalid-source-encoding,)
+$(LIBARES_OBJS): CFLAGS += -Wno-sign-conversion $(if $(subst Darwin,,$(SYSTEM)),,-Wno-shorten-64-to-32) $(if $(subst MINGW32,,$(SYSTEM)),)
$(LIBDIR)/$(CONFIG)/libares.a: $(LIBARES_OBJS)
$(E) "[AR] Creating $@"
diff --git a/src/core/lib/gpr/log_linux.cc b/src/core/lib/gpr/log_linux.cc
index 561276f0c2..59bc0fee95 100644
--- a/src/core/lib/gpr/log_linux.cc
+++ b/src/core/lib/gpr/log_linux.cc
@@ -40,7 +40,6 @@
#include <time.h>
#include <unistd.h>
-static long gettid(void) { return syscall(__NR_gettid); }
void gpr_log(const char* file, int line, gpr_log_severity severity,
const char* format, ...) {


@ -0,0 +1,32 @@
diff --git a/util/install.sh b/util/install.sh
index 0a67871..7076dbe 100755
--- a/util/install.sh
+++ b/util/install.sh
@@ -5,6 +5,7 @@
# Fail on error
set -e
+set -x
# Fail on unset var usage
set -o nounset
@@ -174,15 +175,16 @@ function mn_deps {
python-pep8 ${PYPKG}-pexpect ${PYPKG}-tk
else # Debian/Ubuntu
pf=pyflakes
- # Starting around 20.04, installing pyflakes instead of pyflakes3
+ # Starting around 18.04, installing pyflakes instead of pyflakes3
# causes Python 2 to be installed, which is exactly NOT what we want.
- if [ `expr $RELEASE '>=' 20.04` = "1" ]; then
+ if [ `expr $RELEASE '>=' 18.04` = "1" ]; then
pf=pyflakes3
fi
$install gcc make socat psmisc xterm ssh iperf telnet \
- ethtool help2man $pf pylint pep8 \
+ ethtool help2man pylint3 pep8 \
net-tools \
${PYPKG}-pexpect ${PYPKG}-tk
+ $install --no-install-recommends $pf
# Install pip
$install ${PYPKG}-pip || $install ${PYPKG}-pip-whl
if ! ${PYTHON} -m pip -V; then

vm-ubuntu-20.04/py3localpath.py Executable file

@ -0,0 +1,21 @@
#! /usr/bin/env python3
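# Print the /usr/local/lib/python3.X prefix whose dist-packages directory
# appears on this system's Python3 sys.path (e.g. /usr/local/lib/python3.8
# on Ubuntu 20.04).  Exit with non-zero status if exactly one such sys.path
# entry is not found, or if the prefix cannot be extracted from it.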
import re
import sys
l1=[x for x in sys.path if re.match(r'/usr/local/lib/python3.[0-9]+/dist-packages', x)]
if len(l1) == 1:
py3distdir = l1[0]
m = re.match(r'(/usr/local/lib/python3.[0-9]+)/dist-packages', l1[0])
if m:
print(m.group(1))
else:
print("Inconceivable! Somehow the second pattern did not match but the first did.")
sys.exit(1)
else:
print("Found %d matching entries in Python3 sys.path instead of 1: %s"
% (len(l1), l1))
sys.exit(1)
sys.exit(0)

vm-ubuntu-20.04/root-bootstrap.sh Executable file

@ -0,0 +1,192 @@
#!/bin/bash
# Print commands and exit on errors
set -xe
# Sublime 3 install steps came from this page on 2020-May-11:
# https://www.sublimetext.com/docs/3/linux_repositories.html#apt
# The commands were modified only to remove 'sudo' from several
# commands. sudo is unnecessary here since this entire script is
# executed as the user root.
wget -qO - https://download.sublimetext.com/sublimehq-pub.gpg | apt-key add -
apt-get install apt-transport-https
echo "deb https://download.sublimetext.com/ apt/stable/" | tee /etc/apt/sources.list.d/sublime-text.list
# These commands are done later below
#apt-get update
#apt-get install sublime-text
# Atom install steps came from this page on 2020-May-11:
# https://flight-manual.atom.io/getting-started/sections/installing-atom/#platform-linux
wget -qO - https://packagecloud.io/AtomEditor/atom/gpgkey | apt-key add -
sh -c 'echo "deb [arch=amd64] https://packagecloud.io/AtomEditor/atom/any/ any main" > /etc/apt/sources.list.d/atom.list'
# These commands are done later below
#apt-get update
#apt-get install atom
apt-get update
KERNEL=$(uname -r)
DEBIAN_FRONTEND=noninteractive apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" upgrade
apt-get install -y --no-install-recommends --fix-missing\
atom \
autoconf \
automake \
bison \
build-essential \
ca-certificates \
clang \
cmake \
cpp \
curl \
emacs \
flex \
g++ \
git \
iproute2 \
libboost-dev \
libboost-filesystem-dev \
libboost-graph-dev \
libboost-iostreams-dev \
libboost-program-options-dev \
libboost-system-dev \
libboost-test-dev \
libboost-thread-dev \
libelf-dev \
libevent-dev \
libffi-dev \
libfl-dev \
libgc-dev \
libgflags-dev \
libgmp-dev \
libjudy-dev \
libpcap-dev \
libpython3-dev \
libreadline-dev \
libssl-dev \
libtool \
libtool-bin \
linux-headers-$KERNEL\
llvm \
lubuntu-desktop \
make \
net-tools \
pkg-config \
python3 \
python3-dev \
python3-pip \
python3-setuptools \
sublime-text \
tcpdump \
unzip \
valgrind \
vim \
wget \
xcscope-el \
xterm
# TBD: Should these packages be installed via apt-get ? They are in
# my install-p4dev-v4.sh script, but they might not be needed, either.
# zlib1g-dev
# On a freshly installed Ubuntu 20.04.1 or 18.04.5 system, desktop
# amd64 minimal installation, the Debian package python3-protobuf is
# installed. This is depended upon by another package called
# python3-macaroonbakery, which in turn is depended upon by a
# package called gnome-online-accounts. I suspect this might have
# something to do with Ubuntu's desire to make it easy to connect with
# on-line accounts like Google accounts.
# This python3-protobuf package enables one to have a session like
# this with no error, on a freshly installed system:
# $ python3
# >>> import google.protobuf
# However, something about this script doing its work causes a
# conflict between the Python3 protobuf module installed by this
# script, and the one installed by the package python3-protobuf, such
# that the import statement above gives an error. The package
# google.protobuf.internal is used by the p4lang/tutorials Python
# code, and the only way I know to make this work right now is to
# remove the Debian python3-protobuf package, and then install Python3
# protobuf support using pip3 as done below.
# Experiment starting from a freshly installed Ubuntu 20.04.1 Linux
# desktop amd64 system, minimal install:
# Initially, python3-protobuf package was installed.
# Running python3 followed by 'import' of any of these gave no error:
# + google
# + google.protobuf
# + google.protobuf.internal
# Then did 'sudo apt-get purge python3-protobuf'
# At that point, attempting to import any of the 3 modules above gave an error.
# Then did 'sudo apt-get install python3-pip'
# At that point, attempting to import any of the 3 modules above gave an error.
# Then did 'sudo pip3 install protobuf==3.6.1'
# At that point, attempting to import any of the 3 modules above gave NO error.
sudo apt-get purge -y python3-protobuf || echo "Failed to remove python3-protobuf, probably because there was no such package installed"
sudo pip3 install protobuf==3.6.1
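# Optional illustrative check (assuming the pip3 install above succeeded):
# the three imports from the experiment described above should now work.
python3 -c 'import google, google.protobuf, google.protobuf.internal' || echo "WARNING: Python3 google.protobuf import check failed"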
# Starting in 2019-Nov, Python3 version of Scapy is needed for `cd
# p4c/build ; make check` to succeed.
sudo pip3 install scapy
# Earlier versions of this script installed the Ubuntu package
# python-ipaddr. However, that no longer exists in Ubuntu 20.04. PIP
# for Python3 can install the ipaddr module, which is good enough to
# enable two of p4c's many tests to pass, tests that failed if the
# ipaddr Python3 module is not installed, in my testing on
# 2020-Oct-17. From the Python stack trace that appears when running
# those failing tests, the code that requires this module is in
# behavioral-model's runtime_CLI.py source file, in a function named
# ipv6Addr_to_bytes.
sudo pip3 install ipaddr
# Things needed for PTF
sudo pip3 install pypcap
# Things needed for `cd tutorials/exercises/basic ; make run` to work:
sudo pip3 install psutil crcmod
useradd -m -d /home/p4 -s /bin/bash p4
echo "p4:p4" | chpasswd
echo "p4 ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/99_p4
chmod 440 /etc/sudoers.d/99_p4
usermod -aG vboxsf p4
cd /usr/share/lubuntu/wallpapers/
cp /home/vagrant/p4-logo.png .
rm lubuntu-default-wallpaper.png
ln -s p4-logo.png lubuntu-default-wallpaper.png
rm /home/vagrant/p4-logo.png
cd ~
# 2021-Mar-06 this command failed with an error that the file did not exist.
#sed -i s@#background=@background=/usr/share/lubuntu/wallpapers/1604-lubuntu-default-wallpaper.png@ /etc/lightdm/lightdm-gtk-greeter.conf
# The following command will hopefully cause the P4 logo to be normal
# size and centered on the initial desktop image, rather than scaled
# and stretched and cropped horribly.
#sed -i s@wallpaper_mode=crop@wallpaper_mode=center@ /etc/xdg/pcmanfm/lubuntu/desktop-items-0.conf
# If that does not have the desired effect, another possibility is
# executing that command to edit the same string in file
# /etc/xdg/pcmanfm/lubuntu/pcmanfm.conf
# TBD: Ubuntu 20.04 does not have the light-locker package, so it
# fails if you try to remove it. Probably enabling auto-login
# requires a different modification than is done below with the cat <<
# EOF command.
# Disable screensaver
#apt-get -y remove light-locker
# Automatically log into the P4 user
#cat << EOF | tee -a /etc/lightdm/lightdm.conf.d/10-lightdm.conf
#[SeatDefaults]
#autologin-user=p4
#autologin-user-timeout=0
#user-session=Lubuntu
#EOF
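# A possible replacement (untested sketch, assuming Lubuntu 20.04 uses the
# SDDM display manager rather than LightDM): SDDM reads auto-login settings
# from /etc/sddm.conf.d/, e.g.:
#cat << EOF | tee /etc/sddm.conf.d/10-autologin.conf
#[Autologin]
#User=p4
#Session=lxqt.desktop
#EOF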

vm-ubuntu-20.04/user-bootstrap.sh Executable file

@ -0,0 +1,307 @@
#!/bin/bash
# Print script commands and exit on errors.
set -xe
#Src
BMV2_COMMIT="b0fb01ecacbf3a7d7f0c01e2f149b0c6a957f779" # 2021-Sep-07
PI_COMMIT="a5fd855d4b3293e23816ef6154e83dc6621aed6a" # 2021-Sep-07
P4C_COMMIT="149634bbe4842fb7c1e80d1b7c9d1e0ec91b0051" # 2021-Sep-07
PTF_COMMIT="8f260571036b2684f16366962edd0193ef61e9eb" # 2021-Sep-07
PROTOBUF_COMMIT="v3.6.1"
GRPC_COMMIT="tags/v1.17.2"
#Get the number of cores to speed up the compilation process
NUM_CORES=`grep -c ^processor /proc/cpuinfo`
# The install steps for p4lang/PI and p4lang/behavioral-model end
# up installing Python module code in the site-packages directory
# mentioned below in this function. That is where GNU autoconf's
# 'configure' script seems to choose as the place to put them.
# On Ubuntu systems when you run the versions of Python that are
# installed via Debian/Ubuntu packages, they only look in a
# sibling dist-packages directory, never the site-packages one.
# If I could find a way to change the part of the install script
# so that p4lang/PI and p4lang/behavioral-model install their
# Python modules in the dist-packages directory, that would be
# preferable, but I have not found a way to do so.
# As a workaround, after finishing the part of the install script
# for those packages, I will invoke this function to move them all
# into the dist-packages directory.
# Some articles with questions and answers related to this.
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=765022
# https://bugs.launchpad.net/ubuntu/+source/automake/+bug/1250877
# https://unix.stackexchange.com/questions/351394/makefile-installing-python-module-out-of-of-pythonpath
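# For illustration, on a stock Ubuntu 20.04 system (Python 3.8) the two
# directories in question would typically be:
#   /usr/local/lib/python3.8/site-packages   <- where 'make install' puts modules
#   /usr/local/lib/python3.8/dist-packages   <- where Ubuntu's python3 looks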
PY3LOCALPATH=`${HOME}/py3localpath.py`
move_usr_local_lib_python3_from_site_packages_to_dist_packages() {
local SRC_DIR
local DST_DIR
local j
local k
SRC_DIR="${PY3LOCALPATH}/site-packages"
DST_DIR="${PY3LOCALPATH}/dist-packages"
# When I tested this script on Ubuntu 16.04, there was no
# site-packages directory. Return without doing anything else if
# this is the case.
if [ ! -d ${SRC_DIR} ]
then
return 0
fi
# Do not move any __pycache__ directory that might be present.
sudo rm -fr ${SRC_DIR}/__pycache__
echo "Source dir contents before moving: ${SRC_DIR}"
ls -lrt ${SRC_DIR}
echo "Dest dir contents before moving: ${DST_DIR}"
ls -lrt ${DST_DIR}
for j in ${SRC_DIR}/*
do
echo $j
k=`basename $j`
# At least sometimes (perhaps always?) there is a directory
# 'p4' or 'google' in both the source and dest directory. I
# think I want to merge their contents. List them both so I
# can see in the log what was in both at the time:
if [ -d ${SRC_DIR}/$k -a -d ${DST_DIR}/$k ]
then
echo "Both source and dest dir contain a directory: $k"
echo "Source dir $k directory contents:"
ls -l ${SRC_DIR}/$k
echo "Dest dir $k directory contents:"
ls -l ${DST_DIR}/$k
sudo mv ${SRC_DIR}/$k/* ${DST_DIR}/$k/
sudo rmdir ${SRC_DIR}/$k
else
echo "Not a conflicting directory: $k"
sudo mv ${SRC_DIR}/$k ${DST_DIR}/$k
fi
done
echo "Source dir contents after moving: ${SRC_DIR}"
ls -lrt ${SRC_DIR}
echo "Dest dir contents after moving: ${DST_DIR}"
ls -lrt ${DST_DIR}
}
find /usr/lib /usr/local $HOME/.local | sort > $HOME/usr-local-1-before-protobuf.txt
# --- Protobuf --- #
git clone https://github.com/google/protobuf.git
cd protobuf
git checkout ${PROTOBUF_COMMIT}
./autogen.sh
# install-p4dev-v4.sh script doesn't have --prefix=/usr option here.
./configure --prefix=/usr
make -j${NUM_CORES}
sudo make install
sudo ldconfig
# Force install python module
#cd python
#sudo python3 setup.py install
#cd ../..
cd ..
find /usr/lib /usr/local $HOME/.local | sort > $HOME/usr-local-2-after-protobuf.txt
# --- gRPC --- #
git clone https://github.com/grpc/grpc.git
cd grpc
git checkout ${GRPC_COMMIT}
git submodule update --init --recursive
# Apply patch that seems to be necessary in order for grpc v1.17.2 to
# compile and install successfully on an Ubuntu 19.10 and later
# system.
PATCH_DIR="${HOME}/patches"
patch -p1 < "${PATCH_DIR}/disable-Wno-error-and-other-small-changes.diff" || echo "Errors while attempting to patch grpc, but continuing anyway ..."
make -j${NUM_CORES}
sudo make install
# I believe the following 2 commands, adapted from similar commands in
# src/python/grpcio/README.rst, should install the Python3 module
# grpc.
find /usr/lib /usr/local $HOME/.local | sort > $HOME/usr-local-2b-before-grpc-pip3.txt
pip3 list | tee $HOME/pip3-list-2b-before-grpc-pip3.txt
sudo pip3 install -r requirements.txt
GRPC_PYTHON_BUILD_WITH_CYTHON=1 sudo pip3 install .
sudo ldconfig
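# Optional illustrative check: if the two pip3 commands above did their job,
# the Python3 grpc module should now import cleanly.
python3 -c 'import grpc; print("grpc Python3 version:", grpc.__version__)' || echo "WARNING: Python3 grpc import check failed"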
cd ..
find /usr/lib /usr/local $HOME/.local | sort > $HOME/usr-local-3-after-grpc.txt
# Note: This is a noticeable difference from how an earlier
# user-bootstrap.sh version worked, which effectively ran
# behavioral-model's install_deps.sh script, then installed PI, then
# went back and compiled the behavioral-model code. Building PI code
# first, without first running behavioral-model's install_deps.sh
# script, might result in fewer PI project features being compiled into
# its binaries.
# --- PI/P4Runtime --- #
git clone https://github.com/p4lang/PI.git
cd PI
git checkout ${PI_COMMIT}
git submodule update --init --recursive
./autogen.sh
# install-p4dev-v4.sh adds more --without-* options to the configure
# script here. I suppose that without those options, this script
# builds the PI code with more features included?
./configure --with-proto
make -j${NUM_CORES}
sudo make install
# install-p4dev-v4.sh at this point does these things, which might be
# useful in this script, too:
# Save about 0.25G of storage by cleaning up PI build
make clean
move_usr_local_lib_python3_from_site_packages_to_dist_packages
sudo ldconfig
cd ..
find /usr/lib /usr/local $HOME/.local | sort > $HOME/usr-local-4-after-PI.txt
# --- Bmv2 --- #
git clone https://github.com/p4lang/behavioral-model.git
cd behavioral-model
git checkout ${BMV2_COMMIT}
PATCH_DIR="${HOME}/patches"
patch -p1 < "${PATCH_DIR}/behavioral-model-use-correct-libssl-pkg.patch" || echo "Errors while attempting to patch behavioral-model, but continuing anyway ..."
./install_deps.sh
./autogen.sh
./configure --enable-debugger --with-pi
make -j${NUM_CORES}
sudo make install
sudo ldconfig
# Simple_switch_grpc target
cd targets/simple_switch_grpc
./autogen.sh
./configure --with-thrift
make -j${NUM_CORES}
sudo make install
sudo ldconfig
# install-p4dev-v4.sh script does this here:
move_usr_local_lib_python3_from_site_packages_to_dist_packages
cd ../../..
find /usr/lib /usr/local $HOME/.local | sort > $HOME/usr-local-5-after-behavioral-model.txt
# --- P4C --- #
git clone https://github.com/p4lang/p4c
cd p4c
git checkout ${P4C_COMMIT}
git submodule update --init --recursive
mkdir -p build
cd build
cmake ..
# The command 'make -j${NUM_CORES}' works fine for the others, but
# with 2 GB of RAM for the VM, there are parts of the p4c build where
# running 2 simultaneous C++ compiler runs requires more than that
# much memory. Things work better by running at most one C++ compilation
# process at a time.
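# (Untested sketch:) on a VM given more RAM, the job count could instead be
# derived from available memory, at roughly one C++ compile per 2 GB:
#P4C_JOBS=$(( $(awk '/MemTotal/ {print int($2/1024/1024)}' /proc/meminfo) / 2 ))
#[ "${P4C_JOBS}" -lt 1 ] && P4C_JOBS=1
#make -j${P4C_JOBS}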
make -j1
sudo make install
sudo ldconfig
cd ../..
find /usr/lib /usr/local $HOME/.local | sort > $HOME/usr-local-6-after-p4c.txt
# --- Mininet --- #
git clone git://github.com/mininet/mininet mininet
cd mininet
PATCH_DIR="${HOME}/patches"
patch -p1 < "${PATCH_DIR}/mininet-dont-install-python2.patch" || echo "Errors while attempting to patch mininet, but continuing anyway ..."
cd ..
# TBD: Try without installing openvswitch, i.e. no '-v' option, to see
# if everything still works well without it.
sudo ./mininet/util/install.sh -nw
find /usr/lib /usr/local $HOME/.local | sort > $HOME/usr-local-7-after-mininet-install.txt
# --- PTF --- #
git clone git://github.com/p4lang/ptf
cd ptf
git checkout ${PTF_COMMIT}
sudo python3 setup.py install
cd ..
find /usr/lib /usr/local $HOME/.local | sort > $HOME/usr-local-8-after-ptf-install.txt
# --- Tutorials --- #
git clone https://github.com/p4lang/tutorials
sudo mv tutorials /home/p4
sudo chown -R p4:p4 /home/p4/tutorials
# --- Emacs --- #
sudo cp p4_16-mode.el /usr/share/emacs/site-lisp/
sudo mkdir /home/p4/.emacs.d/
echo "(autoload 'p4_16-mode' \"p4_16-mode.el\" \"P4 Syntax.\" t)" > init.el
echo "(add-to-list 'auto-mode-alist '(\"\\.p4\\'\" . p4_16-mode))" | tee -a init.el
sudo mv init.el /home/p4/.emacs.d/
sudo ln -s /usr/share/emacs/site-lisp/p4_16-mode.el /home/p4/.emacs.d/p4_16-mode.el
sudo chown -R p4:p4 /home/p4/.emacs.d/
# --- Vim --- #
cd ~
mkdir .vim
cd .vim
mkdir ftdetect
mkdir syntax
echo "au BufRead,BufNewFile *.p4 set filetype=p4" >> ftdetect/p4.vim
echo "set bg=dark" >> ~/.vimrc
sudo mv ~/.vimrc /home/p4/.vimrc
cp ~/p4.vim syntax/p4.vim
cd ~
sudo mv .vim /home/p4/.vim
sudo chown -R p4:p4 /home/p4/.vim
sudo chown p4:p4 /home/p4/.vimrc
# --- Adding Desktop icons --- #
DESKTOP=/home/${USER}/Desktop
mkdir -p ${DESKTOP}
cat > ${DESKTOP}/Terminal.desktop << EOF
[Desktop Entry]
Encoding=UTF-8
Type=Application
Name=Terminal
Name[en_US]=Terminal
Icon=konsole
Exec=/usr/bin/x-terminal-emulator
Comment[en_US]=
EOF
cat > ${DESKTOP}/Wireshark.desktop << EOF
[Desktop Entry]
Encoding=UTF-8
Type=Application
Name=Wireshark
Name[en_US]=Wireshark
Icon=wireshark
Exec=/usr/bin/wireshark
Comment[en_US]=
EOF
cat > ${DESKTOP}/Sublime\ Text.desktop << EOF
[Desktop Entry]
Encoding=UTF-8
Type=Application
Name=Sublime Text
Name[en_US]=Sublime Text
Icon=sublime-text
Exec=/opt/sublime_text/sublime_text
Comment[en_US]=
EOF
sudo mkdir -p /home/p4/Desktop
sudo mv /home/${USER}/Desktop/* /home/p4/Desktop
sudo chown -R p4:p4 /home/p4/Desktop/
# Do this last!
sudo reboot