how to optimize this awk script? - awk

I process two files with awk. I read the first file and store the columns I need in arrays. I then use these arrays to compare against column 8 of the second file.
my script runs very slowly. I would like to know if there is not a way to optimize it?
FNR==NR
{
a[$1];
ip[NR]=$1;
site[NR]=$2;
next
}
BEGIN{
FS="[\t,=]";
OFS="|";
}
sudo awk -f{
l=length(ip);
if($8 in a)
{
for(k=0;k<=l;k++)
{
if(ip[k]== $8)
{
if(NF <= 70)
{
print "siteID Ipam: "site[k],"siteID zsc: "$14,"date: " $4,"src: "$8,"dst: "$10,"role: "$22,"urlcategory: "$36, "urlsupercategory: "$38,"urlclass: "$40;
}
else
{
print "siteID Ipam: "site[k], "siteID zsc: "$14,"date: " $4, "src: " $8, "dst: " $10, "role: "$22, "urlcategory: " $37, "urlsupercategory: "$39, "urlclass: $41;
}
break;
}
}
}
else
{
print $8 " is not in referentiel ";
}
}

Here is the same code, better formatted, with the initial typo preserved.
# Reformatted copy of the asker's script; the original typos are preserved
# on purpose (see the notes below and the "suggest:" list that follows).
BEGIN {
FS = "[\t,=]";
OFS = "|";
}
# First file only (FNR == NR): remember every IP ($1) and its site ($2),
# indexed by record number, and key the membership hash "a" on the IP.
FNR == NR {
a[$1];
ip[NR] = $1;
site[NR] = $2;
next;
}
# TYPO: a pasted shell command; a bare "{" (or a pattern) should start this rule.
sudo awk -f {
l = length(ip);
if($8 in a) {
# This linear scan repeats the work the "$8 in a" hash lookup already did --
# it is the main reason the script is slow.
for(k = 0; k <= l; k++) {
if(ip[k] == $8) {
if(NF <= 70) {
print "siteID Ipam: "site[k],"siteID zsc: "$14,"date: " $4,"src: "$8,"dst: "$10,"role: "$22,"urlcategory: "$36, "urlsupercategory: "$38,"urlclass: "$40;
}
else {
# NOTE(review): the quote before $41 is unbalanced in the original post.
print "siteID Ipam: "site[k], "siteID zsc: "$14,"date: " $4, "src: " $8, "dst: " $10, "role: "$22, "urlcategory: " $37, "urlsupercategory: "$39, "urlclass: $41;
}
break;
}
}
} else {
print $8 " is not in referentiel ";
}
}
suggest:
fix sudo awk -f typo.
a[$1]; --> a[$1] = 1;
($8 in a) --> (a[$8])

Related

checking cell value in csv and formatting in HTML using awk

#!/usr/bin/awk -f
# Asker's original version: emit a CSV file as an HTML table.
BEGIN {
FS=","
print "<table>"
}
{
# Escape HTML metacharacters in the whole record.
gsub(/</, "\\<")
gsub(/>/, "\\>")
# NOTE(review): replacement is "\\>" here but was presumably meant to be "\\&"
# (a later remark in this thread points this out).
gsub(/&/, "\\>")
print "\t<tr>"
for(f = 1; f <= NF; f++) {
# "header" is never assigned anywhere in this script, so this branch never
# fires -- presumably meant to be enabled with "-v header=1"; TODO confirm.
if(NR == 1 && header) {
printf "\t\t<th>%s</th>\n", $f
}
else printf "\t\t<td>%s</td>\n", $f
}
print "\t</tr>"
}
END {
print "</table>"
}
how to check value of $f inside loop if cell value contains "No" then how to print using
printf("<TD class=AltGreen align=right height="17" width="5%">%s</TD>\n",$f)
instead of printf "\t\t<td>%s</td>\n", $f
Input.csv
USA,NO,45
UK,YES,90*
I have made a couple of changes to your original logic in Awk
To remove the empty spaces from the $f fields, while parsing in a loop
Include check for $f to string NO
The Awk code I use as follows,
#!/usr/bin/awk -f
# Render a CSV file as an HTML table, highlighting cells whose value is "NO"
# with a styled <TD>. Reads the CSV from the file(s) given on the command line
# (or stdin) and prints the HTML to stdout.
BEGIN {
    FS=","
    print "<table>"
}
{
    # Escape HTML metacharacters in the record.
    gsub(/</, "\\<")
    gsub(/>/, "\\>")
    gsub(/&/, "\\>")
    print "\t<tr>"
    for(f = 1; f <= NF; f++) {
        # Strip spaces from the field before comparing.
        gsub(/ /, "", $f)
        # Header branch only fires when invoked with -v header=1.
        if(NR == 1 && header) {
            printf "\t\t<th>%s</th>\n", $f
        }
        else if ( $f == "NO" ) {
            # "%%" prints a literal "%"; a bare "%" in a printf format string
            # is an undefined conversion and breaks under gawk.
            printf "\t\t<TD class=AltGreen align=right height=\"17\" width=\"5%%\">%s</TD>\n", $f
        }
        else printf "\t\t<td>%s</td>\n", $f
    }
    print "\t</tr>"
}
END {
    print "</table>"
}
produced an output as
<table>
<tr>
<td>USA</td>
<TD class=AltGreen align=right height="17" width="5%">NO</TD>
<td>45</td>
</tr>
<tr>
<td>UK</td>
<td>YES</td>
<td>90*</td>
</tr>
</table>
#!/usr/bin/awk -f
# Render a CSV file as an HTML table, highlighting "NO" cells.
BEGIN {
    #header = 1
    # Match "no" in the OP and "NO" in the sample (IGNORECASE is gawk-only).
    IGNORECASE = 1
    FS=","
    print "<table>"
}
{
    # Escape HTML metacharacters in the record.
    gsub(/</, "\\<")
    gsub(/>/, "\\>")
    gsub(/&/, "\\>")
    print "\t<tr>"
    for(f = 1; f <= NF; f++) {
        if(NR == 1 && header) {
            printf "\t\t<th>%s</th>\n", $f
        }
        else {
            # Highlight cells that are exactly NO (case-insensitive here).
            if ( $f ~ /^NO$/) {
                # Fixed: the original was missing the "}" before "else",
                # and "%" must be doubled to print a literal percent sign.
                printf("<TD class=AltGreen align=right height=\"17\" width=\"5%%\">%s</TD>\n", $f)
            }
            else {
                printf "\t\t<td>%s</td>\n", $f
            }
        }
    }
    print "\t</tr>"
}
END {
    print "</table>"
}
I just modified your code a bit to keep it as close to yours as possible.
use $f ~ //
I added IGNORECASE: 0 means case-sensitive, 1 means case-insensitive.
I adapted your double quotes so the HTML attribute values come out quoted.
Some remarks:
I think you want to replace gsub(/&/, "\\>") with gsub(/&/, "\\&").
You do not need header when you check on NR.
When you want to check on "NO" in the header too, you can do something like
echo "USA,NO,45
UK,YES,90*" | awk '
BEGIN {
FS=","
print "<table>"
}
# One <tr> per CSV record; the header row (NR==1) uses <th>, others <td>.
{
gsub(/</, "\\<")
gsub(/>/, "\\>")
# Corrected escaping: "&" becomes "\&" (the question code replaced it with "\>").
gsub(/&/, "\\&")
print "\t<tr>"
if(NR==1) {
tag="th"
} else {
tag="td"
}
for (f = 1; f <= NF; f++) {
if ( $f =="NO") {
# "%%" prints a literal "%" in the width attribute.
printf("<%s class=AltGreen align=right height=\"17\" width=\"5%%\">%s</%s>\n",
tag, $f, tag)
} else {
printf "\t\t<%s>%s</%s>\n", tag, $f, tag
}
}
print "\t</tr>"
}

AWK - Working with two files

I have these two csv files:
File A:
veículo;carro;sust
automóvel;carro;sust
viatura;carro;sust
breve;rápido;adj
excepcional;excelente;adj
maravilhoso;excelente;adj
amistoso;simpático;adj
amigável;simpático;adj
...
File B:
"A001","carro","sust","excelente","adj","ocorrer","adv","bom","adj"
...
In the file A, $1(word) is synonym for $2(word) and $3(word) the part of speech.
In the lines of the file B we can skip $1,the remaining columns are words and their part of speech.
What I need to do is to look up, line by line, each pair (word-pos) of file B in file A and generate a line for each synonym. It is difficult to explain.
Desired Output:
"A001","carro","sust","excelente","adj","ocorrer","adv","bom","adj"
"A001","viatura","sust","excelente","adj","ocorrer","adv","bom","adj"
"A001","veículo","sust","excelente","adj","ocorrer","adv","bom","adj"
"A001","automóvel","sust","excelente","adj","ocorrer","adv","bom","adj"
"A001","carro","sust","excepcional","adj","ocorrer","adv","bom","adj"
"A001","viatura","sust","excepcional","adj","ocorrer","adv","bom","adj"
"A001","veículo","sust","excepcional","adj","ocorrer","adv","bom","adj"
"A001","automóvel","sust","excepcional","adj","ocorrer","adv","bom","adj"
"A001","carro","sust","maravilhoso","adj","ocorrer","adv","bom","adj"
"A001","viatura","sust","maravilhoso","adj","ocorrer","adv","bom","adj"
"A001","veículo","sust","maravilhoso","adj","ocorrer","adv","bom","adj"
"A001","automóvel","sust","maravilhoso","adj","ocorrer","adv","bom","adj"
Done:
BEGIN {
FS="[,;]";
OFS=";";
}
# First file (synonym list): record each "word,canonical,pos" triple as a key.
FNR==NR{
sinonim[$1","$2","$3]++;
next;
}
# Second file: for each (word,pos) pair, substitute every matching synonym and
# accumulate the expanded lines in lineF.
{
s1=split($0,AX,"\n");   # $0 never contains "\n", so s1 is always 1 -- TODO confirm intent
for (i=1;i<=s1;i++)
{
s2=split(AX[i],BX,",");
# Fields come in (word,pos) pairs starting at column 2.
for (j=2;j<=NF;j+=2)
{
lineX=BX[j]","BX[j+1];
gsub(/\"/,"",lineX);
# Linear scan over every synonym entry for each pair (O(pairs * entries)).
for (item in sinonim)
{
s3=split(item,CX,",");
lineS=CX[2]","CX[3];
if (lineX == lineS)
{
BX[j]=CX[1];
lineD=""
for (t=1;t<=s2;t++)
{
lineD=lineD BX[t]",";
}
lineF=lineF lineD"\n";
}
}
}
}
# NOTE(review): lineF is never reset, so output accumulated for earlier
# records is reprinted with every subsequent record.
print lineF
}
$ cat tst.awk
# GNU awk: expand each (word,pos) pair in file B with every known synonym.
BEGIN { FS=";" }
# File A: synonyms[canonical,pos] is a subarray (gawk arrays of arrays)
# holding the canonical word itself plus each synonym.
NR==FNR { synonyms[$2,$3][$2]; synonyms[$2,$3][$1]; next }
# First record of file B: switch to the quoted-CSV separator and force $0
# to be re-split with the new FS.
FNR==1 { FS=OFS="\",\""; $0=$0 }
{
gsub(/^"|"$/,"")
# Outer pass picks one pair to vary...
for (i=2;i<NF;i+=2) {
if ( ($i,$(i+1)) in synonyms) {
for (synonym in synonyms[$i,$(i+1)]) {
$i = synonym
# ...inner pass varies a second pair; each distinct line prints once.
for (j=2;j<NF;j+=2) {
if ( ($j,$(j+1)) in synonyms) {
for (synonym in synonyms[$j,$(j+1)]) {
orig = $0
$j = synonym
if (!seen[$0]++) {
print "\"" $0 "\""
}
$0 = orig
}
}
}
}
}
}
}
.
$ awk -f tst.awk fileA fileB
"A001","carro","sust","excelente","adj","ocorrer","adv","bom","adj"
"A001","veículo","sust","excelente","adj","ocorrer","adv","bom","adj"
"A001","automóvel","sust","excelente","adj","ocorrer","adv","bom","adj"
"A001","viatura","sust","excelente","adj","ocorrer","adv","bom","adj"
"A001","carro","sust","maravilhoso","adj","ocorrer","adv","bom","adj"
"A001","carro","sust","excepcional","adj","ocorrer","adv","bom","adj"
"A001","veículo","sust","maravilhoso","adj","ocorrer","adv","bom","adj"
"A001","veículo","sust","excepcional","adj","ocorrer","adv","bom","adj"
"A001","automóvel","sust","maravilhoso","adj","ocorrer","adv","bom","adj"
"A001","automóvel","sust","excepcional","adj","ocorrer","adv","bom","adj"
"A001","viatura","sust","maravilhoso","adj","ocorrer","adv","bom","adj"
"A001","viatura","sust","excepcional","adj","ocorrer","adv","bom","adj"
The above uses GNU awk for multi-dimensional arrays, with other awks it's a simple tweak to use synonyms[$2,$3] = synonyms[$2,$3] " " $2 etc. or similar and then split() later instead of synonyms[$2,$3][$2] and in.
# Expand field 2 of file B with every synonym collected from file A.
BEGIN { FS = "[,;]"; OFS = "," }
# File A: collect synonyms per quoted canonical word, comma-joined.
NR == FNR {
    canon = "\"" $2 "\""
    synonym[canon] = synonym[canon] "," $1
    next
}
# File B: echo the record as-is, then once more per synonym of field 2.
{
    print
    if (!($2 in synonym))
        next
    n = split(substr(synonym[$2], 2), words)
    for (w = 1; w <= n; w++) {
        $2 = "\"" words[w] "\""
        print
    }
}

How to iterate over a list of log files using gnuplot and awk simultaneously

I am using awk and gnuplot to extract some running times and their averages that are number of processes-dependent from a series of log files named bt.B.*.log where * can be 1, 4, 9 and 16 (those are the number of processes).
The code I'm running under gunplot is
# Per-file averages: awk sums every "Time in seconds" value, then prints
# "<processes> <mean>". The first call truncates tavg.dat (>), the rest append (>>).
system "awk 'BEGIN { FS = \"[ \\t]*=[ \\t]*\" } /Time in seconds/ { s += $2; c++ } /Total processes/ { if (! CP) CP = $2 } END { print CP, s/c }' bt.B.1.log > tavg.dat"
system "awk 'BEGIN { FS = \"[ \\t]*=[ \\t]*\" } /Time in seconds/ { s += $2; c++ } /Total processes/ { if (! CP) CP = $2 } END { print CP, s/c }' bt.B.4.log >> tavg.dat"
system "awk 'BEGIN { FS = \"[ \\t]*=[ \\t]*\" } /Time in seconds/ { s += $2; c++ } /Total processes/ { if (! CP) CP = $2 } END { print CP, s/c }' bt.B.9.log >> tavg.dat"
system "awk 'BEGIN { FS = \"[ \\t]*=[ \\t]*\" } /Time in seconds/ { s += $2; c++ } /Total processes/ { if (! CP) CP = $2 } END { print CP, s/c }' bt.B.16.log >> tavg.dat"
# Raw times: one "Time in seconds" value per line, preceded by the process
# count on the file's first match; same truncate-then-append pattern into t.dat.
system "awk 'BEGIN { FS = \"[ \\t]*[=][ \\t]\" } /Time in seconds/ { printf \"%s\", $2 } /Total processes/ { if (CP) { printf \"\\n\" } else { CP = $2; printf \"%s\\n\", $2 } }' bt.B.1.log > t.dat"
system "awk 'BEGIN { FS = \"[ \\t]*[=][ \\t]\" } /Time in seconds/ { printf \"%s\", $2 } /Total processes/ { if (CP) { printf \"\\n\" } else { CP = $2; printf \"%s\\n\", $2 } }' bt.B.4.log >> t.dat"
system "awk 'BEGIN { FS = \"[ \\t]*[=][ \\t]\" } /Time in seconds/ { printf \"%s\", $2 } /Total processes/ { if (CP) { printf \"\\n\" } else { CP = $2; printf \"%s\\n\", $2 } }' bt.B.9.log >> t.dat"
system "awk 'BEGIN { FS = \"[ \\t]*[=][ \\t]\" } /Time in seconds/ { printf \"%s\", $2 } /Total processes/ { if (CP) { printf \"\\n\" } else { CP = $2; printf \"%s\\n\", $2 } }' bt.B.16.log >> t.dat"
#set xrange [0:20]
#set yrange [0:300]
# Plot setup (labels are in Portuguese; iso_8859_1 needed for accents).
set encoding iso_8859_1
set xlabel "Número de processos"
set ylabel "Tempo médio de execução (s)"
set border 3
set rmargin 10
unset key
set title "bt.B.*.log"
set tics nomirror
set mxtics
set mytics
# Arrow heads extending the two visible axes.
set arrow from graph 1,0 to graph 1.05,0 filled
set arrow from graph 0,1 to graph 0,1.05 filled
# Smooth curve through the averages plus the raw average points.
plot "tavg.dat" using 1:2 smooth bezier title "bt.b.*.log", '' using 1:2 with points ps 3 title ""
Note that the way I'm doing it creates the file tavg.dat using > in the first system command, and then appends the other results to the same file using >>. Same organization for creating the t.dat file.
The issue here is how to iterate over the 4 log files in order to avoid repetition in the script.
Just define two functions, which generate the appropriate strings which you can give to the system function:
# String-building functions: each returns the shell command for one log file.
# ">" truncates the output file on the first iteration (n == 1), ">>" appends after.
tavg(n) = "awk 'BEGIN { FS = \"[ \\t]*=[ \\t]*\" } /Time in seconds/ { s += $2; c++ } /Total processes/ { if (! CP) CP = $2 } END { print CP, s/c }' bt.B.".n.".log ".(n == 1 ? ">" : ">>")." tavg.dat;"
t(n) = "awk 'BEGIN { FS = \"[ \\t]*[=][ \\t]\" } /Time in seconds/ { printf \"%s\", $2 } /Total processes/ { if (CP) { printf \"\\n\" } else { CP = $2; printf \"%s\\n\", $2 } }' bt.B.".n.".log ".(n == 1 ? ">" : ">>")." t.dat;"
# Run both extractions for every process count in one system() call each.
do for [n in "1 4 9 16"] { system(tavg(n).t(n)) }
...
plot "tavg.dat" ...

merge file on the basis of 2 fields

file1
session=1|w,eventbase=4,operation=1,rule=15
session=1|e,eventbase=5,operation=2,rule=14
session=2|t,eventbase=,operation=1,rule=13
file2
field1,field2,field3,session=1,fieldn,operation=1,fieldn
field1,field2,field3,session=1,fieldn,operation=2,fieldn
field1,field2,field3,session=2,fieldn,operation=2,fieldn
field1,field2,field3,session=2,fieldn,operation=1,fieldn
Output
field1,field2,field3,session=1,fieldn,operation=1,fieldn,eventbase=4,rule=15
field1,field2,field3,session=1,fieldn,operation=2,fieldn,eventbase=5,rule=14
field1,field2,field3,session=2,fieldn,operation=2,fieldn,NOMATCH
field1,field2,field3,session=2,fieldn,operation=1,fieldn,eventbase=,rule=13
I have Tried
BEGIN { FS = OFS = "," }
# File 1: key the stored record on (session SUBSEP operation).
FNR == NR {
split($1,s,"|")
session=s[1];
a[session,$3] = session","$2","$3","$4;
next
}
{
split($4,x,"|");
nsession=x[1];
# BUG (the question itself): the keys of "a" are "session SUBSEP operation",
# so a bare "nsession in a" can never match -- the lookup must include $6.
if(nsession in a)print $0 a[nsession,$6];
else print $0",NOMATCH";
}
Issue is I am not able to FIND nsession in 2D array a with if(nsession in a)
matching 2 files on the combination basis of session and operation
Thanks.. it helped.. Now I am learning :) Thanks team
BEGIN { FS = OFS = "," }
# File 1: key the stored record on (session SUBSEP operation).
FNR == NR {
split($1,s,"|")
session=s[1];
a[session,$3] = session","$2","$3","$4;
next
}
# File 2: rebuild the same composite key before the membership test --
# this is the fix for the "nsession in a" problem described above.
{
split($4,x,"|");
nsession=x[1];
key=nsession SUBSEP $6
if(key in a)print $0 a[nsession,$6];
else print $0",NOMATCH";
}
You can try
awk -f merge.awk file1 file2
where merge.awk is
# GNU awk (uses match() with a capture array). Join file2 records to file1 on
# (session, operation); append ",eventbase=...,rule=..." on a match, or
# ",NOMATCH" otherwise.
NR==FNR {
    sub(/[[:blank:]]*$/,"")
    getSessionInfo(1)
    ar[ses,op]=",eventbase="evb",rule="rule
    next
}
{
    sub(/[[:blank:]]*$/,"")
    getSessionInfo(0)
    if ((ses,op) in ar)
        print $0 ar[ses,op]
    else
        print $0 ",NOMATCH"
}
# Extract session/operation (and, for file1 records, eventbase/rule) from $0.
# f != 0 means "this is a file1 record"; a is a local scratch array.
function getSessionInfo(f, a) {
    # "*" quantifiers added: the original single-character classes ([^|]) and
    # ([^,]) matched exactly one character, so multi-digit session/operation
    # values and the empty "eventbase=" field were mishandled.
    match($0,/session=([^|,]*)[|,]/,a)
    ses=a[1]
    match($0,/operation=([^,]*)(,|$)/,a)
    op=a[1]
    if (f) {
        match($0,/eventbase=([^,]*),/,a)
        evb=a[1]
        match($0,/rule=(.*)$/,a)
        rule=a[1]
    }
}

awk '/range start/,/range end/' within script

How do I use the awk range pattern '/begin regex/,/end regex/' within a self-contained awk script?
To clarify, given program csv.awk:
#!/usr/bin/awk -f
BEGIN {
FS = "\""
}
# BUG being asked about: the newline after the range pattern makes it a bare
# pattern (which prints every line of the range), and the block below becomes
# a separate, unconditional action. The "{" must follow on the same line.
/TREE/,/^$/
{
line="";
# Rebuild the record without the quoted field 2.
for (i=1; i<=NF; i++) {
if (i != 2) line=line $i;
}
split(line, v, ",");
if (v[5] ~ "FOAM") {
print NR, v[5];
}
}
and file chunk:
TREE
10362900,A,INSTL - SEAL,Revise
,10362901,A,ASSY / DETAIL - PANEL,Revise
,,-203,ASSY - PANEL,Qty -,Add
,,,-309,PANEL,Qty 1,Add
,,,,"FABRICATE FROM TEKLAM NE1G1-02-250 PER TPS-CN-500, TYPE A"
,,,-311,PANEL,Qty 1,Add
,,,,"FABRICATE FROM TEKLAM NE1G1-02-750 PER TPS-CN-500, TYPE A"
,,,-313,FOAM SEAL,1.00 X 20.21 X .50 THK,Qty 1,Add
,,,,"BMS1-68, GRADE B, FORM II, COLOR BAC706 (BLACK)"
,,,-315,FOAM SEAL,1.50 X 8.00 X .25 THK,Qty 1,Add
,,,,"BMS1-68, GRADE B, FORM II, COLOR BAC706 (BLACK)"
,PN HERE,Dual Lock,Add
,
10442900,IR,INSTL - SEAL,Update (not released)
,10362901,A,ASSY / DETAIL - PANEL,Revise
,PN HERE,Dual Lock,Add
I want to have this output:
27 FOAM SEAL
29 FOAM SEAL
What is the syntax for adding the command line form '/begin regex/,/end regex/' to the script to operate on those lines only? All my attempts lead to syntax errors and googling only gives me the cli form.
why not use 2 steps:
% awk '/start/,/end/' < input.csv | awk -f csv.awk
Simply do:
#!/usr/bin/awk -f
# Within each /from/../to/ range, drop the quoted second field, re-split the
# record on commas, and report the line number plus column 5 when it
# mentions FOAM.
BEGIN {
    FS = "\""
}
/from/,/to/ {
    rebuilt = ""
    for (n = 1; n <= NF; n++)
        if (n != 2)
            rebuilt = rebuilt $n
    split(rebuilt, col, ",")
    if (col[5] ~ "FOAM")
        print NR, col[5]
}
If the from to regexes are dynamic:
#!/usr/bin/awk -f
# Usage: scriptname.awk FROM_REGEX TO_REGEX [INPUTFILE]
# Between a line matching FROM and a line matching TO (inclusive), remove the
# quoted second field, re-split on commas, and print NR plus column 5 when it
# mentions FOAM. Reads stdin when no input file is given.
BEGIN {
    FS = "\""
    FROM = ARGV[1]
    TO = ARGV[2]
    # The two regex arguments must not be treated as input files. Blanking
    # them out makes awk skip them (empty ARGV entries are ignored); if no
    # file argument remains, awk falls back to standard input automatically.
    # (The original set ARGV[1] from the wrong branch and left the TO regex
    # in ARGV, so awk tried to open it as a file.)
    ARGV[1] = ""
    ARGV[2] = ""
}
{ if ($0 ~ FROM) { p = 1 ; l = 0} }   # entering a range
{ if ($0 ~ TO) { p = 0 ; l = 1} }     # leaving it; still process this last line
{
    if (p == 1 || l == 1) {
        line = ""
        for (i = 1; i <= NF; i++) {
            if (i != 2) line = line $i
        }
        split(line, v, ",")
        if (v[5] ~ "FOAM") {
            print NR, v[5]
        }
        l = 0
    }
}
Now you have to call it like: ./scriptname.awk "FROM_REGEX" "TO_REGEX" INPUTFILE. The last param is optional, if missing STDIN can be used.
HTH
You need to show us what you have tried. Is there something about /begin regex/ or /end regex/ you're not telling us? Otherwise your script with the additions should work, i.e.
#!/usr/bin/awk -f
BEGIN {
FS = "\""
}
# Range pattern with its action attached on the same line -- this is the
# correct in-script form of the command-line /begin/,/end/ idiom.
/begin regex/,/end regex/{
line="";
# Rebuild the record without the quoted field 2.
for (i=1; i<=NF; i++) {
if (i != 2) line=line $i;
}
split(line, v, ",");
if (v[5] ~ "FOAM") {
print NR, v[5];
}
}
OR are you using an old Unix, where there is old awk as /usr/bin/awk and New awk as /usr/bin/nawk. Also see if you have /usr/xpg4/bin/awk or gawk (path could be anything).
Finally, show us the error messages you are getting.
I hope this helps.