I've been searching (search engines, SO, the AHK forums) without success for how to get the PIDs of the whole process tree of anything started by the Run[Wait] command.
Any help/direction will be highly appreciated.
With 20 iterations looking up a 30-child process tree, my average times (in seconds) were:
DLL: 0.2587435
WMI: 0.1113015
Here's the bench:
pid := 7880
loops := 20
time_dll := 0
Loop, % loops
{
    QPC(1)
    tree_dll := processTree(pid)
    time_dll += QPC()
}
time_dll /= loops
time_wmi := 0
Loop, % loops
{
    QPC(1)
    tree_wmi := processTree(pid, True)
    time_wmi += QPC()
}
time_wmi /= loops
MsgBox,,>, % "DLL: " time_dll "`nWMI: " time_wmi
;-------------------------------------------------------------------------------
processTree(pid, wmi := False, obj := False)
{
    Local
    tree := []
    If wmi
    {
        If !obj
            obj := ComObjGet("winmgmts:").ExecQuery("SELECT ProcessId,ParentProcessId FROM Win32_Process")
        For result in obj
            If (pid = result.ParentProcessId) ; parentheses force an expression; legacy "If var = value" would compare against the literal text
            {
                tree.Push(result.ProcessId)
                subs := %A_ThisFunc%(result.ProcessId, True, obj)
                For idx,val in subs
                    tree.Push(val)
            }
    }
    Else
    {
        ; PROCESSENTRY32 is 556 bytes on x86 / 568 on x64 in Unicode builds; size the
        ; buffer generously, since Process32First fails when dwSize is too small.
        Static varCapacity := 4*9 + A_PtrSize*2 + (260 << !!A_IsUnicode)
        VarSetCapacity(lppe, varCapacity, 0)
        NumPut(varCapacity, lppe, 0, "UInt") ; dwSize
        hSnapshot := DllCall("CreateToolhelp32Snapshot", "UInt",0x2, "UInt",0, "Ptr") ; 0x2 = TH32CS_SNAPPROCESS (2nd arg is ignored for this flag)
        DllCall("Process32First", "Ptr",hSnapshot, "Ptr",&lppe)
        Loop
        {
            parent := NumGet(lppe, 16 + A_PtrSize * 2, "UInt") ; th32ParentProcessID
            If (parent = pid)
            {
                child := NumGet(lppe, 8, "UInt") ; th32ProcessID
                tree.Push(child)
                gchilds := %A_ThisFunc%(child)
                For idx,val in gchilds
                    tree.Push(val)
            }
        } Until !DllCall("Process32Next", "Ptr",hSnapshot, "Ptr",&lppe)
        DllCall("CloseHandle", "Ptr",hSnapshot)
    }
    Return, tree
}
QPC(start := 0)
{
    Static P := 0, F := 0, Q := DllCall("QueryPerformanceFrequency", "Int64P",F)
    Return !DllCall("QueryPerformanceCounter", "Int64P",Q) + (start ? (P:=Q)/F : (Q-P)/F)
}
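For anyone who wants the same walk outside AutoHotkey, here is a hedged sketch of the Toolhelp32 snapshot approach in Go, using golang.org/x/sys/windows (Windows-only; the function and variable names are mine, and unlike the AHK version it takes a single snapshot and recurses over an in-memory parent-to-children map instead of re-snapshotting per level):

//go:build windows

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/windows"
)

// childPIDs returns the PIDs of all descendants of pid, depth-first,
// from a single TH32CS_SNAPPROCESS snapshot.
func childPIDs(pid uint32) ([]uint32, error) {
	snap, err := windows.CreateToolhelp32Snapshot(windows.TH32CS_SNAPPROCESS, 0)
	if err != nil {
		return nil, err
	}
	defer windows.CloseHandle(snap)

	// Build a parent -> children index in one pass over the snapshot.
	children := map[uint32][]uint32{}
	var pe windows.ProcessEntry32
	pe.Size = uint32(unsafe.Sizeof(pe)) // dwSize must be set before the first call
	for err = windows.Process32First(snap, &pe); err == nil; err = windows.Process32Next(snap, &pe) {
		children[pe.ParentProcessID] = append(children[pe.ParentProcessID], pe.ProcessID)
	}

	var tree []uint32
	var walk func(uint32)
	walk = func(p uint32) {
		for _, c := range children[p] {
			tree = append(tree, c)
			walk(c)
		}
	}
	walk(pid)
	return tree, nil
}

func main() {
	tree, err := childPIDs(7880) // the example PID from the benchmark above
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(tree)
}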
Related
I am dealing with a bit over 15 billion rows of data in various text files, and I am trying to insert them into MariaDB using Go. Go is a fast language that is often used for big data, but I cannot get more than 10k-15k inserts a second; at this rate it's going to take over 15 days, and I need this data imported sooner than that. I have tried various batch sizes, but they all give about the same results.
The function I'm using to handle file data:
func handlePath(path string) {
	file, err := os.Open(path)
	if err != nil {
		fmt.Printf("error opening %v: %v", path, err)
		return
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	var temp_lines []string
	for scanner.Scan() {
		if len(temp_lines) == line_batch {
			insertRows(temp_lines)
			temp_lines = []string{}
		}
		temp_lines = append(temp_lines, scanner.Text())
	}
	insertRows(temp_lines) // flush the final partial batch
	fmt.Printf("\nFormatted %v\n", path)
	if err := scanner.Err(); err != nil {
		fmt.Printf("\nScanner error %v\n", err)
		return
	}
}
The function I'm using for inserting:
func insertRows(rows []string) {
	var Args []string
	for _, row := range rows {
		line_split := strings.Split(row, "|")
		if len(line_split) != 6 {
			continue // skip malformed lines; a bare return here would silently drop the whole batch
		}
		database_id := line_split[0]
		email := line_split[1]
		password := line_split[2]
		username := line_split[3]
		ip := line_split[4]
		phone := line_split[5]
		arg := fmt.Sprintf("('%v','%v','%v','%v','%v','%v')", database_id, email, password, username, ip, phone)
		Args = append(Args, arg)
	}
	sqlQuery := fmt.Sprintf("INSERT INTO new_table (database_id, email, password, username, ip, phone_number) VALUES %s", strings.Join(Args, ","))
	_, err := db.Exec(sqlQuery)
	if err != nil {
		//fmt.Printf("%v\n", err)
		return
	}
	total += line_batch
	writes++
}
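Not a speed fix by itself, but worth flagging: building the VALUES list with Sprintf leaves quoting and escaping to chance. Below is a hedged sketch of the same batch insert using placeholders (insertRowsParam is my name; db, the table layout, and the strings import are assumed to be as above; the MySQL/MariaDB wire protocol allows at most 65535 placeholders per statement, so keep batches under roughly 10000 rows). For volumes like this, LOAD DATA LOCAL INFILE is usually much faster than any INSERT batching, since the files are already delimiter-separated.

// insertRowsParam is a hypothetical variant of insertRows that uses
// placeholders, letting the driver handle quoting and escaping.
func insertRowsParam(rows []string) error {
	placeholders := make([]string, 0, len(rows))
	args := make([]interface{}, 0, len(rows)*6)
	for _, row := range rows {
		parts := strings.Split(row, "|")
		if len(parts) != 6 {
			continue // skip malformed lines
		}
		placeholders = append(placeholders, "(?,?,?,?,?,?)")
		for _, p := range parts {
			args = append(args, p)
		}
	}
	if len(placeholders) == 0 {
		return nil
	}
	query := "INSERT INTO new_table (database_id, email, password, username, ip, phone_number) VALUES " +
		strings.Join(placeholders, ",")
	_, err := db.Exec(query, args...)
	return err
}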
Server specs: (screenshot omitted)
How to build a row as map[string]map[string]interface{}
cannot use s.ID (type string) as type map[string]interface {} in assignment
var row = make(map[string]map[string]interface{})
ListServers:
func ListServers() (map[string]map[string]interface{}, error) {
	listOptions := servers.ListOpts{}
	pager := servers.List(GetClientCompute(), listOptions)
	err := pager.EachPage(func(page pagination.Page) (bool, error) {
		serverList, err := servers.ExtractServers(page)
		if err != nil {
			fmt.Println(err)
		}
		for _, s := range serverList {
			row["ID"] = s.ID     // <---- error is here
			row["Name"] = s.Name // <---- error is here
			if s.Addresses["public"] != nil {
				for _, i := range s.Addresses["public"].([]interface{}) {
					temp := i.(map[string]interface{})
					if temp["version"].(float64) == 4 {
						row["IP"] = temp["addr"]
					}
				}
			}
			t, _ := time.Parse(time.RFC3339, s.Created)
			row["Flavor"] = s.Flavor
			row["Created"] = time.Now().Sub(t) // <---- error is here
			row["Status"] = s.Status           // <---- error is here
		}
		return false, nil
	})
	// fmt.Println(lists)
	return row, err
}
The row should be a SLICE of map[string]interface{}. You need to provide the length when you initialize the slice, like this:
row := make([]map[string]interface{}, 0)
The index of a slice must be an integer; that's why you encounter the second problem mentioned in your comment.
Let's suppose serverList is a slice. Your code may be modified as:
rows := make([]map[string]interface{}, 0) // create a slice
// ... code omitted
for _, s := range serverList {
	row := make(map[string]interface{}) // create an item
	row["ID"] = s.ID
	row["Name"] = s.Name
	// ... code omitted
	row["Flavor"] = s.Flavor
	row["Created"] = time.Now().Sub(t)
	row["Status"] = s.Status
	rows = append(rows, row) // append the item to the slice
}
return rows, err
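A minimal, self-contained illustration of the same pattern, using a stand-in struct instead of the gophercloud server type:

package main

import "fmt"

type server struct {
	ID, Name, Status string
}

func main() {
	serverList := []server{
		{ID: "a1", Name: "web-1", Status: "ACTIVE"},
		{ID: "b2", Name: "db-1", Status: "BUILD"},
	}

	rows := make([]map[string]interface{}, 0, len(serverList))
	for _, s := range serverList {
		row := map[string]interface{}{ // one fresh map per server
			"ID":     s.ID,
			"Name":   s.Name,
			"Status": s.Status,
		}
		rows = append(rows, row)
	}
	fmt.Println(rows) // [map[ID:a1 Name:web-1 Status:ACTIVE] map[ID:b2 Name:db-1 Status:BUILD]]
}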
In MOESI_CMP_directory-L2cache.sm, in the action j_forwardGlobalRequestToLocalOwner, the out_msg.Requestor field of the forwarded request message is filled in with the machineID of that L2 cache, so it is the correct requestor.
However, when responding to that message in MOESI_CMP_directory-L1cache.sm, in the action ee_sendDataExclusive, when in_msg.RequestorMachine == MachineType:L2Cache, why is out_msg.Destination filled in with mapAddressToRange(...) instead of in_msg.Requestor? (I would have expected it to be the out_msg.Requestor value from the request message.)
L2 cache:
action(j_forwardGlobalRequestToLocalOwner, "j", desc="Forward external request to local owner") {
    peek(requestNetwork_in, RequestMsg) {
        enqueue(localRequestNetwork_out, RequestMsg, response_latency) {
            out_msg.addr := in_msg.addr;
            out_msg.Type := in_msg.Type;
            out_msg.Requestor := machineID; // the machineID of the requesting L2
            out_msg.RequestorMachine := MachineType:L2Cache;
            out_msg.Destination.add(getLocalOwner(cache_entry, in_msg.addr));
            out_msg.Type := in_msg.Type;
            out_msg.MessageSize := MessageSizeType:Forwarded_Control;
            out_msg.Acks := 0 - 1;
        }
    }
}
L1 cache:
action(ee_sendDataExclusive, "\e", desc="Send data from cache to requestor, don't keep a shared copy") {
    peek(requestNetwork_in, RequestMsg) {
        assert(is_valid(cache_entry));
        if (in_msg.RequestorMachine == MachineType:L2Cache) {
            enqueue(responseNetwork_out, ResponseMsg, request_latency) {
                out_msg.addr := address;
                out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
                out_msg.Sender := machineID;
                out_msg.SenderMachine := MachineType:L1Cache;
                out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                    l2_select_low_bit, l2_select_num_bits, intToID(0))); // why is this not filled in with "in_msg.Requestor"?
                out_msg.DataBlk := cache_entry.DataBlk;
                out_msg.Dirty := cache_entry.Dirty;
                out_msg.Acks := in_msg.Acks;
                out_msg.MessageSize := MessageSizeType:Response_Data;
            }
            DPRINTF(RubySlicc, "Sending exclusive data to L2\n");
        }
        else {
            enqueue(responseNetwork_out, ResponseMsg, request_latency) {
                out_msg.addr := address;
                out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
                out_msg.Sender := machineID;
                out_msg.SenderMachine := MachineType:L1Cache;
                out_msg.Destination.add(in_msg.Requestor);
                out_msg.DataBlk := cache_entry.DataBlk;
                out_msg.Dirty := cache_entry.Dirty;
                out_msg.Acks := in_msg.Acks;
                out_msg.MessageSize := MessageSizeType:ResponseLocal_Data;
            }
            DPRINTF(RubySlicc, "Sending exclusive data to L1\n");
        }
    }
}
I have a fleet of servers that I'm trying to establish SSH connections to, and I'm spawning a new goroutine for every SSH connection I have to establish. I then send the result of each connection (along with any error) down a channel, and read from the channel. This program sort of works, but it freezes at the end even though I close the channel.
This is what I have so far:
package main

import (
	"fmt"
	"net"
	"sync"

	"github.com/awslabs/aws-sdk-go/aws"
	"github.com/awslabs/aws-sdk-go/service/ec2"
)

// ConnectionResult container
type ConnectionResult struct {
	host    string
	message string
}

func main() {
	cnres := make(chan ConnectionResult)
	ec2svc := ec2.New(&aws.Config{Region: "us-east-1"})
	wg := sync.WaitGroup{}
	params := &ec2.DescribeInstancesInput{
		Filters: []*ec2.Filter{
			&ec2.Filter{
				Name: aws.String("instance-state-name"),
				Values: []*string{
					aws.String("running"),
				},
			},
		},
	}
	resp, err := ec2svc.DescribeInstances(params)
	if err != nil {
		panic(err)
	}
	for _, res := range resp.Reservations {
		for _, inst := range res.Instances {
			for _, tag := range inst.Tags {
				if *tag.Key == "Name" {
					host := *tag.Value
					wg.Add(1)
					go func(hostname string, cr chan ConnectionResult) {
						defer wg.Done()
						_, err := net.Dial("tcp", host+":22")
						if err != nil {
							cr <- ConnectionResult{host, "failed"}
						} else {
							cr <- ConnectionResult{host, "succeeded"}
						}
					}(host, cnres)
				}
			}
		}
	}
	for cr := range cnres {
		fmt.Println("Connection to " + cr.host + " " + cr.message)
	}
	close(cnres)
	defer wg.Wait()
}
What am I doing wrong? Is there a better way of doing concurrent SSH connections in Go?
The code above is stuck in the range cnres loop. As pointed out in the excellent 'Go by Example', range will only exit on a closed channel.
One way to address that difficulty is to run the range cnres iteration in another goroutine. You could then wg.Wait(), and then close() the channel, as such:
...
go func() {
	for cr := range cnres {
		fmt.Println("Connection to " + cr.host + " " + cr.message)
	}
}()
wg.Wait()
close(cnres)
On a tangential note (independently of the code being stuck), I think the intention was to use hostname in the Dial() function, and subsequent channel writes, rather than host.
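An equivalent arrangement, for what it's worth, is the mirror image: keep the range loop in the main goroutine and let a small helper goroutine close the channel once all workers are done. Since the drain then happens in main, every result is printed before the program exits:

// closer: wait for all producers, then close the channel
// so the range loop below can terminate
go func() {
	wg.Wait()
	close(cnres)
}()

// the main goroutine drains the channel until it is closed
for cr := range cnres {
	fmt.Println("Connection to " + cr.host + " " + cr.message)
}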
Thanks to Frederik, I was able to get this running successfully:
package main

import (
	"fmt"
	"net"
	"sync"

	"github.com/awslabs/aws-sdk-go/aws"
	"github.com/awslabs/aws-sdk-go/service/ec2"
)

// ConnectionResult container
type ConnectionResult struct {
	host    string
	message string
}

func main() {
	cnres := make(chan ConnectionResult)
	ec2svc := ec2.New(&aws.Config{Region: "us-east-1"})
	wg := sync.WaitGroup{}
	params := &ec2.DescribeInstancesInput{
		Filters: []*ec2.Filter{
			&ec2.Filter{
				Name: aws.String("instance-state-name"),
				Values: []*string{
					aws.String("running"),
				},
			},
		},
	}
	resp, err := ec2svc.DescribeInstances(params)
	if err != nil {
		panic(err)
	}
	for _, res := range resp.Reservations {
		for _, inst := range res.Instances {
			for _, tag := range inst.Tags {
				if *tag.Key == "Name" {
					host := *tag.Value
					publicdnsname := *inst.PublicDNSName
					wg.Add(1)
					go func(ec2name, cbname string, cr chan ConnectionResult) {
						defer wg.Done()
						_, err := net.Dial("tcp", ec2name+":22")
						if err != nil {
							cr <- ConnectionResult{cbname, "failed"}
						} else {
							cr <- ConnectionResult{cbname, "succeeded"}
						}
					}(publicdnsname, host, cnres)
				}
			}
		}
	}
	go func() {
		for cr := range cnres {
			fmt.Println("Connection to " + cr.host + " " + cr.message)
		}
	}()
	wg.Wait()
}
Frederik's solution works fine, with one caveat: if the command goroutines (the ones writing to the channel) take a while to respond, main can pass wg.Wait(), close the channel, and exit before the processing goroutine has finished printing the last responses, so some output may be lost.
In my case I'm using this to execute a remote SSH command on multiple servers and print the responses. The working solution for me is to use two separate WaitGroups: one for the command goroutines and a second for the processing goroutine. This way the channel is closed only after all command goroutines have completed, and main then waits for the processing goroutine to drain the channel and exit its for loop:
// Create WaitGroups and a channel, and execute the command concurrently (one goroutine per node)
outchan := make(chan CommandResult)
var wg_command sync.WaitGroup
var wg_processing sync.WaitGroup
for _, t := range validNodes {
	wg_command.Add(1)
	target := t + " (" + user + "#" + nodes[t] + ")"
	go func(dst, user, ip, command string, out chan CommandResult) {
		defer wg_command.Done()
		result := remoteExec(user, ip, command)
		out <- CommandResult{dst, result}
	}(target, user, nodes[t], cmdCommand, outchan)
}

wg_processing.Add(1)
go func() {
	defer wg_processing.Done()
	for o := range outchan {
		bBlue.Println(o.target, "=>", cmdCommand)
		fmt.Println(o.cmdout)
	}
}()

// wait until all command goroutines finish, close the channel, then wait for the printer to drain it
wg_command.Wait()
close(outchan)
wg_processing.Wait()
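Here is a self-contained toy version of the same two-WaitGroup shape, with stubbed work in place of the SSH call so it runs as-is:

package main

import (
	"fmt"
	"sync"
	"time"
)

type result struct {
	target, output string
}

func main() {
	out := make(chan result)
	var wgWork, wgPrint sync.WaitGroup

	for _, t := range []string{"node1", "node2", "node3"} {
		wgWork.Add(1)
		go func(target string) {
			defer wgWork.Done()
			time.Sleep(50 * time.Millisecond) // stand-in for remoteExec
			out <- result{target, "ok"}
		}(t)
	}

	wgPrint.Add(1)
	go func() {
		defer wgPrint.Done()
		for r := range out {
			fmt.Println(r.target, "=>", r.output)
		}
	}()

	wgWork.Wait()  // all workers have sent
	close(out)     // lets the printer's range loop end
	wgPrint.Wait() // printer has drained everything
}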
I want to create a 10 GB file that looks like:
prefix:username:timestamp, number
For example:
login:jbill:2013/3/25, 1
I want to fill the file with random rows like the one above. How could I do this in Go?
I can have an array of prefixes like:
login, logout, register
And also an array of usernames:
jbill, dkennedy
For example,
package main

import (
	"bufio"
	"fmt"
	"math/rand"
	"os"
	"strconv"
	"time"
)

func main() {
	fileSize := int64(10e9) // 10GB
	f, err := os.Create("/tmp/largefile")
	if err != nil {
		fmt.Println(err)
		return
	}
	w := bufio.NewWriter(f)
	prefixes := []string{"login", "logout", "register"}
	names := []string{"jbill", "dkennedy"}
	timeStart := time.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC)
	timeDur := timeStart.AddDate(1, 0, 0).Sub(timeStart)
	rand.Seed(time.Now().UnixNano())
	size := int64(0)
	for size < fileSize {
		// prefix:username:timestamp, number
		// login:jbill:2012/3/25, 1
		prefix := prefixes[int(rand.Int31n(int32(len(prefixes))))]
		name := names[int(rand.Int31n(int32(len(names))))]
		date := timeStart.Add(time.Duration(rand.Int63n(int64(timeDur)))).Format("2006/1/2")
		number := strconv.Itoa(int(rand.Int31n(100) + 1))
		line := prefix + ":" + name + ":" + date + ", " + number + "\n"
		n, err := w.WriteString(line)
		if err != nil {
			fmt.Println(n, err)
			return
		}
		size += int64(len(line))
	}
	err = w.Flush()
	if err != nil {
		fmt.Println(err)
		return
	}
	err = f.Close()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("Size:", size)
}
Output:
register:jbill:2012/8/24, 15
login:jbill:2012/10/7, 98
register:dkennedy:2012/8/29, 70
register:jbill:2012/6/1, 89
register:jbill:2012/5/24, 63
login:dkennedy:2012/3/29, 48
logout:jbill:2012/7/8, 93
logout:dkennedy:2012/1/12, 74
login:jbill:2012/4/12, 14
login:jbill:2012/2/5, 83
This is a naive approach (1GB):
package main

import (
	"fmt"
	"log"
	"os"
)

func main() {
	myfile, err := os.OpenFile("myfile", os.O_WRONLY|os.O_CREATE, 0644)
	if err != nil {
		log.Fatal(err)
	}
	defer myfile.Close()
	var pos int
	// sample: login:jbill:2013/3/25, 1
	line := fmt.Sprintf("%s:%s:%s, %d\n", "login", "jbill", "2013/3/25", 1)
	for pos < 1024*1024*1024 {
		bytes, err := myfile.Write([]byte(line))
		if err != nil {
			log.Fatal(err)
		}
		pos = pos + bytes
	}
}
which takes forever (1:16) because the output is not buffered. By adding bufio you can decrease the time dramatically:
package main

import (
	"bufio"
	"fmt"
	"log"
	"os"
)

func main() {
	myfile, err := os.OpenFile("myfile", os.O_WRONLY|os.O_CREATE, 0644)
	if err != nil {
		log.Fatal(err)
	}
	defer myfile.Close()
	mybufferedfile := bufio.NewWriter(myfile)
	var pos int
	// sample: login:jbill:2013/3/25, 1
	line := fmt.Sprintf("%s:%s:%s, %d\n", "login", "jbill", "2013/3/25", 1)
	for pos < 1024*1024*1024 {
		bytes, err := mybufferedfile.WriteString(line)
		if err != nil {
			log.Fatal(err)
		}
		pos = pos + bytes
	}
	err = mybufferedfile.Flush()
	if err != nil {
		log.Fatal(err)
	}
}
Still 26 seconds on my machine; I'd like to see a faster solution.
BTW: you still need to randomize the fields, but that is left as an exercise for the reader :)
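One further speed-up worth trying (a sketch along the same lines, not benchmarked here): build a chunk of many lines in memory once and write it with a single call per iteration, amortizing the per-write overhead; as above, the fields are still not randomized.

package main

import (
	"bytes"
	"log"
	"os"
)

func main() {
	const fileSize = 1 << 30 // 1GB, as in the examples above
	line := []byte("login:jbill:2013/3/25, 1\n")

	// Repeat the sample line into a ~1MB chunk, then write the chunk in a loop.
	chunk := bytes.Repeat(line, (1<<20)/len(line))

	f, err := os.Create("myfile")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	var pos int
	for pos < fileSize {
		n, err := f.Write(chunk)
		if err != nil {
			log.Fatal(err)
		}
		pos += n
	}
}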