Unlocking RLock in a loop with defer leads to deadlock

for key := range keyBuffers {
    partition = int(GetHash(keyBuffers[key]) % store.parts)
    if _, ok := locks[partition]; !ok {
        locks[partition] = struct{}{}
        store.partitions[partition].lock.RLock()
        defer store.partitions[partition].lock.RUnlock()
    }
    request, buffers[key], err = store.partitions[partition].buildIORequests(keyBuffers[key], valueBuffers[key], key)
    if err != nil || request == nil {
        errors[key] = err
        continue
    }
    requests = append(requests, request)
    buffers[key] = valueBuffers[key]
}
In the above code I try to unlock the taken RLock with a defer statement inside the loop. Is this code valid?
I am encountering a deadlock and can't seem to find the root cause.
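For reference, a defer placed inside a loop body does not run at the end of each iteration; all of the deferred RUnlock calls only run when the surrounding function returns, so every locked partition stays read-locked for the rest of that function. Below is a minimal sketch (not the original code; it reuses the identifiers from the snippet above and assumes no other locking happens inside buildIORequests) that releases the taken locks explicitly right after the loop instead:

// Hypothetical rework: take each partition's read lock at most once,
// and release every taken lock as soon as the loop is done instead of
// deferring the unlocks to the end of the surrounding function.
locked := make(map[int]struct{})
for key := range keyBuffers {
    partition := int(GetHash(keyBuffers[key]) % store.parts)
    if _, ok := locked[partition]; !ok {
        locked[partition] = struct{}{}
        store.partitions[partition].lock.RLock()
    }
    // ... build the IO requests exactly as before ...
}
// Unlock once the shared partition data is no longer needed.
for partition := range locked {
    store.partitions[partition].lock.RUnlock()
}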

Related

How to intercept `rollback` in gorm?

I need to execute some things after the create operations fail.
Callbacks seem like they could do the job, but when the operation is part of a transaction it may not actually be executed, since it can be rolled back, so I need to handle this after the rollback instead. So the question is: how do I intercept the rollback?
You can use a manual transaction in a function like this:
func CreateAnimals(db *gorm.DB) error {
    // Note the use of tx as the database handle once you are within a transaction
    tx := db.Begin()
    defer func() {
        if r := recover(); r != nil {
            tx.Rollback()
        }
    }()

    if err := tx.Error; err != nil {
        return err
    }

    if err := tx.Create(&Animal{Name: "Giraffe"}).Error; err != nil {
        tx.Rollback()
        return err
    }

    if err := tx.Create(&Animal{Name: "Lion"}).Error; err != nil {
        tx.Rollback()
        return err
    }

    return tx.Commit().Error
}
If CreateAnimals returns an error, the transaction has already been rolled back, so that is the point where you can do your desired work.
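For example (a minimal sketch; handleRollback is a hypothetical stand-in for whatever post-rollback handling you need):

if err := CreateAnimals(db); err != nil {
    // Every error return above happens only after tx.Rollback(),
    // so the transaction is already rolled back at this point.
    handleRollback(err) // hypothetical post-rollback handling
}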

How can I tell the PATCH method which field I want to update

I'm working on a simple REST API and I'm having trouble with the PATCH method. I don't know how to tell the handler and the query which fields I want to update in the database (for example, which fields were actually passed in the JSON). Here is what I have so far.
func PatchServer(c echo.Context) error {
    patchedServer := new(structs.Server)
    requestID := c.Param("id")

    if err := c.Bind(patchedServer); err != nil {
        return err
    }

    sql := "UPDATE servers SET server_name = CASE WHEN ? IS NOT NULL THEN ? END WHERE id = ?"
    stmt, err := db.Get().Prepare(sql)
    if err != nil {
        panic(err)
    }

    _, err2 := stmt.Exec(patchedServer.Name, patchedServer.Name, requestID)
    if err2 != nil {
        panic(err2)
    }

    fmt.Println(patchedServer.ID, patchedServer.Name, patchedServer.Components)
    fmt.Println("Requested id: ", requestID)
    return c.JSON(http.StatusOK, "Patched!")
}
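One possible approach (a sketch, not from the original post: it reuses the db.Get() helper from above, assumes an echo version where c.Request() returns *http.Request, needs the encoding/json, net/http and strings imports, and the whitelisted column names are made up) is to decode the PATCH body into a map, so only the keys that were actually sent are present, and build the UPDATE from those keys:

// Hypothetical sketch: decode the PATCH body into a map so only the
// fields that were actually sent show up, then build the SET clause
// from those keys.
func PatchServerSketch(c echo.Context) error {
    requestID := c.Param("id")

    body := map[string]interface{}{}
    if err := json.NewDecoder(c.Request().Body).Decode(&body); err != nil {
        return err
    }

    // Whitelist of JSON keys -> database columns (assumed names).
    allowed := map[string]string{"name": "server_name"}

    sets := []string{}
    args := []interface{}{}
    for key, col := range allowed {
        if val, ok := body[key]; ok {
            sets = append(sets, col+" = ?")
            args = append(args, val)
        }
    }
    if len(sets) == 0 {
        return c.JSON(http.StatusBadRequest, "nothing to update")
    }

    args = append(args, requestID)
    query := "UPDATE servers SET " + strings.Join(sets, ", ") + " WHERE id = ?"
    if _, err := db.Get().Exec(query, args...); err != nil {
        return err
    }
    return c.JSON(http.StatusOK, "Patched!")
}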

Bulk insert / copy SQL table with golang

For context, I'm new to Go and I'm creating a program that can copy tables from Oracle to MySQL.
I use the database/sql Go package, so I assume it can be used for migrating any kind of database.
To simplify my question, I'm copying a table on the same MySQL database, from world.city to world.city_copy2.
With the following code, I end up with the last row's values repeated in all the rows of the target table :-(
Do I somehow need to read through all the values inside the loop? What is the efficient way to do that?
package main

import (
    "database/sql"
    "fmt"
    "strings"

    _ "github.com/go-sql-driver/mysql"
)

const (
    user   = "user"
    pass   = "testPass"
    server = "localhost"
)

func main() {
    fmt.Print("test")
    conStr := fmt.Sprintf("%s:%s@tcp(%s)/world", user, pass, server)
    db, err := sql.Open("mysql", conStr)
    if err != nil {
        panic(err.Error())
    }
    defer db.Close()

    err = db.Ping()
    if err != nil {
        panic(err.Error())
    }

    rows, err := db.Query("SELECT * FROM city")
    if err != nil {
        panic(err.Error()) // proper error handling instead of panic in your app
    }

    columns, err := rows.Columns()
    if err != nil {
        panic(err.Error()) // proper error handling instead of panic in your app
    }

    // Make a slice for the values
    values := make([]sql.RawBytes, len(columns))

    // rows.Scan wants '[]interface{}' as an argument, so we must copy the
    // references into such a slice
    scanArgs := make([]interface{}, len(values))
    for i := range values {
        scanArgs[i] = &values[i]
    }

    // that string will be generated according to len of columns
    placeHolders := "( ?, ?, ?, ?, ? )"

    // slice will contain all the values at the end
    bulkValues := []interface{}{}

    valueStrings := make([]string, 0)

    for rows.Next() {
        // get RawBytes from data
        err = rows.Scan(scanArgs...)
        if err != nil {
            panic(err.Error()) // proper error handling instead of panic in your app
        }
        valueStrings = append(valueStrings, placeHolders)
        bulkValues = append(bulkValues, scanArgs...)
    }

    stmStr := fmt.Sprintf("INSERT INTO city_copy2 VALUES %s", strings.Join(valueStrings, ","))

    _, err = db.Exec(stmStr, bulkValues...)
    if err != nil {
        panic(err.Error())
    }
}
I have checked the docs of the library, and the problem is that bulkValues keeps the pointers held in scanArgs, so each time rows.Scan overwrites values, every entry appended earlier also ends up reflecting the latest row.
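A minimal standalone fragment (not from the original answer; it only needs database/sql and fmt) showing that aliasing in isolation:

// Every append copies the same pointer (&vals[0]), so all appended
// entries read whatever the slice element holds at the end.
vals := make([]sql.RawBytes, 1)
scanArgs := []interface{}{&vals[0]}

bulk := []interface{}{}
for _, s := range []string{"first", "second"} {
    vals[0] = sql.RawBytes(s) // what rows.Scan effectively does on each row
    bulk = append(bulk, scanArgs...)
}
fmt.Println(string(*(bulk[0].(*sql.RawBytes)))) // prints "second", not "first"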
You need to copy the data out of the values slice into a separate record on each iteration, like below:
func main() {
    fmt.Print("test")
    conStr := fmt.Sprintf("%s:%s@tcp(%s)/soverflow", user, pass, server)
    db, err := sql.Open("mysql", conStr)
    if err != nil {
        panic(err.Error())
    }
    defer db.Close()

    err = db.Ping()
    if err != nil {
        panic(err.Error())
    }

    rows, err := db.Query("SELECT * FROM city")
    if err != nil {
        panic(err.Error()) // proper error handling instead of panic in your app
    }

    columns, err := rows.Columns()
    if err != nil {
        panic(err.Error()) // proper error handling instead of panic in your app
    }

    // Make a slice for the values
    values := make([]sql.RawBytes, len(columns))

    // rows.Scan wants '[]interface{}' as an argument, so we must copy the
    // references into such a slice
    scanArgs := make([]interface{}, len(values))
    for i := range values {
        scanArgs[i] = &values[i]
    }

    // that string will be generated according to len of columns
    placeHolders := "( ?, ?, ?, ?, ? )"

    // slice will contain all the values at the end
    bulkValues := []interface{}{}

    valueStrings := make([]string, 0)

    // slice to hold the current record's values (as strings, not pointers)
    record := make([]interface{}, len(columns))

    for rows.Next() {
        // get RawBytes from data
        err = rows.Scan(scanArgs...)
        if err != nil {
            panic(err.Error()) // proper error handling instead of panic in your app
        }
        valueStrings = append(valueStrings, placeHolders)

        // copy the scanned bytes into plain strings so the next Scan
        // cannot overwrite what has already been appended;
        // be careful with the data types here, check the docs for details
        for i, col := range values {
            record[i] = string(col)
        }
        bulkValues = append(bulkValues, record...)
    }

    stmStr := fmt.Sprintf("INSERT INTO city_copy2 VALUES %s", strings.Join(valueStrings, ","))

    _, err = db.Exec(stmStr, bulkValues...)
    if err != nil {
        panic(err.Error())
    }
}
You can also find the example in the documentation.
Note: there might be more efficient ways to copy tables between databases, but this answer only gives a quick solution for the particular issue you are having.
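As an aside, the hard-coded placeHolders string in both snippets only matches a five-column table; a small sketch (reusing the columns slice and strings import from above) of generating it from the column count:

// Build "( ?, ?, ..., ? )" with one marker per selected column.
marks := make([]string, len(columns))
for i := range marks {
    marks[i] = "?"
}
placeHolders := "( " + strings.Join(marks, ", ") + " )"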

Go ioutil using too many file descriptors/leak?

I am going through a list of files and unmarshalling the XML data in them into an array of structs rArray. I intend to process about 18000 files. When I get to about 1300 files processed, the program panics and says that too many files are open. If I limit the number of files processed to a safe amount of 1000, the program does not crash.
As seen below, I am using ioutil.ReadFile to read the file data.
for _, f := range files {
    func() {
        data, err := ioutil.ReadFile("./" + recordDir + "/" + f.Name())
        if err != nil {
            fmt.Printf("error reading %v\n", err)
            return
        } else {
            if strings.Contains(filepath.Ext(f.Name()), "xml") {
                // unmarshal data and put into struct array
                err = xml.Unmarshal([]byte(data), &rArray[a])
                if err != nil {
                    fmt.Printf("error decoding %v: %v\n", f.Name(), err)
                    return
                }
            }
        }
    }()
}
I am not sure if Go is using too many file descriptors or not closing the files fast enough.
After reading https://groups.google.com/forum/#!topic/golang-nuts/7yXXjgcOikM and viewing the ioutil source in http://golang.org/src/pkg/io/ioutil/ioutil.go, the code for ioutil.ReadFile shows that it uses defer to close the file. defer runs when the calling function returns, and ReadFile() is that calling function. Am I correct in this understanding?
I also tried wrapping the ioutil.ReadFile part of my code in a function, but it makes no difference.
My ulimit is set to unlimited.
UPDATE:
I believe that the error of too many files is actually occurring during my Unzip function.
func Unzip(src, dest string) error {
    r, err := zip.OpenReader(src)
    if err != nil {
        return err
    }

    for _, f := range r.File {
        rc, err := f.Open()
        if err != nil {
            panic(err)
        }

        path := filepath.Join(dest, f.Name)
        if f.FileInfo().IsDir() {
            os.MkdirAll(path, f.Mode())
        } else {
            f, err := os.OpenFile(
                path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
            if err != nil {
                panic(err)
            }
            _, err = io.Copy(f, rc)
            if err != nil {
                panic(err)
            }
            f.Close()
        }
        rc.Close()
    }

    r.Close()
    return nil
}
I initially got the Unzip function from https://gist.github.com/hnaohiro/4572580, but upon further inspection, the use of defer in the gist author's function seemed wrong, as the files would only be closed after the Unzip() function returned, which is too late because by then 18000 file descriptors would be open. ;)
I replaced the deferred Closes with explicit Close() as shown above, but am still getting the same "too many open files" error. Is there a problem with my modified Unzip function?
UPDATE #2
Oops, I was running this on Heroku and was pushing to the wrong app with my changes this entire time. Lesson learned: verify target app in heroku toolbelt.
The Unzip code from https://gist.github.com/hnaohiro/4572580 does not work, as it does not close files until all files are processed.
My unzip code with explicit close above works and so does the defer version in #peterSO's answer.
I would modify the Unzip function from https://gist.github.com/hnaohiro/4572580 to the following:
package main

import (
    "archive/zip"
    "io"
    "log"
    "os"
    "path/filepath"
)

func unzipFile(f *zip.File, dest string) error {
    rc, err := f.Open()
    if err != nil {
        return err
    }
    defer rc.Close()

    path := filepath.Join(dest, f.Name)
    if f.FileInfo().IsDir() {
        err := os.MkdirAll(path, f.Mode())
        if err != nil {
            return err
        }
    } else {
        f, err := os.OpenFile(
            path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
        if err != nil {
            return err
        }
        defer f.Close()

        _, err = io.Copy(f, rc)
        if err != nil {
            return err
        }
    }
    return nil
}

func Unzip(src, dest string) error {
    r, err := zip.OpenReader(src)
    if err != nil {
        return err
    }
    defer r.Close()

    for _, f := range r.File {
        err := unzipFile(f, dest)
        if err != nil {
            return err
        }
    }
    return nil
}

func main() {
    err := Unzip("./sample.zip", "./out")
    if err != nil {
        log.Fatal(err)
    }
}

Go: Communicate with a shell process

I want to execute a shell script from Go.
The shell script takes standard input and echoes the result.
I want to supply this input from Go and use the result.
What I am doing is:
cmd := exec.Command("python", "add.py")
in, _ := cmd.StdinPipe()
But how do I read from in?
Here is some code writing to a process, and reading from it:
package main

import (
    "bufio"
    "fmt"
    "os/exec"
)

func main() {
    // What we want to calculate
    calcs := make([]string, 2)
    calcs[0] = "3*3"
    calcs[1] = "6+6"

    // To store the results
    results := make([]string, 2)

    cmd := exec.Command("/usr/bin/bc")

    in, err := cmd.StdinPipe()
    if err != nil {
        panic(err)
    }
    defer in.Close()

    out, err := cmd.StdoutPipe()
    if err != nil {
        panic(err)
    }
    defer out.Close()

    // We want to read line by line
    bufOut := bufio.NewReader(out)

    // Start the process
    if err = cmd.Start(); err != nil {
        panic(err)
    }

    // Write the operations to the process
    for _, calc := range calcs {
        _, err := in.Write([]byte(calc + "\n"))
        if err != nil {
            panic(err)
        }
    }

    // Read the results from the process
    for i := 0; i < len(results); i++ {
        result, _, err := bufOut.ReadLine()
        if err != nil {
            panic(err)
        }
        results[i] = string(result)
    }

    // See what was calculated
    for _, result := range results {
        fmt.Println(result)
    }
}
You might want to read/write from/to the process in different goroutines.
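For example, a minimal sketch of that (it reuses in, calcs, bufOut and results from the example above, so the variable names are carried-over assumptions rather than a standalone program):

// Hypothetical variant: write the inputs from a separate goroutine so
// writing and reading cannot block each other on full pipe buffers.
go func() {
    defer in.Close() // closing stdin tells bc there is no more input
    for _, calc := range calcs {
        if _, err := in.Write([]byte(calc + "\n")); err != nil {
            return
        }
    }
}()

// Meanwhile, read each result on the main goroutine.
for i := range results {
    line, _, err := bufOut.ReadLine()
    if err != nil {
        panic(err)
    }
    results[i] = string(line)
}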