filepath = self.class.instance_variable_get(:@filename)
# puts "#{@filename}"
qget = params['clientquery']
if !qget.nil? then
  begin
    systemCmd = "bash /home/abc/t.sh \"#{qget}\" \"#{filepath}\""
    puts systemCmd
    output = system("#{systemCmd} 2>&1")
    data = File.read(filepath)
    send_data data, filename: File.basename(filepath),
                    type: 'application/csv',
                    disposition: 'attachment'
  ensure
    # delfile = File.basename("/tmp/download.csv")
    FileUtils.remove_entry_secure File.basename("/tmp/download.csv")
    # File.delete(delfile)
    # redirect_to '/report'
  end
end
Using this code I try to remove the file after it has been downloaded, but it is not working.
If I comment out the line FileUtils.remove_entry_secure File.basename("/tmp/download.csv"), the file is downloaded, but I want to remove that file after the download.
I think it is a permission problem. Could you please verify the permissions on the /tmp folder?
The FileUtils.remove_entry_secure method checks the permissions, owner and group before it removes the entry.
Please refer to the FileUtils.remove_entry_secure documentation.
Is there a way I can save a Pyspark or Pandas dataframe from Databricks to a blob storage without mounting or installing libraries?
I was able to achieve this after mounting the storage container into Databricks and using the library com.crealytics.spark.excel, but I was wondering if I can do the same without the library and without mounting, because I will be working on clusters that don't have these 2 permissions.
Here is the code for saving the dataframe locally to DBFS.
# export
from os import path

folder = "export"
name = "export"
file_path_name_on_dbfs = path.join("/tmp", folder, name)

# Writing to DBFS
# .coalesce(1) is used to generate only 1 file; if the dataframe is too big this won't work,
# so you'll have multiple part files and you need to copy them later one by one
# (see the sketch after the rename step below)
sampleDF \
    .coalesce(1) \
    .write \
    .mode("overwrite") \
    .option("header", "true") \
    .option("delimiter", ";") \
    .option("encoding", "UTF-8") \
    .csv(file_path_name_on_dbfs)
# path of destination, which will be sent to az storage
dest = file_path_name_on_dbfs + ".csv"

# Renaming part-000...csv to our file name
target_file = list(filter(lambda file: file.name.startswith("part-00000"), dbutils.fs.ls(file_path_name_on_dbfs)))
if len(target_file) > 0:
    dbutils.fs.mv(target_file[0].path, dest)

# this line is added for community edition only because /dbfs is not recognized, so we copy the file locally
dbutils.fs.cp(dest, f"file://{dest}")
dbutils.fs.rm(file_path_name_on_dbfs, True)
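In case .coalesce(1) is not an option because the dataframe is too big, the write step above leaves several part-0000N files instead of one. Here is a minimal sketch of moving them one by one with the same dbutils calls; the export_dir folder name and the numeric suffixes are only illustrative assumptions:
# multi-file variant (no .coalesce(1)): move every part file into a destination folder
export_dir = file_path_name_on_dbfs + "_csv"   # assumed destination folder
dbutils.fs.mkdirs(export_dir)
part_files = [f for f in dbutils.fs.ls(file_path_name_on_dbfs) if f.name.startswith("part-")]
for i, part in enumerate(part_files):
    # e.g. /tmp/export/export_csv/export_0.csv, /tmp/export/export_csv/export_1.csv, ...
    dbutils.fs.mv(part.path, f"{export_dir}/{name}_{i}.csv")
dbutils.fs.rm(file_path_name_on_dbfs, True)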
The code that sends the file to Azure storage:
import requests

sas = "YOUR_SAS_TOKEN_PREVIOUSLY_CREATED" # follow the link below to create a SAS token (using SAS is slightly more secure than the raw storage key)
blob_account_name = "YOUR_BLOB_ACCOUNT_NAME"
container = "YOUR_CONTAINER_NAME"
destination_path_w_name = "export/export.csv"
url = f"https://{blob_account_name}.blob.core.windows.net/{container}/{destination_path_w_name}?{sas}"

# here we read the content of our previously exported df -> csv
# if you are not on community edition you might want to use /dbfs + dest
payload = open(dest).read()

headers = {
    'x-ms-blob-type': 'BlockBlob',
    'Content-Type': 'text/csv' # you can change the content type according to your needs
}
response = requests.request("PUT", url, headers=headers, data=payload)
# if response.status_code is 201 it means your file was created successfully
print(response.status_code)
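As a small usage note, instead of checking the status code by hand you can let requests raise an exception when the upload fails; raise_for_status() is a standard helper on the response object:
response = requests.put(url, headers=headers, data=payload)
response.raise_for_status()  # raises requests.exceptions.HTTPError if the PUT failed
print("uploaded, status:", response.status_code)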
Follow this link to set up a SAS token.
Remember that anyone who has the SAS token can access your storage, depending on the permissions you set while creating it.
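If you are starting from a Pandas dataframe rather than a Spark one (the question mentions both), you can skip the DBFS step entirely: write the CSV to a plain local path on the driver and reuse the same PUT request shown above. A minimal sketch, where the /tmp/export.csv path and the tiny sample dataframe are just assumptions for illustration:
import pandas as pd

pandas_df = pd.DataFrame({"column1": ["a", "b"], "column2": [25, 15]})  # sample data only
local_csv = "/tmp/export.csv"  # assumed local path on the driver node
pandas_df.to_csv(local_csv, sep=";", index=False, encoding="utf-8")
# then upload it exactly as above, with payload = open(local_csv, 'rb').read()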
Code for Excel export version (using com.crealytics:spark-excel_2.12:0.14.0)
Saving the dataframe:
data = [
    ('a', 25, 'ast'),
    ('b', 15, 'phone'),
    ('c', 32, 'dlp'),
    ('d', 45, 'rare'),
    ('e', 60, 'phq')
]
columns = ["column1", "column2", "column3"]
sampleDF = spark.createDataFrame(data=data, schema=columns)
sampleDF.show()
# export
from os import path
folder = "export"
name = "export"
file_path_name_on_dbfs = path.join("/tmp", folder, name)
# Writing to DBFS
sampleDF.write.format("com.crealytics.spark.excel") \
    .option("header", "true") \
    .mode("overwrite") \
    .save(file_path_name_on_dbfs + ".xlsx")

# excel
dest = file_path_name_on_dbfs + ".xlsx"

# this line is added for community edition only because /dbfs is not recognized, so we copy the file locally
dbutils.fs.cp(dest, f"file://{dest}")
Uploading the file to Azure storage:
import requests

sas = "YOUR_SAS_TOKEN_PREVIOUSLY_CREATED" # follow the link below to create a SAS token (using SAS is slightly more secure than the raw storage key)
blob_account_name = "YOUR_BLOB_ACCOUNT_NAME"
container = "YOUR_CONTAINER_NAME"
destination_path_w_name = "export/export.xlsx"
# destination_path_w_name = "export/export.csv"
url = f"https://{blob_account_name}.blob.core.windows.net/{container}/{destination_path_w_name}?{sas}"

# here we read the content of our previously exported df -> xlsx
# if you are not on community edition you might want to use /dbfs + dest
# payload = open(dest).read()
payload = open(dest, 'rb').read()

headers = {
    'x-ms-blob-type': 'BlockBlob',
    # 'Content-Type': 'text/csv'
    'Content-Type': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
}
response = requests.request("PUT", url, headers=headers, data=payload)
# if response.status_code is 201 it means your file was created successfully
print(response.status_code)
I have thousands of .htaccess files added by malware. I need to log in via SSH and delete them, but only when they are inside the wp-content folder; outside this folder they need to stay.
What's the cmd, please?
Note: this is for Python.
Try this:
import os

file_type = input("Enter file type: ")
folder_path = input("Enter folder path: ")

# collect the full path of every file under folder_path
filelist = []
for root, dirs, files in os.walk(folder_path):
    for file in files:
        # append the file name to the list
        filelist.append(os.path.join(root, file))

# delete every file that matches the requested type
deleted_any = False
for name in filelist:
    if name.endswith(file_type):
        os.remove(name)
        print("File deleted successfully:", name)
        deleted_any = True

if not deleted_any:
    print("No files found.")
I have a simple table:
db.define_table('myfiles',
    Field('title', 'string'),
    Field('myfile', 'upload'))
Then I run my app from the shell:
python web2py.py -S myapp -M
I set my file_path:
file_path = os.path.join(request.folder,'upload',db.myfiles[1].myfile)
but when I try to read my uploaded file, I get "File not open for reading":
with open(file_path, 'wb') as f: data = f.readlines()
I even tried the same process after copy-pasting my file into the private folder, but I still get the same error.
First, the default folder for uploaded files is "uploads", not "upload":
file_path = os.path.join(request.folder, 'uploads', db.myfiles[1].myfile)
Second, you should open the file for reading rather than writing:
with open(file_path, 'rb') as f:
    data = f.readlines()
How would I get a file from Amazon S3 to the local system using PHP?
I am trying to do this but it's not working:
$s3 = new AmazonS3("key 1", " acces pass");
$s3->getObject("Bucket/filename");
//write to local
$fp = fopen('/tmp/filename.mp4', 'w');
fpassthru($fp);
EDIT
I am trying to save the file from S3 to my local server.
As of version 3.35.x of the AWS SDK, the following snippet works with SaveAs.
Notice the bucket name, the key, and SaveAs with the full path including the file name.
$result = $s3->getObject(array(
    'Bucket' => $bucket,
    'Key'    => $key,
    'SaveAs' => $path . $model->file_name,
));
Check out the docs for getObject:
You need to pass the remote file name as the 2nd param, and in the options array set 'fileDownload' to a file name or an open file resource.
Example:
$s3->getObject('myBucket','myRemoteFile', array('fileDownload' => 'localFileName'));
Currently, Compass is watching the .scss files inside the src folder and automatically updating the .css files (by typing compass watch myproject).
Is there any way of including haml files in the "watching process"?
(I couldn't install StaticMatic because I don't want to install Ruby 1.8.)
Firstly, you should be using RVM. Installing any version of Ruby becomes painless.
Secondly, I just wrote this for myself, using the same 'fssm' gem that Compass uses to watch files. Add this to your Rakefile (or create one):
require 'rubygems'
require 'fssm'
require 'haml'

class HamlWatcher
  class << self
    def watch
      refresh
      puts ">>> HamlWatcher is watching for changes. Press Ctrl-C to Stop."
      FSSM.monitor('haml', '**/*.haml') do
        update do |base, relative|
          puts ">>> Change detected to: #{relative}"
          HamlWatcher.compile(relative)
        end
        create do |base, relative|
          puts ">>> File created: #{relative}"
          HamlWatcher.compile(relative)
        end
        delete do |base, relative|
          puts ">>> File deleted: #{relative}"
          HamlWatcher.remove(relative)
        end
      end
    end

    def output_file(filename)
      # './haml' retains the base directory structure
      filename.gsub(/\.html\.haml$/, '.html')
    end

    def remove(file)
      output = output_file(file)
      File.delete output
      puts "\033[0;31m remove\033[0m #{output}"
    end

    def compile(file)
      output_file_name = output_file(file)
      origin = File.open(File.join('haml', file)).read
      result = Haml::Engine.new(origin).render
      raise "Nothing rendered!" if result.empty?
      # Write rendered HTML to file
      color, action = File.exist?(output_file_name) ? [33, 'overwrite'] : [32, ' create']
      puts "\033[0;#{color}m#{action}\033[0m #{output_file_name}"
      File.open(output_file_name, 'w') { |f| f.write(result) }
    end

    # Check that all haml templates have been rendered.
    def refresh
      Dir.glob('haml/**/*.haml').each do |file|
        file.gsub!(/^haml\//, '')
        compile(file) unless File.exist?(output_file(file))
      end
    end
  end
end
namespace :haml do
  desc "Watch the site's HAML templates and recompile them when they change"
  task :watch do
    require File.join(File.dirname(__FILE__), 'lib', 'haml_watcher')
    HamlWatcher.watch
  end
end
Run it with:
rake haml:watch