How to post an image as a request to a Flask server API in Dart/Flutter?

I want to post a request that has an image in its body to a Python-based API. I have tried to send the data in five ways:
await http.post()
final api = Uri.parse("https://e8f628d7.ngrok.io/detections");
Map<String, dynamic> body = {'images': image};
final response = await http.post(
  api,
  body: body,
);
if (response.statusCode == 200) {
  final responseJson = json.decode(response.body);
  print(responseJson);
}
Client().post()
Map<String, dynamic> body = {'images': image};
var client = new http.Client();
client.post("https://e8f628d7.ngrok.io/detections", body: body).then((response) {
  print("Post " + response.statusCode.toString());
});
dio
MultipartRequest
final api = Uri.parse("https://e8f628d7.ngrok.io/detections");
var stream = new http.ByteStream(DelegatingStream.typed(image.openRead()));
var length = await image.length();
var request = new http.MultipartRequest("POST", api);
var multipartFileSign = new http.MultipartFile(
    'profile_pic', stream, length,
    filename: path.basename(image.path));
request.files.add(multipartFileSign);
// send
var response = await request.send();
print(response.statusCode);
response.stream.transform(utf8.decoder).listen((value) {
  print(value);
});
The approach from the first answer to this question (the link has since been deleted):
if (image == null) return;
String base64Image = base64Encode(image.readAsBytesSync());
http.post(api, body: {
  'images': base64Image,
}).then((res) {
  print(res.statusCode);
  print(json.decode(res.body));
}).catchError((err) {
  print(err);
});
I am able to send the image and I get a 200 success response, but the response body is empty when it should contain the detection results, so I am not sure whether the image is being altered or corrupted on the way to the server.
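As a quick sanity check (an editorial sketch, not part of the original post), the Postman request can be reproduced from Python to rule out the Flutter side entirely. The URL and file name below are placeholders; the upload field has to be named images, because the Flask route shown next reads request.files.getlist("images"), whereas the MultipartRequest attempt above uploads the file as profile_pic.

# Hypothetical debugging sketch, not from the original question.
import requests

url = "https://e8f628d7.ngrok.io/detections"   # ngrok URL from the question
with open("kitten1.jpg", "rb") as f:           # any local test image
    resp = requests.post(url, files={"images": ("kitten1.jpg", f, "image/jpeg")})

print(resp.status_code)
print(resp.json())  # should contain the "response" list built by /detections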
This is the app.py my server runs:
import time
from absl import app, logging
import cv2
import numpy as np
import tensorflow as tf
from yolov3_tf2.models import (
    YoloV3, YoloV3Tiny
)
from yolov3_tf2.dataset import transform_images, load_tfrecord_dataset
from yolov3_tf2.utils import draw_outputs
from flask import Flask, request, Response, jsonify, send_from_directory, abort
import os
# customize your API through the following parameters
classes_path = './data/labels/coco.names'
weights_path = './weights/yolov3.tf'
tiny = False # set to True if using a Yolov3 Tiny model
size = 416 # size images are resized to for model
output_path = './detections/' # path to output folder where images with detections are saved
num_classes = 80 # number of classes in model
# load in weights and classes
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)

if tiny:
    yolo = YoloV3Tiny(classes=num_classes)
else:
    yolo = YoloV3(classes=num_classes)
yolo.load_weights(weights_path).expect_partial()
print('weights loaded')
class_names = [c.strip() for c in open(classes_path).readlines()]
print('classes loaded')
# Initialize Flask application
app = Flask(__name__)
# API that returns JSON with classes found in images
@app.route('/detections', methods=['POST'])
def get_detections():
    raw_images = []
    images = request.files.getlist("images")
    image_names = []
    for image in images:
        image_name = image.filename
        image_names.append(image_name)
        image.save(os.path.join(os.getcwd(), image_name))
        img_raw = tf.image.decode_image(
            open(image_name, 'rb').read(), channels=3)
        raw_images.append(img_raw)
    num = 0
    # create list for final response
    response = []
    for j in range(len(raw_images)):
        # create list of responses for current image
        responses = []
        raw_img = raw_images[j]
        num += 1
        img = tf.expand_dims(raw_img, 0)
        img = transform_images(img, size)
        t1 = time.time()
        boxes, scores, classes, nums = yolo(img)
        t2 = time.time()
        print('time: {}'.format(t2 - t1))
        print('detections:')
        for i in range(nums[0]):
            print('\t{}, {}, {}'.format(class_names[int(classes[0][i])],
                                        np.array(scores[0][i]),
                                        np.array(boxes[0][i])))
            responses.append({
                "class": class_names[int(classes[0][i])],
                "confidence": float("{0:.2f}".format(np.array(scores[0][i]) * 100))
            })
        response.append({
            "image": image_names[j],
            "detections": responses
        })
        img = cv2.cvtColor(raw_img.numpy(), cv2.COLOR_RGB2BGR)
        img = draw_outputs(img, (boxes, scores, classes, nums), class_names)
        cv2.imwrite(output_path + 'detection' + str(num) + '.jpg', img)
        print('output saved to: {}'.format(output_path + 'detection' + str(num) + '.jpg'))
    # remove temporary images
    for name in image_names:
        os.remove(name)
    try:
        return jsonify({"response": response}), 200
    except FileNotFoundError:
        abort(404)
# API that returns image with detections on it
@app.route('/image', methods=['POST'])
def get_image():
    image = request.files["images"]
    image_name = image.filename
    image.save(os.path.join(os.getcwd(), image_name))
    img_raw = tf.image.decode_image(
        open(image_name, 'rb').read(), channels=3)
    img = tf.expand_dims(img_raw, 0)
    img = transform_images(img, size)
    t1 = time.time()
    boxes, scores, classes, nums = yolo(img)
    t2 = time.time()
    print('time: {}'.format(t2 - t1))
    print('detections:')
    for i in range(nums[0]):
        print('\t{}, {}, {}'.format(class_names[int(classes[0][i])],
                                    np.array(scores[0][i]),
                                    np.array(boxes[0][i])))
    img = cv2.cvtColor(img_raw.numpy(), cv2.COLOR_RGB2BGR)
    img = draw_outputs(img, (boxes, scores, classes, nums), class_names)
    cv2.imwrite(output_path + 'detection.jpg', img)
    print('output saved to: {}'.format(output_path + 'detection.jpg'))
    # prepare image for response
    _, img_encoded = cv2.imencode('.png', img)
    response = img_encoded.tostring()
    # remove temporary image
    os.remove(image_name)
    try:
        return Response(response=response, status=200, mimetype='image/png')
    except FileNotFoundError:
        abort(404)
if __name__ == '__main__':
    app.run(debug=True, host='0.0.0.0', port=5000)
When I send the same image directly through Postman I get the desired response, but from the Flutter app I don't. Is it possible that the image is getting altered or modified in transit? And is there any other way to send the image to the API besides the methods above?

You need to make sure that you are using a good version of the http package. There was a regression recently that broke multipart form requests. It's safest for now to hard-code the exact version in pubspec.yaml. (You might want to look in pubspec.lock to see which version you were using, to confirm that it was one of the ones with the error.)
http: 0.12.0+4
Then try this:
main() async {
  http.MultipartRequest request = http.MultipartRequest('POST', Uri.parse(url));
  request.files.add(
    await http.MultipartFile.fromPath(
      'images',
      File('kitten1.jpg').path,
      contentType: MediaType('application', 'jpeg'),
    ),
  );
  http.StreamedResponse r = await request.send();
  print(r.statusCode);
  print(await r.stream.transform(utf8.decoder).join());
}

Related

Scrapy: how to send the items to the site via the api

Now my spiders are sending data to my site in this way:
def parse_product(response, **cb_kwargs):
    item = {}
    item['url'] = response.url
    data = {
        "source_id": 505,
        "token": f"{API_TOKEN}",
        "products": [item]
    }
    headers = {'Content-Type': 'application/json'}
    url = 'http://some.site.com/api/'
    requests.post(url=url, headers=headers, data=json.dumps(data))
Is it possible to implement this through a pipeline or middleware, since it is inconvenient to repeat it in each spider?
P.S. The data needs to be sent in JSON format (json.dumps(data)); if I make the item an instance of MyItemClass(), an error occurs...
It can be done using a pipeline fairly easily. You can also use scrapy's Item class and item Field class as long as you cast them to a dict prior to calling json.dumps.
For Example:
class Pipeline:
    def process_item(self, item, spider):
        data = dict(item)
        headers = {'Content-Type': 'application/json'}
        url = 'http://some.site.com/api/'
        requests.post(url=url, headers=headers, data=json.dumps(data))
        return item
If you use this example, process_item will be called for each and every item you yield from your spider. Just remember to activate the pipeline in your settings.py file (see the sketch below).
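For example, assuming the pipeline above lives in myproject/pipelines.py (a hypothetical module path, adjust it to your project layout), activation would look roughly like this:

# settings.py -- module path is an assumption, adjust to your project
ITEM_PIPELINES = {
    'myproject.pipelines.Pipeline': 300,   # lower numbers run earlier
}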
I found another solution (on GitHub); maybe someone will find it useful...
pipeline.py
import json
import logging

import requests
from scrapy.utils.serialize import ScrapyJSONEncoder
from twisted.internet.defer import DeferredLock
from twisted.internet.threads import deferToThread

default_serialize = ScrapyJSONEncoder().encode


class HttpPostPipeline(object):
    settings = None
    items_buffer = []

    DEFAULT_HTTP_POST_PIPELINE_BUFFERED = False
    DEFAULT_HTTP_POST_PIPELINE_BUFFER_SIZE = 100

    def __init__(self, url, headers=None, serialize_func=default_serialize):
        """Initialize pipeline.

        Parameters
        ----------
        url : str
            Endpoint the items are posted to.
        serialize_func : callable
            Items serializer function.
        """
        self.url = url
        self.headers = headers if headers else {}
        self.serialize_func = serialize_func
        self._lock = DeferredLock()

    @classmethod
    def from_crawler(cls, crawler):
        params = {
            'url': crawler.settings.get('HTTP_POST_PIPELINE_URL'),
        }
        if crawler.settings.get('HTTP_POST_PIPELINE_HEADERS'):
            params['headers'] = crawler.settings['HTTP_POST_PIPELINE_HEADERS']
        ext = cls(**params)
        ext.settings = crawler.settings
        return ext

    def process_item(self, item, spider):
        if self.settings.get('HTTP_POST_PIPELINE_BUFFERED', self.DEFAULT_HTTP_POST_PIPELINE_BUFFERED):
            self._lock.run(self._process_items, item)
            return item
        else:
            return deferToThread(self._process_item, item, spider)

    def _process_item(self, item, spider):
        data = self.serialize_func(item)
        requests.post(self.url, json=json.loads(data), headers=self.headers)
        return item

    def _process_items(self, item):
        self.items_buffer.append(item)
        if len(self.items_buffer) >= int(self.settings.get('HTTP_POST_PIPELINE_BUFFER_SIZE',
                                                           self.DEFAULT_HTTP_POST_PIPELINE_BUFFER_SIZE)):
            deferToThread(self.send_items, self.items_buffer)
            self.items_buffer = []

    def send_items(self, items):
        logging.debug("Sending batch of {} items".format(len(items)))
        serialized_items = [self.serialize_func(item) for item in items]
        requests.post(self.url,
                      json=[json.loads(data) for data in serialized_items],
                      headers=self.headers)

    def close_spider(self, spider):
        if len(self.items_buffer) > 0:
            deferToThread(self.send_items, self.items_buffer)
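To wire this pipeline up, the settings it reads (HTTP_POST_PIPELINE_URL, plus the optional headers and buffering knobs) would be declared in settings.py roughly as sketched below; the module path and URL are placeholders:

# settings.py -- hypothetical values, adjust to your project
ITEM_PIPELINES = {
    'myproject.pipelines.HttpPostPipeline': 300,
}
HTTP_POST_PIPELINE_URL = 'http://some.site.com/api/'
HTTP_POST_PIPELINE_HEADERS = {'Content-Type': 'application/json'}
HTTP_POST_PIPELINE_BUFFERED = True      # batch items instead of posting one by one
HTTP_POST_PIPELINE_BUFFER_SIZE = 100    # flush after this many items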

Telethon: how to request the login code via a phone call

I am writing a client, and my friends from the USA noticed that the login code does not arrive via SMS, only via a phone call. But I don't understand how to do that, i.e. how to request a call.
await client.connect()

code_settings = types.CodeSettings(
    current_number=True,
    allow_flashcall=False,
    allow_app_hash=False
)

result = await client(functions.auth.SendCodeRequest(
    phone_number=phone,
    api_id=api_id,
    api_hash=api_hash,
    settings=code_settings
))
# time.sleep(10)
result = await client(functions.auth.ResendCodeRequest(
    phone_number=phone,
    phone_code_hash=result.phone_code_hash
))
# time.sleep(20)
result = await client(functions.auth.ResendCodeRequest(
    phone_number=phone,
    phone_code_hash=result.phone_code_hash
))
# result = await client(SentCodeTypeCall(5))
# result = await client(functions.auth.)

while not isinstance(result.type, SentCodeTypeCall):
    # time.sleep(10)
    result = await client(functions.auth.ResendCodeRequest(
        phone_number=phone,
        phone_code_hash=result.phone_code_hash
    ))
    # time.sleep(20)

# await client(functions.auth.SendCodeRequest(
#     phone_number=phone,
#     api_id=api_id,
#     api_hash=api_hash,
#     settings=code_settings
# ))

def code_callback():
    code = input('Please enter the code you received: ')
    return code

time.sleep(5)
await client.start(phone=phone, code_callback=code_callback)
I assume this is not the correct code.
Can I tell SendCodeRequest to call immediately instead of sending an SMS?

How to receive multiple messages in python-telegram-bot?

I am sending multiple images at a time to a bot in Telegram. I am trying to create a conversational chatbot using python-telegram-bot.
Here is my code:
def main():
    updater = Updater("1141074258:Axxxxxxxxxxxxxxxxxxxxxxxxg", use_context=True)
    dp = updater.dispatcher
    conv_handler = ConversationHandler(
        entry_points=[CommandHandler('start', start)],
        states={
            CHOSEN_OPTION: [MessageHandler(Filters.regex('^(Option2|Option3|Option4)$'), choose_option)],
            PRODUCTS: [MessageHandler(Filters.text | Filters.photo, products)],
            Option2: [MessageHandler(Filters.text, option2)],
            Option3: [MessageHandler(Filters.text, option3)],
            Option4: [CommandHandler('create', create_order)]
        },
        fallbacks=[CommandHandler('cancel', cancel)]
    )
    dp.add_handler(conv_handler)
    updater.start_polling()
    updater.idle()

if __name__ == '__main__':
    main()
@run_async
def products(update, context):
    logger.info("update is %s", update)
    input_message = update.message.text
    if input_message:
        data['products'] = input_message
        logger.info("product text is:%s", input_message)
    elif update.message.photo:
        photo_list = []
        bot = context.bot
        length = len(update.message.photo)
        for photo in range(0, length):
            ident = update.message.photo[photo].file_id
            getFile = context.bot.get_file(ident)
            photo_list.append(getFile['file_path'])
        data['products_image'] = photo_list
    update.message.reply_text("Please type name.")
    return Option3
If I send 2 images at the same time, I get back one image in 3 different sizes. How can I receive the actual two messages?
If the update contains a photo, return PRODUCTS so further photos are handled by the same state; otherwise get the text and return whatever state you want:
@run_async
def products(update, context):
    logger.info("update is %s", update)
    if update.message.photo:
        # do what you want with your photos
        return PRODUCTS
    if update.message.text:
        # getting product text
        return Option3
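As a side note, each Telegram message carries a single photo: update.message.photo is a list of PhotoSize objects for that one photo at different resolutions (largest last), and an album of two photos arrives as two separate updates. A minimal sketch of collecting the largest size per update, assuming data and PRODUCTS exist as in the question, might look like this:

def collect_photo(update, context):
    # hypothetical helper; `data` and `PRODUCTS` come from the question's code
    if update.message.photo:
        largest = update.message.photo[-1]            # sizes are ordered smallest to largest
        tg_file = context.bot.get_file(largest.file_id)
        data.setdefault('products_image', []).append(tg_file.file_path)
        return PRODUCTS                               # stay in this state to catch the next photo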

Combine two TTS outputs in a single mp3 file not working

I want to combine two requests to the Google Cloud Text-to-Speech API into a single mp3 output. The reason I need to combine two requests is that the output should contain two different languages.
The code below works fine for many language pair combinations, but unfortunately not for all. If I request e.g. a sentence in English and one in German and combine them, everything works. If I request one in English and one in Japanese, I can't combine the two files into a single output. The output only contains the first sentence, and instead of the second sentence it outputs silence.
I have tried multiple ways of combining the two outputs, but the result stays the same. The code below should show the issue.
Please run the code first with:
python synthesize_bug.py --t1 'Hallo' --code1 de-De --t2 'August' --code2 de-De
This works perfectly.
python synthesize_bug.py --t1 'Hallo' --code1 de-De --t2 'こんにちは' --code2 ja-JP
This doesn't work. The individual files are OK, but the combined file contains silence instead of the Japanese part.
Also, when used with two Japanese sentences, everything works.
I already filed a bug report at Google with no response yet, but maybe it's just me who is doing something wrong here with encoding assumptions. Hope someone has an idea.
#!/usr/bin/env python
import argparse


# [START tts_synthesize_text_file]
def synthesize_text_file(text1, text2, code1, code2):
    """Synthesizes speech from the input file of text."""
    from apiclient.discovery import build
    import base64

    service = build('texttospeech', 'v1beta1')
    collection = service.text()

    # first request: a 2-second pause, used as a spacer between the two sentences
    data1 = {}
    data1['input'] = {}
    data1['input']['ssml'] = '<speak><break time="2s"/></speak>'
    data1['voice'] = {}
    data1['voice']['ssmlGender'] = 'FEMALE'
    data1['voice']['languageCode'] = code1
    data1['audioConfig'] = {}
    data1['audioConfig']['speakingRate'] = 0.8
    data1['audioConfig']['audioEncoding'] = 'MP3'

    request = collection.synthesize(body=data1)
    response = request.execute()
    audio_pause = base64.b64decode(response['audioContent'].decode('UTF-8'))
    raw_pause = response['audioContent']

    # second request: first sentence in the first language
    ssmlLine = '<speak>' + text1 + '</speak>'
    data1 = {}
    data1['input'] = {}
    data1['input']['ssml'] = ssmlLine
    data1['voice'] = {}
    data1['voice']['ssmlGender'] = 'FEMALE'
    data1['voice']['languageCode'] = code1
    data1['audioConfig'] = {}
    data1['audioConfig']['speakingRate'] = 0.8
    data1['audioConfig']['audioEncoding'] = 'MP3'

    request = collection.synthesize(body=data1)
    response = request.execute()
    # The response's audio_content is binary.
    with open('output1.mp3', 'wb') as out:
        out.write(base64.b64decode(response['audioContent'].decode('UTF-8')))
        print('Audio content written to file "output1.mp3"')
    audio_text1 = base64.b64decode(response['audioContent'].decode('UTF-8'))
    raw_text1 = response['audioContent']

    # third request: second sentence in the second language
    ssmlLine = '<speak>' + text2 + '</speak>'
    data2 = {}
    data2['input'] = {}
    data2['input']['ssml'] = ssmlLine
    data2['voice'] = {}
    data2['voice']['ssmlGender'] = 'MALE'
    data2['voice']['languageCode'] = code2  # 'ko-KR'
    data2['audioConfig'] = {}
    data2['audioConfig']['speakingRate'] = 0.8
    data2['audioConfig']['audioEncoding'] = 'MP3'

    request = collection.synthesize(body=data2)
    response = request.execute()
    # The response's audio_content is binary.
    with open('output2.mp3', 'wb') as out:
        out.write(base64.b64decode(response['audioContent'].decode('UTF-8')))
        print('Audio content written to file "output2.mp3"')
    audio_text2 = base64.b64decode(response['audioContent'].decode('UTF-8'))
    raw_text2 = response['audioContent']

    # combine the decoded MP3 bytes
    result = audio_text1 + audio_pause + audio_text2
    with open('result.mp3', 'wb') as out:
        out.write(result)
        print('Audio content written to file "result.mp3"')

    # combine the base64 strings first, then decode
    raw_result = raw_text1 + raw_pause + raw_text2
    with open('raw_result.mp3', 'wb') as out:
        out.write(base64.b64decode(raw_result.decode('UTF-8')))
        print('Audio content written to file "raw_result.mp3"')
# [END tts_synthesize_text_file]


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('--t1')
    parser.add_argument('--code1')
    parser.add_argument('--t2')
    parser.add_argument('--code2')
    args = parser.parse_args()

    synthesize_text_file(args.t1, args.t2, args.code1, args.code2)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--t1')
parser.add_argument('--code1')
parser.add_argument('--t2')
parser.add_argument('--code2')
args = parser.parse_args()
synthesize_text_file(args.t1, args.t2, args.code1, args.code2)
You can find the answer here:
https://issuetracker.google.com/issues/120687867
Short answer: it's not clear why it is not working, but Google suggests a workaround: first write the files as .wav, combine them, and then re-encode the result to mp3 (see the sketch below).
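As an illustration of that workaround (not from the linked answer), something along these lines with the third-party pydub library (which requires ffmpeg to be installed) should work; the file names match the script above:

# hypothetical sketch of the decode-combine-re-encode workaround using pydub
from pydub import AudioSegment

part1 = AudioSegment.from_mp3('output1.mp3')   # first sentence
pause = AudioSegment.silent(duration=2000)     # 2 s spacer, like the SSML <break>
part2 = AudioSegment.from_mp3('output2.mp3')   # second sentence (e.g. Japanese)

combined = part1 + pause + part2               # pydub decodes to raw audio, so mixing encoder runs is fine
combined.export('result.mp3', format='mp3')    # re-encode once at the end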
I managed to do this in NodeJS with just one function (I don't know how optimal it is, but at least it works); maybe you can take inspiration from it.
I used the memory-streams dependency from npm:
var streams = require('memory-streams');

function mergeAudios(audios) {
    var reader = new streams.ReadableStream();
    var writer = new streams.WritableStream();
    audios.forEach(element => {
        if (element instanceof streams.ReadableStream) {
            element.pipe(writer);
        } else {
            writer.write(element);
        }
    });
    reader.append(writer.toBuffer());
    return reader;
}
The input parameter is a list containing either ReadableStream objects or response.audioContent values from the synthesizeSpeech operation. If an element is a readable stream, it is piped into the writer; if it is audio content, it is written directly. At the end, all content is appended into a single readable stream.

AttributeError: 'unicode' object has no attribute 'key'

I'm very new to Python and have run into an issue while trying to upgrade some code. I'm working with an app that pulls stored scan data via an API.
Here is the code as it sits, working:
def _collect_one_host_scan_info(self, host_id, sid, scan_info):
    """
    The method to collect all the vulnerabilities of one host and generate the event data.
    """
    count = 0
    host_uri = self.endpoint + '/' + str(sid) + '/hosts/' + str(host_id)
    result = self.client.request(host_uri).get("content")
    # if there is exception in request, return None
    if result is None:
        _LOGGER.info("There is exception in request, return None")
        return None
    else:
        host_info = result.get("info", {})
        host_end_time = host_info.get("host_end", "")
        if self.ckpt.is_new_host_scan(host_end_time,
                                      self.config.get("start_date")):
            self.source = self.url + self.endpoint + '/' + str(
                sid) + '/hosts/' + str(host_id)
            for vuln in result.get("vulnerabilities", []):
                vuln["sid"] = sid
                vuln["host_id"] = host_id
                # get the port info
                plugin_id = vuln.get("plugin_id", "")
                port_info = []
                if plugin_id:
                    plugin_uri = "{}/plugins/{}".format(host_uri,
                                                        plugin_id)
                    plugin_outputs = self.client.request(plugin_uri).get(
                        "content", {}).get("outputs")
                    ports = []
                    for output in plugin_outputs:
                        ports.extend(output.get("ports", {}).keys())
                    for port in ports:
                        port_elem = {}
                        port_items = re.split(r"\s*/\s*", port)
                        port_elem["port"] = int(port_items[0])
                        if port_items[1]:
                            port_elem["transport"] = port_items[1]
                        if port_items[2]:
                            port_elem["protocol"] = port_items[2]
                        port_info.append(port_elem)
                vuln = dict(vuln, **scan_info)
                vuln = dict(vuln, **host_info)
                if port_info:
                    vuln["ports"] = port_info
                entry = NessusObject(
                    vuln.get("timestamp"), self.sourcetype, self.source,
                    vuln)
                self._print_stream(entry)
                count += 1
    return count
The data that is being pulled looks like this:
"outputs": [
{
"ports": {
"445 / tcp / cifs": [
{
"hostname": "computer.domain.com"
}
]
},
"has_attachment": 0,
"custom_description": null,
"plugin_output": "\nPath : c:\\program files (x86)\\folder\\bin\\fax.exe\nUsed by services : RFDB\nFile write allowed for groups : Domain Users\nFull control of directory allowed for groups : Domain Users\n\nPath : c:\\program files (x86)\\folder\\bin\\faxrpc.exe\nUsed by services : RFRPC\nFile write allowed for groups : Domain Users\nFull control of directory allowed for groups : Domain Users\n\nPath : c:\\program files (x86)\\folder\\bin\\faxserv.exe\nUsed by services : RFSERVER\nFile write allowed for groups : Domain Users\nFull control of directory allowed for groups : Domain Users\n`,
"hosts": null,
"severity": 3
}
With the working code, the return is:
ports{}.port 445
ports{}.protocol tcp
ports{}.transport cifs
What I would really like is to grab the "plugin_output" data along with the "port" data.
For now I'm just trying to replace the "port" handling with "plugin_output" handling:
# get the output info
plugin_id = vuln.get("plugin_id", "")
output_info = []
if plugin_id:
    plugin_uri = "{}/plugins/{}".format(host_uri,
                                        plugin_id)
    plugin_outputs = self.client.request(plugin_uri).get(
        "content", {}).get("outputs")
    outputs = []
    for output in plugin_outputs:
        outputs.extend(output.get("plugin_output", "").keys())
    for plugin in plugin_outputs:
        plugin_elem = {}
        plugin_items = re.split(r"nPath\s*", plugin)
        plugin_elem["location1"] = plugin_items[0]
        if plugin_items[1]:
            plugin_elem["location2"] = plugin_items[1]
        if plugin_items[2]:
            plugin_elem["location3"] = plugin_items[2]
        output_info.append(plugin_elem)
vuln = dict(vuln, **scan_info)
vuln = dict(vuln, **host_info)
if output_info:
    vuln["plugin_output"] = output_info
entry = NessusObject(
    vuln.get("timestamp"), self.sourcetype, self.source,
    vuln)
self._print_stream(entry)
count += 1
As you can see, what I've done is simply replace the "ports" handling with "plugin_output" handling, and the error received is:
AttributeError: 'unicode' object has no attribute 'key'
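For context (an editorial note, not from the original post): "ports" is a dict, so .keys() works on it, but "plugin_output" is a plain string, and calling .keys() on a (unicode) string raises exactly this kind of AttributeError. A minimal reproduction:

# illustrative only -- shows why the broken attempt fails
output = {"ports": {"445 / tcp / cifs": []},
          "plugin_output": "\nPath : c:\\program files (x86)\\folder\\bin\\fax.exe"}

output.get("ports", {}).keys()           # fine: dict has .keys()
output.get("plugin_output", "").keys()   # AttributeError: a string has no .keys()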
Well, after further effort I was able to figure out what I needed to do with the code. It was much easier than I thought it would be, but sometimes when learning a new language it's hard to envision what is needed. Code posted below.
def _collect_one_host_scan_info(self, host_id, sid, scan_info):
    """
    The method to collect all the vulnerabilities of one host and generate
    the event data.
    """
    count = 0
    host_uri = self.endpoint + '/' + str(sid) + '/hosts/' + str(host_id)
    result = self.client.request(host_uri).get("content")
    # if there is exception in request, return None
    if result is None:
        _LOGGER.info("There is exception in request, return None")
        return None
    else:
        host_info = result.get("info", {})
        host_end_time = host_info.get("host_end", "")
        if self.ckpt.is_new_host_scan(host_end_time,
                                      self.config.get("start_date")):
            self.source = self.url + self.endpoint + '/' + str(
                sid) + '/hosts/' + str(host_id)
            for vuln in result.get("vulnerabilities", []):
                vuln["sid"] = sid
                vuln["host_id"] = host_id
                plugin_id = vuln.get("plugin_id", "")
                # get plugin_output data
                plugin_output_info = []
                if plugin_id:
                    plugin_uri = "{}/plugins/{}".format(host_uri,
                                                        plugin_id)
                    plugin_outputs = self.client.request(plugin_uri).get(
                        "content", {}).get("outputs", [])
                    data_output = []
                    for output in plugin_outputs:
                        items = output.get("plugin_output", 'no value')
                        item = str(items)
                        # clean = re.sub('[^a-zA-Z0-9-()_*.(:\\)]', ' ', item)
                        plugin_output_info.append(item)
                # get the port info
                port_info = []
                if plugin_id:
                    plugin_uri = "{}/plugins/{}".format(host_uri,
                                                        plugin_id)
                    plugin_outputs = self.client.request(plugin_uri).get(
                        "content", {}).get("outputs", [])
                    ports = []
                    for output in plugin_outputs:
                        ports.extend(output.get("ports", {}).keys())
                    for port in ports:
                        port_elem = {}
                        port_items = re.split(r"\s*/\s*", port)
                        port_elem["port"] = int(port_items[0])
                        if port_items[1]:
                            port_elem["transport"] = port_items[1]
                        if port_items[2]:
                            port_elem["protocol"] = port_items[2]
                        port_info.append(port_elem)
                vuln = dict(vuln, **scan_info)
                vuln = dict(vuln, **host_info)
                if port_info:
                    vuln["ports"] = port_info
                if plugin_output_info:
                    vuln["plugin_output"] = plugin_output_info
                entry = NessusObject(
                    vuln.get("timestamp"), self.sourcetype, self.source,
                    vuln)
                self._print_stream(entry)
                count += 1
    return count