syslog-ng flush_lines option does not work

I use syslog-ng 3.5.3 on Ubuntu 14.04.
I need to reduce system load, so I would like to use the flush_lines option.
I added flush_lines(2) to the global options.
I checked the behavior in a terminal with tail -f /var/log/messages,
but log messages were still written immediately, as before.
How can I write out several messages at once?
I use the config below for syslog-ng:
@version: 3.5
@include "scl.conf"
@include "`scl-root`/system/tty10.conf"
# Syslog-ng configuration file, compatible with default Debian syslogd
# installation.
# First, set some global options.
options { chain_hostnames(off); flush_lines(2); use_dns(no); use_fqdn(no);
owner("root"); group("adm"); perm(0640); stats_freq(0);
bad_hostname("^gconfd$");
};
########################
# Sources
########################
# This is the default behavior of sysklogd package
# Logs may come from unix stream, but not from another machine.
#
source s_src {
system();
internal();
};
# If you wish to get logs from remote machine you should uncomment
# this and comment the above source line.
#
#source s_net { tcp(ip(127.0.0.1) port(1000)); };
########################
# Destinations
########################
# First some standard logfile
#
destination d_auth { file("/var/log/auth.log"); };
destination d_cron { file("/var/log/cron.log"); };
destination d_daemon { file("/var/log/daemon.log"); };
destination d_kern { file("/var/log/kern.log"); };
destination d_lpr { file("/var/log/lpr.log"); };
destination d_mail { file("/var/log/mail.log"); };
destination d_syslog { file("/var/log/syslog"); };
destination d_user { file("/var/log/user.log"); };
destination d_uucp { file("/var/log/uucp.log"); };
# These files are the logs coming from the mail subsystem.
#
destination d_mailinfo { file("/var/log/mail.info"); };
destination d_mailwarn { file("/var/log/mail.warn"); };
destination d_mailerr { file("/var/log/mail.err"); };
# Logging for INN news system
#
destination d_newscrit { file("/var/log/news/news.crit"); };
destination d_newserr { file("/var/log/news/news.err"); };
destination d_newsnotice { file("/var/log/news/news.notice"); };
# Some `catch-all' logfiles.
#
destination d_debug { file("/var/log/debug"); };
destination d_error { file("/var/log/error"); };
destination d_messages { file("/var/log/messages"); };
# The root's console.
#
destination d_console { usertty("root"); };
# Virtual console.
#
destination d_console_all { file(`tty10`); };
# The named pipe /dev/xconsole is for the `xconsole' utility. To use it,
# you must invoke `xconsole' with the `-file' option:
#
# $ xconsole -file /dev/xconsole [...]
#
destination d_xconsole { pipe("/dev/xconsole"); };
# Send the messages to another host
#
#destination d_net { tcp("127.0.0.1" port(1000) log_fifo_size(1000)); };
# Debian only
destination d_ppp { file("/var/log/ppp.log"); };
########################
# Filters
########################
# Here come the filter options. With these rules, we can set which
# messages go where.
filter f_dbg { level(debug); };
filter f_info { level(info); };
filter f_notice { level(notice); };
filter f_warn { level(warn); };
filter f_err { level(err); };
filter f_crit { level(crit .. emerg); };
filter f_debug { level(debug) and not facility(auth, authpriv, news, mail); };
filter f_error { level(err .. emerg) ; };
filter f_messages { level(info,notice,warn) and
not facility(auth,authpriv,cron,daemon,mail,news); };
filter f_auth { facility(auth, authpriv) and not filter(f_debug); };
filter f_cron { facility(cron) and not filter(f_debug); };
filter f_daemon { facility(daemon) and not filter(f_debug); };
filter f_kern { facility(kern) and not filter(f_debug); };
filter f_lpr { facility(lpr) and not filter(f_debug); };
filter f_local { facility(local0, local1, local3, local4, local5,
local6, local7) and not filter(f_debug); };
filter f_mail { facility(mail) and not filter(f_debug); };
filter f_news { facility(news) and not filter(f_debug); };
filter f_syslog3 { not facility(auth, authpriv, mail) and not filter(f_debug); };
filter f_user { facility(user) and not filter(f_debug); };
filter f_uucp { facility(uucp) and not filter(f_debug); };
filter f_cnews { level(notice, err, crit) and facility(news); };
filter f_cother { level(debug, info, notice, warn) or facility(daemon, mail); };
filter f_ppp { facility(local2) and not filter(f_debug); };
filter f_console { level(warn .. emerg); };
########################
# Log paths
########################
log { source(s_src); filter(f_auth); destination(d_auth); };
log { source(s_src); filter(f_cron); destination(d_cron); };
log { source(s_src); filter(f_daemon); destination(d_daemon); };
log { source(s_src); filter(f_kern); destination(d_kern); };
log { source(s_src); filter(f_lpr); destination(d_lpr); };
log { source(s_src); filter(f_syslog3); destination(d_syslog); };
log { source(s_src); filter(f_user); destination(d_user); };
log { source(s_src); filter(f_uucp); destination(d_uucp); };
log { source(s_src); filter(f_mail); destination(d_mail); };
#log { source(s_src); filter(f_mail); filter(f_info); destination(d_mailinfo); };
#log { source(s_src); filter(f_mail); filter(f_warn); destination(d_mailwarn); };
#log { source(s_src); filter(f_mail); filter(f_err); destination(d_mailerr); };
log { source(s_src); filter(f_news); filter(f_crit); destination(d_newscrit); };
log { source(s_src); filter(f_news); filter(f_err); destination(d_newserr); };
log { source(s_src); filter(f_news); filter(f_notice); destination(d_newsnotice); };
#log { source(s_src); filter(f_cnews); destination(d_console_all); };
#log { source(s_src); filter(f_cother); destination(d_console_all); };
#log { source(s_src); filter(f_ppp); destination(d_ppp); };
log { source(s_src); filter(f_debug); destination(d_debug); };
log { source(s_src); filter(f_error); destination(d_error); };
log { source(s_src); filter(f_messages); destination(d_messages); };
log { source(s_src); filter(f_console); destination(d_console_all);
destination(d_xconsole); };
log { source(s_src); filter(f_crit); destination(d_console); };
# All messages sent to a remote site
#
#log { source(s_src); destination(d_net); };
###
# Include all config files in /etc/syslog-ng/conf.d/
###
#include "/etc/syslog-ng/conf.d/*.conf"

The flush_lines() option determines how many lines syslog-ng writes to a destination in one batch. If messages arrive continuously, raising flush_lines from 1 to 2 doesn't have any noticeable effect; increase it to 50 or 100 to see the difference.
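For illustration, here is a minimal sketch of the two places the value can be raised (100 is only an example value, tune it to your message rate; the per-destination form overrides the global default):
# global default for every destination
options { flush_lines(100); };
# or directly on a single file() destination
destination d_messages { file("/var/log/messages" flush_lines(100)); };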

Related

Testcafe runs on average 2-3 times slower when using SSL

After installing and using a self-signed SSL certificate by following the official guide here, and then running our test suites, we found that the tests take about three times as long. I would expect maybe some sort of delay, but that's a lot.
Any ideas on why this is the case?
With SSL: 45 passed (6m 56s)
Without SSL: 45 passed (2m 55s)
.testcaferc.js File
let filtered_tests = [
  // Located in editor_page_tests.js
  'user_is_able_to_add_a_section_column_row_and_element_to_editor',
  'pop_up_element_displays_during_page_preview',
  'clicking_button_shows_hides_elements',
  'user_is_able_to_save_when_adding_a_video',
  'user_is_able_to_save_when_leaving_video_embed_blank',
  // Located in courses_page_tests.js
  'user_can_preview_existing_course',
  'optin_user_can_navigate_through_course',
  'manually_added_user_can_navigate_through_course',
  // Located in funnels_page_tests.js
  'user_can_navigate_through_funnel',
  // Located in developer_portal_page_tests.js
  'user_can_navigate_to_developer_portal_page',
  // Located in members_page_tests.js
  'user_can_navigate_to_team_members_page',
  // Located in teams_dashboard_pages_tests.js
  'user_can_navigate_to_team_dashboard_page',
  // Located in workspaces_page_tests.js
  'user_can_navigate_to_team_workspaces_page'
]
let isolated_tests = [
  // Located in sites_overview_page_tests.js
  'user_can_make_site_public',
  'user_can_edit_site_name',
  'user_can_make_site_password_protected',
  'user_can_make_site_private',
  'user_can_edit_site_domain',
  'changing_site_domain_updates_funnel_urls',
  // Located in account_details_page_tests.js
  'user_can_edit_account_name',
  // Located in account_details_page_tests.js
  'user_can_edit_account_password',
  // Located in general_page_tests.js
  'user_can_edit_workspace_details'
]
let full_filter = filtered_tests.concat(isolated_tests)
let build_filter = () => {
  // Purpose: To set the suite that we want to run. (Sanity, Smoke, Regression)
  var filter = {}
  if (process.env.SCOPE) {
    filter = {
      testGrep: `^(?!.*(${filtered_tests.join('|')})).*$`,
      testMeta: {
        scope: process.env.SCOPE
      }
    }
  } else {
    filter = {
      testGrep: `^(?!.*(${full_filter.join('|')})).*$`
    }
  }
  return filter
}
let determine_concurrency = () => {
  // Purpose: To set concurrency dependant on the scope that is passed
  let concurrency = 5
  if (process.env.SCOPE == 'isolated') {
    concurrency = 1
  }
  return concurrency
}
module.exports = {
  assertionTimeout: 5000,
  browsers: [
    "chrome:headless --window-size=1920,1159 --allow-insecure-localhost"
  ],
  concurrency: determine_concurrency(),
  cache: true,
  clientScripts: "scripts/hide_notifications.js",
  filter: build_filter(),
  hostname: "localhost",
  pageLoadTimeout: 30000,
  quarantineMode: {
    successThreshold: 1,
    attemptLimit: 3
  },
  reporter: [
    {
      name: "spec",
      output: "artifacts/reports/spec_results"
    },
    {
      name: "xunit",
      output: "artifacts/reports/xunit_results.xml"
    }
  ],
  screenshots: {
    path: "artifacts/screenshots",
    pathPattern: "${TEST}_${DATE}_${TIME}.png",
    takeOnFails: true,
    thumbnails: false
  },
  selectorTimeout: 5000,
  skipJsErrors: true,
  skipUncaughtErrors: true,
  src: "tests/**",
  ssl: {
    pfx: "ssl/testingdomain.pfx",
    rejectUnauthorized: true
  },
  videoEncodingOptions: {
    aspect: "16:9",
    framerate: 30
  },
  videoOptions: {
    failedOnly: true,
    pathPattern: "${TEST}_${DATE}_${TIME}.mp4",
    singleFile: false
  },
  videoPath: "artifacts/recordings"
}

Can rollup-plugins access the AST created by previous plugins in the plugin chain?

We use multiple rollup-plugins that parse their input to an AST. As they run on the same files, each file is parsed multiple times. Can this be optimized, so that each file is parsed only once? Minimal example:
// rollup.config.js
import {createFilter} from '@rollup/pluginutils';
import {simple} from 'acorn-walk';
import {attachComments} from 'astravel';
import {generate} from 'astring';
export default {
  input: 'src/main.js',
  output: {file: 'bundle.js', format: 'cjs'},
  plugins: [{
    name: 'plugin1',
    transform(code, id) {
      const comments = [];
      const ast = this.parse(code, {onComment: comments});
      attachComments(ast, comments);
      simple(ast, {
        Identifier(n) {
          // rewrite wrong to right
          if (n.name === 'wrong') n.name = 'right';
        }
      });
      return {
        code: generate(ast, {comments: true}),
        ast,
        map: null /* minimal example, won't create a source map here */
      };
    }
  }, {
    name: 'plugin2',
    transform(code, id) {
      const comments = [];
      const ast = this.parse(code, {onComment: comments});
      attachComments(ast, comments);
      simple(ast, {
        CallExpression(n) {
          // rewrite mylog(...) to console.log(...)
          if (n.callee.type === 'Identifier' && n.callee.name === 'mylog') {
            n.callee = {
              type: 'MemberExpression',
              object: {type: 'Identifier', name: 'console', start: n.start, end: n.end},
              property: {type: 'Identifier', name: 'log', start: n.start, end: n.end},
              computed: false,
              start: n.start,
              end: n.end
            };
          }
        }
      });
      return {
        code: generate(ast, {comments: true}),
        ast,
        map: null /* minimal example, won't create a source map here */
      };
    }
  }]
};
Now I understand that transform() can return an AST, so that parsing doesn't have to happen twice. And I understand that this.parse() uses the rollup-internal acorn instance. My simple mind thought that this.parse() could return the AST created by previous transform() calls, if available. But I assume that all sorts of demons await on that road, e.g. when this.parse() was called with different options.
Is there a different way to achieve what I described? A different hook, maybe?
I would love to avoid having all the plugins merged into one and switching them on and off via options (I see that this would be a solution, but a really cumbersome one).
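For what it's worth, here is a rough sketch of that cumbersome all-in-one workaround, just to make it concrete (the plugin name is made up; the rewrites are the same ones as in the two plugins above). It parses once, runs both walkers over the same tree, and generates once:
// combined plugin: parse once, apply both rewrites, generate once
const combinedPlugin = {
  name: 'combined-rewrites', // hypothetical name
  transform(code, id) {
    const comments = [];
    const ast = this.parse(code, {onComment: comments});
    attachComments(ast, comments);
    simple(ast, {
      // was plugin1: rewrite wrong to right
      Identifier(n) {
        if (n.name === 'wrong') n.name = 'right';
      },
      // was plugin2: rewrite mylog(...) to console.log(...)
      CallExpression(n) {
        if (n.callee.type === 'Identifier' && n.callee.name === 'mylog') {
          n.callee = {
            type: 'MemberExpression',
            object: {type: 'Identifier', name: 'console', start: n.start, end: n.end},
            property: {type: 'Identifier', name: 'log', start: n.start, end: n.end},
            computed: false,
            start: n.start,
            end: n.end
          };
        }
      }
    });
    return {code: generate(ast, {comments: true}), ast, map: null};
  }
};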

Custom directive to check list length for input types

I tried my best to write a custom directive in apollo-server-express that validates that an input type field of type [Int] does not exceed a maximum length, but I do not know if it is the right way to do it. I would appreciate it if somebody could help me correct any mistakes in the code below.
// schema.js
directive @listLength(max: Int) on INPUT_FIELD_DEFINITION
input FiltersInput {
  filters: Filters
}
input Filters {
  keys: [Int] @listLength(max: 10000)
}
// Custom directive
const { SchemaDirectiveVisitor } = require('apollo-server-express');
import {
  GraphQLList,
  GraphQLScalarType,
  GraphQLInt,
  Kind,
  DirectiveLocation,
  GraphQLDirective
} from "graphql";
export class ListLengthDirective extends SchemaDirectiveVisitor {
  static getDirectiveDeclaration(directiveName) {
    return new GraphQLDirective({
      name: directiveName,
      locations: [DirectiveLocation.INPUT_FIELD_DEFINITION],
      args: {
        max: { type: GraphQLInt },
      }
    });
  }
  // Replace field.type with a custom GraphQLScalarType that enforces the
  // length restriction.
  wrapType(field) {
    const fieldName = field.astNode.name.value;
    const { type } = field;
    if (field.type instanceof GraphQLList) {
      field.type = new LimitedLengthType(fieldName, type, this.args.max);
    } else {
      throw new Error(`Not a scalar type: ${field.type}`);
    }
  }
  visitInputFieldDefinition(field) {
    this.wrapType(field);
  }
}
class LimitedLengthType extends GraphQLScalarType {
  constructor(name, type, maxLength) {
    super({
      name,
      serialize(value) {
        return type.serialize(value);
      },
      parseValue(value) {
        value = type.serialize(value);
        return type.parseValue(value);
      },
      parseLiteral(ast) {
        switch (ast.kind) {
          case Kind.LIST:
            if (ast.values.length > maxLength) {
              throw {
                code: 400,
                message: `'${name}' parameter cannot extend ${maxLength} values`,
              };
            }
            const arrayOfInts = ast.values.map(valueObj => parseInt(valueObj['value']));
            return arrayOfInts;
        }
        throw new Error('ast kind should be Int of ListValue')
      },
    });
  }
}
Does this look right?
Thanks

How to add custom blocks / containers in Vuepress?

I've set up a website in VuePress and found that it supports markdown-it's :::danger, :::tip, :::info etc. to generate custom containers.
I was wondering whether this could be extended, to use for example :::card or :::example or whatever you want.
I found https://github.com/posva/markdown-it-custom-block, but I can't figure out how to implement it.
This is what I've got in my config.js:
markdown: {
  // options for markdown-it-anchor
  anchor: { permalink: false },
  // options for markdown-it-toc
  toc: { includeLevel: [1, 2] },
  extendMarkdown: md => {
    md.use(require("markdown-it-container"), "card", {
      validate: function(params) {
        return params.trim().match(/^card\s+(.*)$/);
      },
      render: function(tokens, idx) {
        var m = tokens[idx].info.trim().match(/^card\s+(.*)$/);
        if (tokens[idx].nesting === 1) {
          // opening tag
          return (
            "<card><summary>" + md.utils.escapeHtml(m[1]) + "</summary>\n"
          );
        } else {
          // closing tag
          return "</card>\n";
        }
      }
    });
  }
}
Any advice is much appreciated!
The script you have will work with ::: card. To get it to work, change
extendMarkdown: md => {...
to
config: md => {...
This took me a while to figure out. It's a version conflict, which is why it's currently not working.
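For reference, here is a sketch of the resulting markdown section of config.js; it is the same container definition as in the question, only the hook name changes (which of config or extendMarkdown applies depends on your VuePress version, as noted above):
markdown: {
  anchor: { permalink: false },
  toc: { includeLevel: [1, 2] },
  // older VuePress releases use `config`; newer ones renamed the hook to `extendMarkdown`
  config: md => {
    md.use(require("markdown-it-container"), "card", {
      validate: params => params.trim().match(/^card\s+(.*)$/),
      render: (tokens, idx) => {
        const m = tokens[idx].info.trim().match(/^card\s+(.*)$/);
        if (tokens[idx].nesting === 1) {
          // opening tag
          return "<card><summary>" + md.utils.escapeHtml(m[1]) + "</summary>\n";
        }
        // closing tag
        return "</card>\n";
      }
    });
  }
}
In a page the container is then opened with ::: card Some title and closed with :::.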

Print a string to stdout using Logstash 1.4?

So I was testing this config for using metrics from the Logstash website here.
input {
  generator {
    type => "generated"
  }
}
filter {
  if [type] == "generated" {
    metrics {
      meter => "events"
      add_tag => "metric"
    }
  }
}
output {
  # only emit events with the 'metric' tag
  if "metric" in [tags] {
    stdout {
      message => "rate: %{events.rate_1m}"
    }
  }
}
But it looks like the "message" field for stdout was deprecated. What is the correct way to do this in Logstash 1.4?
So I figured it out after looking at the Logstash JIRA page.
NOTE: The metrics only print, or "flush", every 5 seconds, so if you are generating logs for less than 5 seconds you won't see a metrics print statement (see the footnote after the config below).
Looks like it should be:
output {
  if "metric" in [tags] {
    stdout {
      codec => line {
        format => "Rate: %{events.rate_1m}"
      }
    }
  }
}
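As a footnote on the 5-second flush mentioned in the NOTE above: current versions of the metrics filter expose a flush_interval setting (it must be a multiple of 5 seconds). I have not verified that it is already available in 1.4, but where it is, the filter would look like this:
filter {
  if [type] == "generated" {
    metrics {
      meter => "events"
      add_tag => "metric"
      # emit a metrics event every 10 seconds instead of the default 5
      # (flush_interval availability in Logstash 1.4 not verified)
      flush_interval => 10
    }
  }
}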