Add commands to client language - vscode-extensions

I previously made an extension with language support for G-code. Now I'm converting it into a language server. The problem is that my extension had some commands which I registered on the client side. When I use the exact same code in the language client (the client of the language server), it does not work. Does anyone have an idea why that could be?
I tried copy pasting all the dependencies etc. but with no success.
Here is the source code of the package.json of the language client:
{
"name": "lsp-sample-client",
"description": "VSCode part of a language server",
"author": "Microsoft Corporation",
"license": "MIT",
"version": "0.0.1",
"publisher": "vscode",
"engines": {
"vscode": "^1.63.0"
},
"main": "./out/extension.js",
"activationEvents": [
"onLanguage:gcode",
"onLanguage:cpl"
],
"contributes": {
"commands": [
{
"command": "lineNumberer.Renumber1",
"title": "Renumber Step 1"
},
{
"command": "lineNumberer.Renumber10",
"title": "Renumber Step 10"
},
{
"command": "lineNumberer.Renumber100",
"title": "Renumber Step 100"
},
{
"command": "lineNumberer.Renumber1000",
"title": "Renumber Step 1000"
}
],
"menus": {
"editor/context": [
{
"command": "lineNumberer.Renumber1",
"title": "Renumber Step 1"
},
{
"when": "editorLangId == gcode || editorLangId == cpl",
"command": "lineNumberer.Renumber10",
"title": "Renumber Step 10"
},
{
"when": "editorLangId == gcode || editorLangId == cpl",
"command": "lineNumberer.Renumber100",
"title": "Renumber Step 100"
},
{
"when": "editorLangId == gcode || editorLangId == cpl",
"command": "lineNumberer.Renumber1000",
"title": "Renumber Step 1000"
}
]
}
},
"dependencies": {
"vscode-languageclient": "^7.0.0"
},
"devDependencies": {
"#types/vscode": "^1.63.0",
"#vscode/test-electron": "^2.1.2"
}
}
Here is the content of the client extension.ts:
/* --------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
* ------------------------------------------------------------------------------------------ */
import * as path from 'path';
import { workspace, ExtensionContext, commands } from 'vscode';
import {
LanguageClient,
LanguageClientOptions,
ServerOptions,
TransportKind
} from 'vscode-languageclient/node';
import { incrementLineNumbersBy } from './lineNumberer/lineNumberer';
let client: LanguageClient;
export function activate(context: ExtensionContext) {
const renumber1 = commands.registerCommand('lineNumberer.Renumber1', () => {
const step = 1;
incrementLineNumbersBy(step);
});
const renumber10 = commands.registerCommand('lineNumberer.Renumber10', () => {
const step = 10;
incrementLineNumbersBy(step);
});
const renumber100 = commands.registerCommand('lineNumberer.Renumber100', () => {
const step = 100;
incrementLineNumbersBy(step);
});
const renumber1000 = commands.registerCommand('lineNumberer.Renumber1000', () => {
const step = 1000;
incrementLineNumbersBy(step);
});
context.subscriptions.push(renumber1);
context.subscriptions.push(renumber10);
context.subscriptions.push(renumber100);
context.subscriptions.push(renumber1000);
// The server is implemented in node
const serverModule = context.asAbsolutePath(
path.join('server', 'out', 'server.js')
);
// If the extension is launched in debug mode then the debug server options are used
// Otherwise the run options are used
const serverOptions: ServerOptions = {
run: { module: serverModule, transport: TransportKind.ipc },
debug: {
module: serverModule,
transport: TransportKind.ipc,
}
};
// Options to control the language client
const clientOptions: LanguageClientOptions = {
// Register the server for gcode and cpl documents
documentSelector: [
{ scheme: 'file', language: 'gcode' },
{ scheme: 'file', language: 'cpl' }
],
synchronize: {
// Notify the server about file changes to '.clientrc files contained in the workspace
fileEvents: workspace.createFileSystemWatcher('**/.clientrc')
}
};
// Create the language client and start the client.
client = new LanguageClient(
'languageServerExample',
'Language Server Example',
serverOptions,
clientOptions
);
// Start the client. This will also launch the server
client.start();
}
export function deactivate(): Thenable<void> | undefined {
if (!client) {
return undefined;
}
return client.stop();
}

I managed to solve the problem myself. The problem was that you have to add the `contributes.commands` section to the outermost ./package.json, not to the language client's package.json.

Related

getExpoPushTokenAsync not working on Standalone app

const token = await Notifications.getExpoPushTokenAsync()
await axiosConfig.post("pushToken", { token });
this code works perfectly on (Production mode and Expo Client app)
When I build an APK with expo build:android, the app crashes when I call this function.
try out:
For a ./google-services.json:
{
"project_info": {
"project_number": "…",
"firebase_url": "…",
"project_id": "…",
"storage_bucket": "…"
},
"client": [
{
"client_info": {
"mobilesdk_app_id": "…",
"android_client_info": {
"package_name": "…"
}
},
"api_key": [
{
"current_key": "KEY_KEY_KEY_KEY_KEY"
}
]
},
please make app.json:
{
"expo": {
"…": "…",
"android": {
"package": "…",
"googleServicesFile": "./google-services.json",
"config": {
"googleSignIn": {
"apiKey": "KEY_KEY_KEY_KEY_KEY"
}
}
https://github.com/expo/expo/issues/7727#issuecomment-611544439
I had similar problem and I solved it by enabling my notification for Expo client on my device.

Convert File in MS Graph API on SPFx return undefined

When I try to download a file from the Graph API by accessing a Drive or Site with JavaScript in SPFx, it returns undefined.
my webpart code:
import { Version } from '@microsoft/sp-core-library';
import {
BaseClientSideWebPart,
IPropertyPaneConfiguration,
PropertyPaneTextField
} from '@microsoft/sp-webpart-base';
import * as strings from 'Docx2PdfWebPartStrings';
import { MSGraphClient } from '@microsoft/sp-http';
export interface IDocx2PdfWebPartProps {
description: string;
}
export default class Docx2PdfWebPart extends BaseClientSideWebPart<IDocx2PdfWebPartProps> {
public async render(): Promise<void> {
const client: MSGraphClient = await this.context.msGraphClientFactory.getClient();
var tenant = 'test';
var siteID = `${tenant}.sharepoint.com,12adb250-26f4-4dbb-9545-71d029bad763,8fdc3f56-2d6d-42d9-9a4d-d684e73c341e`;
var fileID = '01MBNFB7EIQLARTATNE5G3XDJNYBD2A3IL';
var fileName = 'Test.docx';
//This work
var site = await client.api(`/sites/${tenant}.sharepoint.com:/sites/dev:/drive?$select=id,weburl`).get();
console.log(site);
try {
//This not work
var fileFromDrive = await client.api(`/drive/root:/${fileName}:/content?format=pdf`).get();
console.log(fileFromDrive);
var fileFromSite = await client.api(`/sites/${siteID}/drive/items/${fileID}/content?format=pdf`).get();
console.log(fileFromSite);
} catch (error) {
console.log(error);
}
this.domElement.innerHTML = `<h1>Hola Mundo</h1>`;
}
protected get dataVersion(): Version {
return Version.parse('1.0');
}
protected getPropertyPaneConfiguration(): IPropertyPaneConfiguration {
return {
pages: [
{
header: {
description: strings.PropertyPaneDescription
},
groups: [
{
groupName: strings.BasicGroupName,
groupFields: [
PropertyPaneTextField('description', {
label: strings.DescriptionFieldLabel
})
]
}
]
}
]
};
}
}
The chrome console log
But when i use Graph Explorer it works correctly
This is my package-solution.json
{
"$schema": "https://developer.microsoft.com/json-schemas/spfx-build/package-solution.schema.json",
"solution": {
"name": "docx-2-pdf-client-side-solution",
"id": "f4b5db4f-d9ff-463e-b62e-0cc9c9e94089",
"version": "1.0.0.0",
"includeClientSideAssets": true,
"skipFeatureDeployment": true,
"isDomainIsolated": false,
"webApiPermissionRequests": [
{
"resource": "Microsoft Graph",
"scope": "Sites.Read.All"
},
{
"resource": "Microsoft Graph",
"scope": "Files.Read.All"
},
{
"resource": "Microsoft Graph",
"scope": "Files.ReadWrite.All"
},
{
"resource": "Microsoft Graph",
"scope": "Sites.ReadWrite.All"
}
]
},
"paths": {
"zippedPackage": "solution/docx-2-pdf.sppkg"
}
}
I use the following articles
https://learn.microsoft.com/en-us/graph/api/driveitem-get-content?view=graph-rest-1.0&tabs=javascript
https://learn.microsoft.com/en-us/graph/api/driveitem-get-content-format?view=graph-rest-1.0&tabs=javascript#code-try-1
Try using the callback property instead of await:
client.api(`/drive/root:/${fileName}:/content?format=pdf`).get((err, response) => console.log("your response:", err, response));

Aurelia I18N: Scan html sources for new keys and update translation.json files

Is there any tool to scan aurelia project sources (html, js) files and create (update) keys in translation.json files?
Especially I want to collect keys from HTML files that use TBindingBehavior and TValueConverter translation style.
Disclaimer: The packages suggested, are developed by my employer company.
Following are main steps involved in this process.
Generating i18n keys for the html templates, using gulp-i18n-update-localization-ids
Extract keys and values to an external resource, using gulp-i18n-extract
Manually translate the values for different languages
Compile the translations to generate locale files for different language, using gulp-i18n-compile2
Following are the minimalistic gulp tasks
const gulp = require("gulp");
const path = require("path");
const updateLocalizationIds = require('gulp-i18n-update-localization-ids');
const i18nExtract = require('gulp-i18n-extract');
const i18nCompile = require('gulp-i18n-compile2');
const src = path.resolve(__dirname, "src"),
json = path.resolve(src, "*.r.json"),
html = path.resolve(src, "*.html"),
translations = path.resolve(__dirname, "translations/i18n.json"),
locales = path.resolve(__dirname, "locales"),
i18nGlobalPrefixes = new Map();
const generateI18nKeys = function () {
return gulp.src(html)
.pipe(updateLocalizationIds({
emit: 'onChangeOnly',
ignore: [{ content: v => v.startsWith('${') && v.endsWith('}') }],
idTemplate: updateLocalizationIds.prefixFilename(i18nGlobalPrefixes),
whitelist: [
{ tagName: 'h2' },
{
tagName: 'another-custom-el',
attrs: ['some-other-value1', 'some-other-value2']
}
]
}))
.pipe(gulp.dest(src));
}
const i18nExtractOptions = {
plugIns: [
new i18nExtract.html(),
new i18nExtract.json()
],
markUpdates: true,
defaultLanguages: ['de', "fr"] // add more language here as per your need
};
const extractI18n = function () {
return gulp.src([html, json])
.pipe(i18nExtract.extract(translations, i18nExtractOptions))
.pipe(gulp.dest("."));
}
const compileOptions = {
fileName: "translation.json",
defaultLanguage: "en"
};
const compileI18n = function () {
return gulp.src(translations)
.pipe(i18nCompile(compileOptions))
.pipe(gulp.dest(locales));
}
gulp.task("i18n", gulp.series(generateI18nKeys, extractI18n, compileI18n));
What's happening here?
Let us assume that you have all the html files under src directory. You can also have some plain json files under src that act as external resources. Though it is not really needed, in this example, I have used the extension *.r.json for that (r indicates resource).
The first task generateI18nKeys generates i18n keys for the html templates. For example, it transforms the following edit.html
...
<!--edit.html-->
<h2>some text</h2>
<another-custom-el some-other-value1="value1" some-other-value2="value2"></another-custom-el>
... to the following
<!--edit.html-->
<h2 t="edit.t0">some text</h2>
<another-custom-el some-other-value1="value1" some-other-value2="value2"
t="[some-other-value1]edit.t1;[some-other-value2]edit.t2"></another-custom-el>
Use the whitelist property in the config option for this task, to mark elements and attributes for the key generation target.
In the next step, the keys and the corresponding values are extracted to a json file which looks like as follows.
{
"edit": {
"content": {
"edit.t0": {
"content": "some text",
"lastModified": "2019-05-26T16:23:42.306Z",
"needsUpdate": true,
"translations": {
"de": {
"content": "",
"lastModified": ""
},
"fr": {
"content": "",
"lastModified": ""
}
}
},
"edit.t1": {
"content": "value1",
"lastModified": "2019-05-26T16:23:42.306Z",
"needsUpdate": true,
"translations": {
"de": {
"content": "",
"lastModified": ""
},
"fr": {
"content": "",
"lastModified": ""
}
}
},
"edit.t2": {
"content": "value2",
"lastModified": "2019-05-26T16:23:42.306Z",
"needsUpdate": true,
"translations": {
"de": {
"content": "",
"lastModified": ""
},
"fr": {
"content": "",
"lastModified": ""
}
}
}
},
"src": "src\\edit.html"
}
}
Note that empty contents are generated for the localeIds, specified in the task. You can manually change this file to add translations for every language, configured.
Lastly, the compileI18n task generates files for every language from the last json, that looks something like below.
{
"edit": {
"t0": "some text",
"t1": "value1",
"t2": "value2"
}
}
Note that this file can directly be consumed by the aurelia-i18n plugin. For more details check the package specific documentation.
Hope this helps.

"Requested device not found" when using chrome.tabCapture.capture

Problem
I want to capture the audio output of a tab automatically. I'm currently thinking of doing this using Puppeteer (headful), by loading an extension that uses chrome.tabCapture.capture. From my Puppeteer script, I evaluate code within the extension's background.js to get the tab capture started. However, chrome.runtime.lastError.message is set to "Requested device not found".
The extension works as expected outside of Puppeteer and in a Chrome browser.
Any idea why I'm getting Requested device not found?
What does the extension's background.js look like?
function startRecording() {
chrome.tabCapture.capture(options, stream => {
if (stream === null) {
console.log(`Last Error: ${chrome.runtime.lastError.message}`);
return;
}
try {
const recorder = new MediaRecorder(stream);
} catch (err) {
console.log(err.message);
}
recorder.addEventListener('dataavailable', event => {
const { data: blob, timecode } = event;
console.log(`${timecode}: ${blob}`);
});
const timeslice = 60 * 1000;
recorder.start(timeslice);
});
}
What does the relevant part of your Puppeteer script look like?
...
const targets = await browser.targets();
const backgroundPageTarget = targets.find(target => target.type() === 'background_page' && target.url().startsWith('chrome-extension://abcde/'));
const backgroundPage = await backgroundPageTarget.page();
const test = await backgroundPage.evaluate(() => {
startRecording();
return Promise.resolve(42);
});
...
Extension Manifest:
{
"name": "Test",
"description": "",
"version": "1.0",
"icons": {
"128": "icon.png"
},
"manifest_version": 2,
"browser_action": {
"default_popup": "test.html"
},
"background": {
"scripts": [
"background.js"
],
"persistent": true
},
"content_scripts": [
{
"matches": [
"<all_urls>"
],
"all_frames": false,
"js": [
"contentScript.js"
]
}
],
"permissions": [
"activeTab",
"tabs",
"tabCapture",
"storage"
]
}

Elastic Search when to add dynamic mappings

I've been having troubles with Elastic Search (ES) dynamic mappings. Seems like I'm in a catch-22. https://www.elastic.co/guide/en/elasticsearch/guide/current/custom-dynamic-mapping.html
The main goal is to store everything as a string that comes into ES.
What I've tried:
In ES you can't create a dynamic mapping until the index has been
created. Okay, makes sense.
I can't create an empty index, so if
the first item sent into the index is not a string, I can't
re-assign it... I won't know what type of object will be the first
item in the index; it could be any type, due to how the app accepts a variety of objects/events.
So if I can't create the mapping ahead of time, and I can't insert an empty index to create the mapping, and I can't change the mapping after the fact, how do I deal with the first item if it's NOT a string?
Here's what I'm currently doing (using the Javascript Client).
createESIndex = function (esClient){
esClient.index({
index: 'timeline-2015-11-21',
type: 'event',
body: event
},function (error, response) {
if (error) {
logger.log(logger.SEVERITY.ERROR, 'acceptEvent elasticsearch create failed with: '+ error + " req:" + JSON.stringify(event));
console.log(logger.SEVERITY.ERROR, 'acceptEvent elasticsearch create failed with: '+ error + " req:" + JSON.stringify(event));
res.status(500).send('Error saving document');
} else {
res.status(200).send('Accepted');
}
});
}
esClientLookup.getClient( function(esClient) {
esClient.indices.putTemplate({
name: "timeline-mapping-template",
body:{
"template": "timeline-*",
"mappings": {
"event": {
"dynamic_templates": [
{ "timestamp-only": {
"match": "#timestamp",
"match_mapping_type": "date",
"mapping": {
"type": "date",
}
}},
{ "all-others": {
"match": "*",
"match_mapping_type": "string",
"mapping": {
"type": "string",
}
}
}
]
}
}
}
}).then(function(res){
console.log("put template response: " + JSON.stringify(res));
createESIndex(esClient);
}, function(error){
console.log(error);
res.status(500).send('Error saving document');
});
});
Index templates to the rescue !! That's exactly what you need, the idea is to create a template of your index and as soon as you wish to store a document in that index, ES will create it for you with the mapping you gave (even dynamic ones)
curl -XPUT localhost:9200/_template/my_template -d '{
"template": "index_name_*",
"settings": {
"number_of_shards": 1
},
"mappings": {
"type_name": {
"dynamic_templates": [
{
"strings": {
"match": "*",
"match_mapping_type": "*",
"mapping": {
"type": "string"
}
}
}
],
"properties": {}
}
}
}'
Then when you index anything in an index whose name matches index_name_*, the index will be created with the dynamic mapping above.
For instance:
curl -XPUT localhost:9200/index_name_1/type_name/1 -d '{
"one": 1,
"two": "two",
"three": true
}'
That will create a new index called index_name_1 with a mapping type for type_name where all properties are string. You can verify that with
curl -XGET localhost:9200/index_name_1/_mapping/type_name
Response:
{
"index_name_1" : {
"mappings" : {
"type_name" : {
"dynamic_templates" : [ {
"strings" : {
"mapping" : {
"type" : "string"
},
"match" : "*",
"match_mapping_type" : "*"
}
} ],
"properties" : {
"one" : {
"type" : "string"
},
"three" : {
"type" : "string"
},
"two" : {
"type" : "string"
}
}
}
}
}
}
Note that if you're willing to do this via the Javascript API, you can use the indices.putTemplate call.
export const user = {
email: {
type: 'text',
},
};
export const activity = {
date: {
type: 'text',
},
};
export const common = {
name: {
type: 'text',
},
};
import { Client } from '@elastic/elasticsearch';
import { user } from './user';
import { activity } from './activity';
import { common } from './common';
export class UserDataFactory {
private schema = {
...user,
...activity,
...common,
relation_type: {
type: 'join',
eager_global_ordinals: true,
relations: {
parent: ['activity'],
},
},
};
constructor(private client: Client) {
Object.setPrototypeOf(this, UserDataFactory.prototype);
}
async create() {
const settings = {
settings: {
analysis: {
normalizer: {
useLowercase: {
filter: ['lowercase'],
},
},
},
},
mappings: {
properties: this.schema,
},
};
const { body } = await this.client.indices.exists({
index: ElasticIndex.UserDataFactory,
});
await Promise.all([
await (async (client) => {
await new Promise(async function (resolve, reject) {
if (!body) {
await client.indices.create({
index: ElasticIndex.UserDataFactory,
});
}
resolve({ body });
});
})(this.client),
]);
await this.client.indices.close({ index: ElasticIndex.UserDataFactory });
await this.client.indices.putSettings({
index: ElasticIndex.UserDataFactory,
body: settings,
});
await this.client.indices.open({
index: ElasticIndex.UserDataFactory,
});
await this.client.indices.putMapping({
index: ElasticIndex.UserDataFactory,
body: {
dynamic: 'strict',
properties: {
...this.schema,
},
},
});
}
}
wrapper.ts
class ElasticWrapper {
private _client: Client = new Client({
node: process.env.elasticsearch_node,
auth: {
username: 'elastic',
password: process.env.elasticsearch_password || 'changeme',
},
ssl: {
ca: process.env.elasticsearch_certificate,
rejectUnauthorized: false,
},
});
get client() {
return this._client;
}
}
export const elasticWrapper = new ElasticWrapper();
index.ts
new UserDataFactory(elasticWrapper.client).create();