I am writing one of my first Vitest tests, and my code checks the visualViewport when the component is mounted. When I run the test, it fails because visualViewport is not defined. How would I go about having this defined when the test runs?
onMounted(() => {
if (visualViewport.width > 1024) {
options.isDesktop = true;
}
visualViewport.addEventListener('resize', ({target}) => {
if (target.width > 1024) {
options.isDesktop = true;
} else {
options.isDesktop = false;
}
});
});
ReferenceError: visualViewport is not defined
│ ❯ src/scripts/search-and-filter/App.vue:22:3
│ 20|
│ 21| onMounted(() => {
│ 22| if (visualViewport.width > 1024) {
│ | ^
│ 23| options.isDesktop = true;
│ 24| }
Let me know if more information is needed and I will add it. Thank you for your help.
jsdom is being used, so I don't think that is the problem.
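One approach (a minimal sketch, assuming Vitest's jsdom environment and its vi.stubGlobal helper; the 1280x800 dimensions are arbitrary) is to stub visualViewport before mounting the component, either in the test file or in a shared setup file:
import { vi, beforeEach, afterEach } from 'vitest';
beforeEach(() => {
  // jsdom does not implement the VisualViewport API, so provide a minimal stub
  // with just the properties and methods the component touches.
  vi.stubGlobal('visualViewport', {
    width: 1280,
    height: 800,
    addEventListener: vi.fn(),
    removeEventListener: vi.fn(),
  });
});
afterEach(() => {
  // restore the original (undefined) global between tests
  vi.unstubAllGlobals();
});
With the stub in place, the onMounted hook can read visualViewport.width, and you can inspect the addEventListener spy (or call the handler it captured) to exercise the resize branch.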
I'm new to dynamic blocks and am having some trouble writing rules for the listeners of a load balancer that was created using for_each.
Below are the resources I created:
resource "aws_lb_listener" "app_listener_forward" {
for_each = toset(var.app_listener_ports)
load_balancer_arn = aws_lb.app_alb.arn
port = each.value
protocol = "HTTPS"
ssl_policy = "ELBSecurityPolicy-TLS-1-2-Ext-2018-06"
certificate_arn = var.ssl_cert
default_action {
type = "forward"
forward {
dynamic "target_group" {
for_each = aws_lb_target_group.app_tg
content {
arn = target_group.value["arn"]
}
}
stickiness {
enabled = true
duration = 86400
}
}
}
}
resource "aws_lb_listener_rule" "app_https_listener_rule" {
for_each = toset(var.app_listener_ports)
listener_arn = aws_lb_listener.app_listener_forward[each.value].arn
action {
type = "forward"
forward {
dynamic "target_group" {
for_each = aws_lb_target_group.app_tg
content {
arn = target_group.value["arn"]
}
}
}
}
dynamic "condition" {
for_each = var.images
path_pattern {
content {
values = condition.value["paths"]
}
}
}
}
resource "aws_lb_target_group" "app_tg" {
for_each = var.images
name = each.key
port = each.value.port
protocol = "HTTP"
target_type = "ip"
vpc_id = aws_vpc.app_vpc.id
health_check {
interval = 130
timeout = 120
healthy_threshold = 10
unhealthy_threshold = 10
}
stickiness {
type = "lb_cookie"
cookie_duration = 86400
}
}
Below is how the variables are defined:
variable "images" {
type = map(object({
app_port = number
paths = set(string)
}))
default = {
"app-one" = {
app_port = 3000
paths = [
"/appOne",
"/appOne/*"
]
}
"app-two" = {
app_port = 4000
paths = [
"/appTwo",
"/appTwo/*"
]
}
}
}
variable "app_listener_ports" {
type = list(string)
default = [
80, 443, 22, 7999, 8999
]
}
Upon executing, I am getting an error about the path_pattern block being unexpected:
Error: Unsupported block type
│
│ on alb.tf line 78, in resource "aws_lb_listener_rule" "app_https_listener_rule":
│ 78: path_pattern {
│
│ Blocks of type "path_pattern" are not expected here.
I've tried a few ways to get this dynamic block right but am having some difficulty. Any advice would be appreciated.
Thank you!
Try it like this:
dynamic "condition" {
for_each = var.images
content {
path_pattern {
values = condition.value.paths
}
}
}
And change the type of paths from set(string) to list(string).
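For reference, with that change the images variable from your post would look like this (a sketch reusing the values from your original default):
variable "images" {
  type = map(object({
    app_port = number
    paths    = list(string)
  }))
  default = {
    "app-one" = {
      app_port = 3000
      paths    = ["/appOne", "/appOne/*"]
    }
    "app-two" = {
      app_port = 4000
      paths    = ["/appTwo", "/appTwo/*"]
    }
  }
}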
This is also completely acceptable:
dynamic "condition" {
for_each = var.images
content {
path_pattern {
values = condition.value["paths"]
}
}
}
However, in my opinion it's better not to use a dynamic block for the condition here, for the sake of readability and maintainability.
condition {
path_pattern {
values = [
"/appOne",
"/appOne/*" ## can also use variables if you prefer !!
]
}
}
I have already answered your original post, which covers the problem you had after fixing the dynamic syntax.
Post URL: Error when creating dynamic terraform rule for alb listener rule
Trying to do an Azure Terraform deployment and failing horribly; looking for some ideas on what I am missing. Basically, I am trying to deploy 2 (maybe more later) VMs with a variable size of disks, join them to the domain, and add SQL Server to them. (Be gentle with me, I come from a VMware/Terraform background; this is my first SQL deployment on Azure!)
My module:
## main.tf:
# ----------- NIC --------------------------------
resource "azurerm_network_interface" "nic" {
name = "${var.vm_name}-nic"
resource_group_name = var.rg.name
location = var.location
ip_configuration {
name = "${var.vm_name}-internal"
subnet_id = var.subnet_id
private_ip_address_allocation = "Static"
private_ip_address = var.private_ip
}
dns_servers = var.dns_servers
}
# ----------- VM --------------------------------
resource "azurerm_windows_virtual_machine" "vm" {
/* count = length(var.instances) */
name = var.vm_name
location = var.location
resource_group_name = var.rg.name
network_interface_ids = [azurerm_network_interface.nic.id]
size = var.size
zone = var.zone
admin_username = var.win_admin_user
admin_password = var.win_admin_pw # data.azurerm_key_vault_secret.vmadminpwd.value
enable_automatic_updates = "false"
patch_mode = "Manual"
provision_vm_agent = "true"
tags = var.vm_tags
source_image_reference {
publisher = "MicrosoftSQLServer"
offer = "sql2019-ws2019"
sku = "enterprise"
version = "latest"
}
os_disk {
name = "${var.vm_name}-osdisk"
caching = "ReadWrite"
storage_account_type = "StandardSSD_LRS"
disk_size_gb = 250
}
}
# ----------- DOMAIN JOIN --------------------------------
// Waits for up to 1 hour for the domain to become available. Returns error 1 if unsuccessful, preventing the member from attempting to join.
resource "azurerm_virtual_machine_extension" "wait-for-domain-to-provision" {
name = "TestConnectionDomain"
publisher = "Microsoft.Compute"
type = "CustomScriptExtension"
type_handler_version = "1.9"
virtual_machine_id = azurerm_windows_virtual_machine.vm.id
settings = <<SETTINGS
{
"commandToExecute": "powershell.exe -Command \"while (!(Test-Connection -ComputerName ${var.active_directory_domain_name} -Count 1 -Quiet) -and ($retryCount++ -le 360)) { Start-Sleep 10 } \""
}
SETTINGS
}
resource "azurerm_virtual_machine_extension" "join-domain" {
name = azurerm_windows_virtual_machine.vm.name
publisher = "Microsoft.Compute"
type = "JsonADDomainExtension"
type_handler_version = "1.3"
virtual_machine_id = azurerm_windows_virtual_machine.vm.id
settings = <<SETTINGS
{
"Name": "${var.active_directory_domain_name}",
"OUPath": "",
"User": "${var.active_directory_username}#${var.active_directory_domain_name}",
"Restart": "true",
"Options": "3"
}
SETTINGS
protected_settings = <<SETTINGS
{
"Password": "${var.active_directory_password}"
}
SETTINGS
depends_on = [azurerm_virtual_machine_extension.wait-for-domain-to-provision]
}
# ----------- DISKS --------------------------------
resource "azurerm_managed_disk" "data" {
for_each = var.disks
name = "${var.vm_name}-${each.value.name}"
location = var.location
resource_group_name = var.rg.name
storage_account_type = each.value.sa
create_option = each.value.create
disk_size_gb = each.value.size
zone = var.zone
}
resource "azurerm_virtual_machine_data_disk_attachment" "disk-attachment" {
for_each = var.disks
managed_disk_id = azurerm_managed_disk.data[each.key].id
virtual_machine_id = azurerm_windows_virtual_machine.vm.id
lun = each.value.lun
caching = "ReadWrite"
depends_on = [azurerm_windows_virtual_machine.vm]
}
# ----------- SQL --------------------------------
# configure the SQL side of the deployment
resource "azurerm_mssql_virtual_machine" "sqlvm" {
/* count = length(var.instances) */
virtual_machine_id = azurerm_windows_virtual_machine.vm.id
sql_license_type = "PAYG"
r_services_enabled = true
sql_connectivity_port = 1433
sql_connectivity_type = "PRIVATE"
/* sql_connectivity_update_username = var.sqladmin
sql_connectivity_update_password = data.azurerm_key_vault_secret.sqladminpwd.value */
#The storage_configuration block supports the following:
storage_configuration {
disk_type = "NEW" # (Required) The type of disk configuration to apply to the SQL Server. Valid values include NEW, EXTEND, or ADD.
storage_workload_type = "OLTP" # (Required) The type of storage workload. Valid values include GENERAL, OLTP, or DW.
data_settings {
default_file_path = "F:\\Data"
luns = [1]
}
log_settings {
default_file_path = "G:\\Log"
luns = [2]
}
temp_db_settings {
default_file_path = "D:\\TempDb"
luns = [0]
}
}
}
## provider.tf
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = ">=3.0.1"
#configuration_aliases = [azurerm.corp]
}
}
}
## variables.tf
# ----------- COMMON --------------------------------
variable "vm_name" {
type = string
}
variable "rg" {
/* type = string */
description = "STACK - resource group"
}
variable "location" {
type = string
description = "STACK - location"
}
# ----------- NIC --------------------------------
variable "subnet_id" {
type = string
description = "STACK - subnet"
}
variable "private_ip" {
}
variable "dns_servers" {
}
# ----------- VM --------------------------------
variable "size" {
description = "VM - size"
type = string
}
variable "win_admin_user" {
sensitive = true
type = string
}
variable "win_admin_pw" {
sensitive = true
type = string
}
variable "os_storage_type" {
type = string
}
variable "vm_tags" {
type = map(any)
}
variable "zone" {
#type = list
description = "VM AZ"
}
# ----------- DOMAIN JOIN --------------------------------
variable "active_directory_domain_name" {
type = string
}
variable "active_directory_username" {
sensitive = true
}
variable "active_directory_password" {
sensitive = true
}
# ----------- SQL --------------------------------
variable "sql_maint_day" {
type = string
description = "SQL - maintenance day"
}
variable "sql_maint_length_min" {
type = number
description = "SQL - maintenance duration (min)"
}
variable "sql_maint_start_hour" {
type = number
description = "SQL- maintenance start (hour of the day)"
}
# ----------- DISKS --------------------------------
/* variable "disk_storage_account" {
type = string
default = "Standard_LRS"
description = "DATA DISKS - storage account type"
}
variable "disk_create_method" {
type = string
default = "Empty"
description = "DATA DISKS - creation method"
}
variable "disk_size0" {
type = number
}
variable "disk_size1" {
type = number
}
variable "disk_size2" {
type = number
}
variable "lun0" {
type = number
default = 0
}
variable "lun1" {
type = number
default = 1
}
variable "lun2" {
default = 2
type = number
} */
/* variable "disks" {
description = "List of disks to create"
type = map(any)
default = {
disk0 = {
name = "data0"
size = 200
create = "Empty"
sa = "Standard_LRS"
lun = 0
}
disk1 = {
name = "data1"
size = 500
create = "Empty"
sa = "Standard_LRS"
lun = 1
}
}
} */
variable "disks" {
type = map(object({
name = string
size = number
create = string
sa = string
lun = number
}))
}
The actual deployment:
## main.tf
/*
PS /home/fabrice> Get-AzVMSize -Location northeurope | where-object {$_.Name -like "*ds13*"}
*/
module "uat_set" {
source = "../modules/vm"
providers = {
azurerm = azurerm.cbank-test
}
for_each = var.uat_set
active_directory_domain_name = local.uat_ad_domain
active_directory_password = var.domain_admin_password
active_directory_username = var.domain_admin_username
disks = var.disk_allocation
dns_servers = local.dns_servers
location = local.uat_location
os_storage_type = local.uat_storage_type
private_ip = each.value.private_ip
rg = data.azurerm_resource_group.main
size = each.value.vm_size
sql_maint_day = local.uat_sql_maintenance_day
sql_maint_length_min = local.uat_sql_maintenance_min
sql_maint_start_hour = local.uat_sql_maintenance_start_hour
subnet_id = data.azurerm_subnet.main.id
vm_name = each.key
vm_tags = var.default_tags
win_admin_pw = var.admin_password
win_admin_user = var.admin_username
zone = each.value.zone[0]
}
variable "uat_set" {
description = "List of VM-s to create"
type = map(any)
default = {
UAT-SQLDB-NE-01 = {
private_ip = "192.168.32.8"
vm_size = "Standard_DS13-4_v2"
zone = ["1"]
}
UAT-SQLDB-NE-02 = {
private_ip = "192.168.32.10"
vm_size = "Standard_DS13-4_v2"
zone = ["2"]
}
}
}
variable "disk_allocation" {
type = map(object({
name = string
size = number
create = string
sa = string
lun = number
}))
default = {
"temp" = {
name = "temp"
size = 200
create = "Empty"
sa = "Standard_LRS"
lun = 0
},
"disk1" = {
name = "data1"
size = 500
create = "Empty"
sa = "Standard_LRS"
lun = 1
},
"disk2" = {
name = "data2"
size = 500
create = "Empty"
sa = "Standard_LRS"
lun = 2
}
}
}
locals {
dns_servers = ["192.168.34.5", "192.168.34.10"]
uat_storage_type = "Standard_LRS"
uat_sql_maintenance_day = "Saturday"
uat_sql_maintenance_min = 180
uat_sql_maintenance_start_hour = 23
uat_ad_domain = "civbdev.local"
uat_location = "North Europe"
}
## variables.tf
# new build variables
variable "Environment" {
default = "DEV"
description = "this is the environment variable used to intperpolate with others vars"
}
variable "default_tags" {
type = map(any)
default = {
Environment = "DEV"
Product = "dev-XXXtemplateXXX"
Terraformed = "https://AllicaBankLtd@dev.azure.com/XXXtemplateXXX/Terraform/DEV"
}
}
variable "admin_username" {
sensitive = true
}
variable "admin_password" {
sensitive = true
}
variable "domain_admin_username" {
sensitive = true
}
variable "domain_admin_password" {
sensitive = true
}
Resources create OK, except for the SQL part:
│ Error: A resource with the ID "/subscriptions/<..redacted...>/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/UAT-SQLDB-NE-02" already exists - to be managed via Terraform this resource needs to be imported into the State. Please see the resource documentation for "azurerm_mssql_virtual_machine" for more information.
│
│ with module.uat_set["UAT-SQLDB-NE-02"].azurerm_mssql_virtual_machine.sqlvm,
│ on ../modules/vm/main.tf line 115, in resource "azurerm_mssql_virtual_machine" "sqlvm":
│ 115: resource "azurerm_mssql_virtual_machine" "sqlvm" {
│
╵
╷
│ Error: A resource with the ID "/subscriptions/<..redacted...>/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/UAT-SQLDB-NE-01" already exists - to be managed via Terraform this resource needs to be imported into the State. Please see the resource documentation for "azurerm_mssql_virtual_machine" for more information.
│
│ with module.uat_set["UAT-SQLDB-NE-01"].azurerm_mssql_virtual_machine.sqlvm,
│ on ../modules/vm/main.tf line 115, in resource "azurerm_mssql_virtual_machine" "sqlvm":
│ 115: resource "azurerm_mssql_virtual_machine" "sqlvm" {
│
╵
Any notions, please, as to what I might be missing?
Ta,
Fabrice
UPDATE:
Thanks to those who replied. Just to confirm: it is not an already-existing resource; I get this error right at the time these VMs are created.
For example, these are my VMs after the Terraform run (none of them has the SQL extension).
The plan even states it will create these:
Terraform will perform the following actions:
# module.uat_set["UAT-SQLDB-NE-01"].azurerm_mssql_virtual_machine.sqlvm will be created
+ resource "azurerm_mssql_virtual_machine" "sqlvm" {
+ id = (known after apply)
+ r_services_enabled = true
+ sql_connectivity_port = 1433
+ sql_connectivity_type = "PRIVATE"
+ sql_license_type = "PAYG"
+ virtual_machine_id = "/subscriptions/..../providers/Microsoft.Compute/virtualMachines/UAT-SQLDB-NE-01"
+ storage_configuration {
+ disk_type = "NEW"
+ storage_workload_type = "OLTP"
+ data_settings {
+ default_file_path = "F:\\Data"
+ luns = [
+ 1,
]
}
+ log_settings {
+ default_file_path = "G:\\Log"
+ luns = [
+ 2,
]
}
+ temp_db_settings {
+ default_file_path = "Z:\\TempDb"
+ luns = [
+ 0,
]
}
}
}
# module.uat_set["UAT-SQLDB-NE-02"].azurerm_mssql_virtual_machine.sqlvm will be created
+ resource "azurerm_mssql_virtual_machine" "sqlvm" {
+ id = (known after apply)
+ r_services_enabled = true
+ sql_connectivity_port = 1433
+ sql_connectivity_type = "PRIVATE"
+ sql_license_type = "PAYG"
+ virtual_machine_id = "/subscriptions/..../providers/Microsoft.Compute/virtualMachines/UAT-SQLDB-NE-02"
+ storage_configuration {
+ disk_type = "NEW"
+ storage_workload_type = "OLTP"
+ data_settings {
+ default_file_path = "F:\\Data"
+ luns = [
+ 1,
]
}
+ log_settings {
+ default_file_path = "G:\\Log"
+ luns = [
+ 2,
]
}
+ temp_db_settings {
+ default_file_path = "Z:\\TempDb"
+ luns = [
+ 0,
]
}
}
}
Plan: 2 to add, 0 to change, 0 to destroy.
Presumably, if these resources already existed somehow (which would be odd, as Terraform just created the VMs), then the plan would not say it is going to create them now, would it?
So the error is quite the source of my confusion: if the VM was only just created and the creation of the extension failed, how could the resource already exist?
In this case you should probably just import the resources into your Terraform state, as the error suggests.
For example:
terraform import module.uat_set[\"UAT-SQLDB-NE-02\"].azurerm_mssql_virtual_machine.sqlvm "/subscriptions/<..redacted...>/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/UAT-SQLDB-NE-02"
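And similarly for the other instance reported in the error (same redacted subscription path as in the error message):
terraform import module.uat_set[\"UAT-SQLDB-NE-01\"].azurerm_mssql_virtual_machine.sqlvm "/subscriptions/<..redacted...>/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachines/UAT-SQLDB-NE-01"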
I tried to run the following Terraform code to create a test SMB share, but got an error. See the code:
provider "aws" {
region = "us-east-1"
}
resource "aws_storagegateway_smb_file_share" "test_smb_share" {
authentication = "ActiveDirectory"
gateway_arn = "arn:aws:storagegateway:us-east-1:145429107744:gateway/sgw-4xxxxxxx"
default_storage_class = "S3_STANDARD"
location_arn = "arn:aws:s3:::xxxxxxxx"
role_arn = "arn:aws:iam::145429107744:role/service-role/StorageGatewayBucketAccessRolee896cdf0-cb46-4471-a0de-119f69f87e"
valid_user_list = ["@Domain Admins","@Admins"]
kms_encrypted = "true"
kms_key_arn = "arn:aws:kms:us-east-1:145429107744:key/8c4b962b-c00a-4a32-8fbd-76b174efb609"
tags = {
atomdev = "prod"
atomdomain = "xxxxxx"
atomos = "file system"
atompid = "32"
atomrole = "storage"
}
}
aws_storagegateway_smb_file_share.test_smb_share: Creating...
╷
│ Error: error creating Storage Gateway SMB File Share: InvalidGatewayRequestException: OverlappingLocations
│ {
│ RespMetadata: {
│ StatusCode: 400,
│ RequestID: "e8f7466d-23af-4a4c-a457-d39a0f99406d"
│ },
│ Error_: {
│ ErrorCode: "OverlappingLocations"
│ },
│ Message_: "OverlappingLocations"
│ }
│
│ with aws_storagegateway_smb_file_share.test_smb_share,
│ on main.tf line 5, in resource "aws_storagegateway_smb_file_share" "test_smb_share":
│ 5: resource "aws_storagegateway_smb_file_share" "test_smb_share" {
│
Any idea?
If you try to run two shares on the same SGW that have overlapping S3 locations, you'll get this error. For example:
\\my-s3-bucket\folder1\data
\\my-s3-bucket\folder1
^ those would overlap, since one would contain a subset of the other
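To illustrate in Terraform terms (a hypothetical sketch with made-up bucket, gateway, and role names, assuming your provider version accepts an S3 prefix in location_arn): two shares on the same gateway are fine as long as their locations do not nest inside each other.
resource "aws_storagegateway_smb_file_share" "share_folder1" {
  authentication = "ActiveDirectory"
  gateway_arn    = "arn:aws:storagegateway:us-east-1:111111111111:gateway/sgw-xxxxxxxx"
  role_arn       = "arn:aws:iam::111111111111:role/StorageGatewayBucketAccessRole"
  # backed by s3://my-s3-bucket/folder1/
  location_arn   = "arn:aws:s3:::my-s3-bucket/folder1"
}
resource "aws_storagegateway_smb_file_share" "share_folder2" {
  authentication = "ActiveDirectory"
  gateway_arn    = "arn:aws:storagegateway:us-east-1:111111111111:gateway/sgw-xxxxxxxx"
  role_arn       = "arn:aws:iam::111111111111:role/StorageGatewayBucketAccessRole"
  # backed by s3://my-s3-bucket/folder2/, which does not overlap with folder1
  location_arn   = "arn:aws:s3:::my-s3-bucket/folder2"
}
A share pointed at arn:aws:s3:::my-s3-bucket (the whole bucket) alongside either of the above would trigger OverlappingLocations, because the bucket root contains both prefixes.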
I just purchased the Keen theme from Bootstrap, and whilst following the instructions to the letter I ran into an error executing 'npm run build' (see below). I've looked at the webpack.config.js file (see below), but as it's something I've not looked at in depth, I can't figure out where the problem is. Any help will be greatly appreciated, thanks!
Error output:
0 info it worked if it ends with ok
1 verbose cli [ '/usr/local/bin/node', '/usr/local/bin/npm', 'run', 'build' ]
2 info using npm@6.13.6
3 info using node@v13.8.0
4 verbose run-script [ 'prebuild', 'build', 'postbuild' ]
5 info lifecycle keen@1.4.2~prebuild: keen@1.4.2
6 info lifecycle keen@1.4.2~build: keen@1.4.2
7 verbose lifecycle keen@1.4.2~build: unsafe-perm in lifecycle true
8 verbose lifecycle keen@1.4.2~build: PATH: /usr/local/lib/node_modules/npm/node_modules/npm-lifecycle/node-gyp-bin:/Users/julian/Downloads/compile/node_modules/.bin:/Users/julian/.rvm/gems/ruby-2.6.5/bin:/Users/julian/.rvm/gems/ruby-2.6.5@global/bin:/Users/julian/.rvm/rubies/ruby-2.6.5/bin:/Users/julian/.cargo/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/Applications/VMware Fusion.app/Contents/Public/:/Users/julian/.rvm/bin
9 verbose lifecycle keen@1.4.2~build: CWD: /Users/julian/Downloads/compile
10 silly lifecycle keen@1.4.2~build: Args: [ '-c', 'webpack' ]
11 silly lifecycle keen@1.4.2~build: Returned: code: 1 signal: null
12 info lifecycle keen@1.4.2~build: Failed to exec build script
13 verbose stack Error: keen@1.4.2 build: `webpack`
13 verbose stack Exit status 1
13 verbose stack at EventEmitter.<anonymous> (/usr/local/lib/node_modules/npm/node_modules/npm-lifecycle/index.js:332:16)
13 verbose stack at EventEmitter.emit (events.js:321:20)
13 verbose stack at ChildProcess.<anonymous> (/usr/local/lib/node_modules/npm/node_modules/npm-lifecycle/lib/spawn.js:55:14)
13 verbose stack at ChildProcess.emit (events.js:321:20)
13 verbose stack at maybeClose (internal/child_process.js:1026:16)
13 verbose stack at Process.ChildProcess._handle.onexit (internal/child_process.js:286:5)
14 verbose pkgid keen@1.4.2
15 verbose cwd /Users/julian/Downloads/compile
16 verbose Darwin 19.3.0
17 verbose argv "/usr/local/bin/node" "/usr/local/bin/npm" "run" "build"
18 verbose node v13.8.0
19 verbose npm v6.13.6
20 error code ELIFECYCLE
21 error errno 1
22 error keen@1.4.2 build: `webpack`
22 error Exit status 1
23 error Failed at the keen@1.4.2 build script.
23 error This is probably not a problem with npm. There is likely additional logging output above.
24 verbose exit [ 1, true ]
webpack.config.js:
/**
* Main file of webpack config.
* Please do not modify unless you know what you are doing
*/
const path = require("path");
const glob = require("glob");
const webpack = require("webpack");
const fs = require("fs");
const parser = require("comment-parser");
const MiniCssExtractPlugin = require("mini-css-extract-plugin");
const CopyWebpackPlugin = require("copy-webpack-plugin");
const WebpackRTLPlugin = require("webpack-rtl-plugin");
const TerserJSPlugin = require("terser-webpack-plugin");
const OptimizeCSSAssetsPlugin = require("optimize-css-assets-webpack-plugin");
const WebpackMessages = require("webpack-messages");
const ExcludeAssetsPlugin = require("webpack-exclude-assets-plugin");
const atob = require("atob");
const slash = require("slash");
const del = require("del");
// optional
const ReplaceInFileWebpackPlugin = require("replace-in-file-webpack-plugin");
/**
* Known issues:
* 1) Remove webpack bootstrap for single-module apps
* https://github.com/webpack/webpack/issues/2638
*/
// arguments/params from the line command
const args = {};
// remove first 2 unused elements from array
const argv = JSON.parse(process.env.npm_config_argv).cooked.slice(2);
argv.forEach((arg, i) => {
if (arg.match(/^--/)) {
const next = argv[i + 1];
args[arg] = true;
if (next && !next.match(/^--/)) {
args[arg] = argv[i + 1];
}
}
});
// read parameters from the command executed by user
const rtl = args["--rtl"] || false;
const prod = args["--prod"] || false;
const css = args["--css"] || true;
const js = args["--js"] || true;
// theme name
const themeName = "keen";
// global variables
const release = true;
const apiUrl = false; // boolean
const rootPath = path.resolve(__dirname, "..");
const frameworkPath = path.resolve(__dirname, "..");
const distPath = rootPath + "/dist";
const configPath = rootPath + "/tools";
const assetDistPath = distPath + "/assets";
const srcPath = rootPath + "/src/assets";
// page scripts and styles
const pageScripts = glob.sync(srcPath + "/js/pages/**/!(_*).js");
const pagesScss = glob.sync(srcPath + "/sass/pages/**/!(_*).scss");
const extraPlugins = [];
const filesConfig = [];
const imageReference = {};
const exclude = [];
const nodeMedia = [];
// remove older folders and files
// (async () => {
// await del.sync(assetDistPath, {force: true});
// })();
// get all assets config
let files = glob.sync(configPath + "/webpack/**/*.js");
// parse comments to get the output location
files.forEach((filename) => {
// get file content
const text = fs.readFileSync(filename).toString();
// use parser plugin to parse the comment.
const parsed = parser(text);
if (parsed.length > 0 && parsed[0].tags.length > 0) {
// push to list
filesConfig.push({
filename: filename,
params: parsed[0].tags,
});
}
});
const entries = {};
filesConfig.forEach((file) => {
let output = "";
file.params.forEach((param) => {
// get output path
if (param.tag === "output") {
output = param.name;
}
});
entries[output] = file.filename;
});
// process skin scss
const skinScss = glob.sync(srcPath + "/sass/**/skins/**/!(_*|style*).scss");
skinScss.forEach((file) => {
const matched = file.match(/sass\/global\/layout\/(.*?)\.scss$/);
if (matched) {
entries["css/skins/" + matched[1].replace(/\/skins\//, "/")] = file;
}
});
// process pages scss
pagesScss.forEach((file) => {
const matched = file.match(/\/(pages\/.*?)\.scss$/);
if (matched) {
// keep image reference for output path rewrite
const imgMatched = fs.readFileSync(file).toString().match(/['|"](.*?\.(gif|png|jpe?g))['|"]/g);
if (imgMatched) {
imgMatched.forEach((img) => {
img = img.replace(/^['|"](.+(?=['|"]$))['|"]$/, '$1');
imageReference[path.basename(img)] = "css/" + matched[1] + ".css";
});
}
entries['css/' + matched[1]] = file;
}
});
// auto get page scripts from source
pageScripts.forEach(function (jsPath) {
const matched = jsPath.match(/js\/(pages\/.*?)\.js$/);
entries["js/" + matched[1]] = jsPath;
});
if (release) {
// copy html by demo
extraPlugins.push(new CopyWebpackPlugin([{
from: rootPath + "/src",
to: distPath,
}]));
}
if ((/true/i).test(rtl)) {
// enable rtl for css
extraPlugins.push(new WebpackRTLPlugin({
filename: "[name].rtl.css",
}));
}
if (!(/true/i).test(js)) {
// exclude js files
exclude.push('\.js$');
}
if (!(/true/i).test(css)) {
// exclude css files
exclude.push('\.s?css$');
}
if (exclude.length) {
// add plugin for exclude assets (js/css)
extraPlugins.push(new ExcludeAssetsPlugin({
path: exclude
}));
}
if (apiUrl) {
// replace api url to point to server
extraPlugins.push(new ReplaceInFileWebpackPlugin([{
dir: assetDistPath + "/js",
test: /\.js$/,
rules: [{
search: /inc\/api\//i,
replace: 'https://keenthemes.com/' + themeName + '/tools/preview/'
}]
}]));
}
const mainConfig = function () {
return {
// enabled/disable optimizations
mode: (/true/i).test(prod) ? "production" : "development",
// console logs output, https://webpack.js.org/configuration/stats/
stats: "errors-warnings",
performance: {
// disable warnings hint
hints: false
},
optimization: {
// js and css minimizer
minimizer: [new TerserJSPlugin({}), new OptimizeCSSAssetsPlugin({})],
},
entry: entries,
output: {
// main output path in assets folder
path: assetDistPath,
// output path based on the entries' filename
filename: "[name].js"
},
resolve: {
alias: {
"morris.js": "morris.js/morris.js",
"jquery-ui": "jquery-ui",
}
},
plugins: [
// webpack log message
new WebpackMessages({
name: themeName,
logger: str => console.log(`>> ${str}`)
}),
// create css file
new MiniCssExtractPlugin({
filename: "[name].css",
}),
// copy media
new CopyWebpackPlugin([{
from: srcPath + "/media",
to: assetDistPath + "/media",
}]),
{
apply: (compiler) => {
// hook name
compiler.hooks.afterEmit.tap('AfterEmitPlugin', (compilation) => {
filesConfig.forEach((file) => {
let output = "";
file.params.forEach((param) => {
// get output path
if (param.tag === "output") {
output = param.name;
}
if (param.tag === "images") {
param.name.split(",").forEach((file) => {
if (file) {
const outputPath = assetDistPath + "/" + pathWithoutFile(output) + "/images/";
// create dir
fs.mkdirSync(outputPath, {recursive: true});
// copy image
fs.copyFileSync(fs.realpathSync(srcPath + "/" + file), outputPath + path.basename(file));
}
});
}
});
});
});
}
},
].concat(extraPlugins),
module: {
rules: [
{
test: /\.css$/,
use: [
MiniCssExtractPlugin.loader,
"css-loader",
],
},
{
test: /\.scss$/,
use: [
MiniCssExtractPlugin.loader,
"css-loader",
{
loader: "sass-loader",
options: {
sourceMap: true,
// // use for separate css pages (custom pages, eg. wizard, invoices, etc.)
// includePaths: demos.map((demo) => {
// return slash(srcPath) + "/sass/theme/demos/" + demo;
// })
}
},
]
},
{
test: /\.(ttf|otf|eot|svg|woff(2)?)(\?[a-z0-9]+)?$/,
include: [
path.resolve(__dirname, "node_modules"),
rootPath,
frameworkPath,
],
use: [
{
loader: "file-loader",
options: {
// prevent name become hash
name: "[name].[ext]",
// move files
outputPath: "plugins/global/fonts",
// rewrite path in css
publicPath: "fonts",
}
}
]
},
{
test: /\.(gif|png|jpe?g)$/,
include: [
path.resolve(__dirname, "node_modules"),
rootPath,
],
use: [{
loader: "file-loader",
options: {
// prevent name become hash
name: "[name].[ext]",
// move files
outputPath: (url, resourcePath) => {
// look for node_modules plugin
const matched = slash(resourcePath).match(/node_modules\/(.*?)\//);
if (matched) {
for (let i = 0; i < filesConfig.length; i++) {
if (filesConfig[i].filename.match(new RegExp(matched[1]))) {
let output = "";
filesConfig[i].params.forEach((param) => {
// get output path without filename
if (param.tag === "output") {
output = pathWithoutFile(param.name);
}
});
nodeMedia[url] = output + "/images/" + url;
return output + "/images/" + url;
}
}
}
// the rest of images put in media/misc/
return "media/misc/" + url;
},
// rewrite path in css
publicPath: (url, resourcePath) => {
if (imageReference[url]) {
// fix image rewrite path
const filePath = pathWithoutFile(imageReference[url]);
return slash(path.relative(assetDistPath + "/" + filePath, assetDistPath + "/media") + "/" + url);
}
if (nodeMedia[url]) {
return "images/" + url;
}
return "../../media/misc/" + url;
},
}
}]
},
]
},
// webpack dev server config
devServer: {
contentBase: distPath,
compress: true,
port: 3000
}
}
};
module.exports = function () {
return [mainConfig()];
};
function pathWithoutFile(filename) {
return filename.substring(0, filename.lastIndexOf("/"));
}
I just realized I forgot to include the src directory in the same parent directory as the included tools directory (facepalm!).