Add USB, Notifications, Network plugins and reusable EmployeeSearch component

New Plugins:
- USB plugin: Device checkout/checkin with employee lookup, checkout history
- Notifications plugin: Announcements with types, scheduling, shopfloor display
- Network plugin: Network device management with subnets and VLANs
- Equipment and Computers plugins: Asset type separation

Frontend:
- EmployeeSearch component: Reusable employee lookup with autocomplete
- USB views: List, detail, checkout/checkin modals
- Notifications views: List, form with recognition mode
- Network views: Device list, detail, form
- Calendar view with FullCalendar integration
- Shopfloor and TV dashboard views
- Reports index page
- Map editor for asset positioning
- Light/dark mode fixes for map tooltips

Backend:
- Employee search API with external lookup service
- Collector API for PowerShell data collection
- Reports API endpoints
- Slides API for TV dashboard
- Fixed AppVersion model (removed BaseModel inheritance)
- Added checkout_name column to usbcheckouts table

Styling:
- Unified detail page styles
- Improved pagination (page numbers instead of prev/next)
- Dark/light mode theme improvements

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
scripts/migration/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
"""Data migration scripts for VBScript ShopDB to Flask migration."""
scripts/migration/fix_legacy_schema.sql (new file, 88 lines)
@@ -0,0 +1,88 @@
-- =============================================================================
-- Legacy Schema Migration Script
-- Adds missing columns to make VBScript ShopDB compatible with Flask ShopDB
-- =============================================================================

-- -----------------------------------------------------------------------------
-- appowners table
-- -----------------------------------------------------------------------------
ALTER TABLE appowners
    ADD COLUMN IF NOT EXISTS email VARCHAR(100) NULL,
    ADD COLUMN IF NOT EXISTS phone VARCHAR(50) NULL,
    ADD COLUMN IF NOT EXISTS createddate DATETIME DEFAULT NOW(),
    ADD COLUMN IF NOT EXISTS modifieddate DATETIME DEFAULT NOW(),
    ADD COLUMN IF NOT EXISTS isactive TINYINT(1) DEFAULT 1;

-- -----------------------------------------------------------------------------
-- pctypes / computertypes alignment
-- -----------------------------------------------------------------------------
-- The Flask app uses pctype table but expects certain columns
ALTER TABLE pctype
    ADD COLUMN IF NOT EXISTS createddate DATETIME DEFAULT NOW(),
    ADD COLUMN IF NOT EXISTS modifieddate DATETIME DEFAULT NOW();

-- Ensure isactive is correct type
ALTER TABLE pctype MODIFY COLUMN isactive TINYINT(1) DEFAULT 1;

-- -----------------------------------------------------------------------------
-- statuses table (if needed - Flask uses assetstatuses)
-- -----------------------------------------------------------------------------
-- assetstatuses already populated earlier

-- -----------------------------------------------------------------------------
-- subnets table
-- -----------------------------------------------------------------------------
ALTER TABLE subnets
    ADD COLUMN IF NOT EXISTS createddate DATETIME DEFAULT NOW(),
    ADD COLUMN IF NOT EXISTS modifieddate DATETIME DEFAULT NOW(),
    ADD COLUMN IF NOT EXISTS isactive TINYINT(1) DEFAULT 1;

-- -----------------------------------------------------------------------------
-- vlans table
-- -----------------------------------------------------------------------------
ALTER TABLE vlans
    ADD COLUMN IF NOT EXISTS createddate DATETIME DEFAULT NOW(),
    ADD COLUMN IF NOT EXISTS modifieddate DATETIME DEFAULT NOW(),
    ADD COLUMN IF NOT EXISTS isactive TINYINT(1) DEFAULT 1;

-- -----------------------------------------------------------------------------
-- usbdevices table
-- -----------------------------------------------------------------------------
ALTER TABLE usbdevices
    ADD COLUMN IF NOT EXISTS createddate DATETIME DEFAULT NOW(),
    ADD COLUMN IF NOT EXISTS modifieddate DATETIME DEFAULT NOW(),
    ADD COLUMN IF NOT EXISTS isactive TINYINT(1) DEFAULT 1;

-- -----------------------------------------------------------------------------
-- usbcheckouts table
-- -----------------------------------------------------------------------------
ALTER TABLE usbcheckouts
    ADD COLUMN IF NOT EXISTS createddate DATETIME DEFAULT NOW(),
    ADD COLUMN IF NOT EXISTS modifieddate DATETIME DEFAULT NOW();

-- -----------------------------------------------------------------------------
-- notifications table
-- -----------------------------------------------------------------------------
ALTER TABLE notifications
    ADD COLUMN IF NOT EXISTS createddate DATETIME DEFAULT NOW(),
    ADD COLUMN IF NOT EXISTS modifieddate DATETIME DEFAULT NOW();

-- Copy dates from existing columns if they exist
UPDATE notifications SET createddate = startdate WHERE createddate IS NULL;
UPDATE notifications SET modifieddate = startdate WHERE modifieddate IS NULL;

-- -----------------------------------------------------------------------------
-- Verify key counts
-- -----------------------------------------------------------------------------
SELECT 'Migration complete. Record counts:' as status;
SELECT 'vendors' as tbl, COUNT(*) as cnt FROM vendors
UNION ALL SELECT 'models', COUNT(*) FROM models
UNION ALL SELECT 'machinetypes', COUNT(*) FROM machinetypes
UNION ALL SELECT 'operatingsystems', COUNT(*) FROM operatingsystems
UNION ALL SELECT 'businessunits', COUNT(*) FROM businessunits
UNION ALL SELECT 'applications', COUNT(*) FROM applications
UNION ALL SELECT 'machines', COUNT(*) FROM machines
UNION ALL SELECT 'printers', COUNT(*) FROM printers
UNION ALL SELECT 'assets', COUNT(*) FROM assets
UNION ALL SELECT 'knowledgebase', COUNT(*) FROM knowledgebase
UNION ALL SELECT 'notifications', COUNT(*) FROM notifications;
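The script is plain SQL, so it can be fed to the mysql client directly, or, to stay in Python, executed statement by statement. A minimal sketch, assuming SQLAlchemy 1.4+/2.0 and a MariaDB/MySQL-style URL (the URL and helper name below are placeholders, not part of this commit); note MySQL autocommits each DDL statement, so there is no rollback safety net:

"""Apply fix_legacy_schema.sql one statement at a time (illustrative sketch)."""
from pathlib import Path

from sqlalchemy import create_engine, text


def apply_schema_fixes(conn_str, sql_path='scripts/migration/fix_legacy_schema.sql'):
    """Split the script on ';' and run each chunk that contains real SQL."""
    engine = create_engine(conn_str)
    chunks = Path(sql_path).read_text().split(';')
    with engine.connect() as conn:
        for chunk in chunks:
            # Skip chunks that are nothing but '--' comments and whitespace
            if not any(line.strip() and not line.strip().startswith('--')
                       for line in chunk.splitlines()):
                continue
            conn.execute(text(chunk))
        conn.commit()  # commits the trailing UPDATEs; DDL has already autocommitted


# apply_schema_fixes('mysql+pymysql://user:pass@host/shopdb')  # placeholder URL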
scripts/migration/migrate_assets.py (new file, 285 lines)
@@ -0,0 +1,285 @@
"""
Migrate machines table to assets + extension tables.

This script migrates data from the legacy machines table to the new
Asset architecture with plugin-owned extension tables.

Strategy:
1. Preserve IDs: assets.assetid = original machines.machineid
2. Create asset record, then type-specific extension record
3. Map machine types to asset types:
   - MachineType = Equipment -> equipment extension
   - MachineType = PC -> computers extension
   - MachineType = Network/Camera/etc -> network_devices extension
   - Printers -> handled separately by printers plugin

Usage:
    python -m scripts.migration.migrate_assets --source <connection_string>
"""

import argparse
import logging
from datetime import datetime
from sqlalchemy import create_engine, text
from sqlalchemy.orm import sessionmaker

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def get_machine_type_mapping():
    """Map legacy machine type IDs to asset types."""
    return {
        # Equipment types
        'CNC': 'equipment',
        'CMM': 'equipment',
        'Lathe': 'equipment',
        'Grinder': 'equipment',
        'EDM': 'equipment',
        'Mill': 'equipment',
        'Press': 'equipment',
        'Robot': 'equipment',
        'Part Marker': 'equipment',
        # PC types
        'PC': 'computer',
        'Workstation': 'computer',
        'Laptop': 'computer',
        'Server': 'computer',
        # Network types
        'Switch': 'network_device',
        'Router': 'network_device',
        'Access Point': 'network_device',
        'Camera': 'network_device',
        'IDF': 'network_device',
        'MDF': 'network_device',
        'Firewall': 'network_device',
    }


def migrate_machine_to_asset(machine_row, asset_type_id, target_session):
    """
    Create an Asset record from a Machine record.

    Args:
        machine_row: Row from source machines table
        asset_type_id: Target asset type ID
        target_session: SQLAlchemy session for target database

    Returns:
        Created asset ID
    """
    # Insert into assets table
    target_session.execute(text("""
        INSERT INTO assets (
            assetid, assetnumber, name, serialnumber,
            assettypeid, statusid, locationid, businessunitid,
            mapleft, maptop, notes, isactive, createddate, modifieddate
        ) VALUES (
            :assetid, :assetnumber, :name, :serialnumber,
            :assettypeid, :statusid, :locationid, :businessunitid,
            :mapleft, :maptop, :notes, :isactive, :createddate, :modifieddate
        )
    """), {
        'assetid': machine_row['machineid'],
        'assetnumber': machine_row['machinenumber'],
        'name': machine_row.get('alias'),
        'serialnumber': machine_row.get('serialnumber'),
        'assettypeid': asset_type_id,
        'statusid': machine_row.get('statusid', 1),
        'locationid': machine_row.get('locationid'),
        'businessunitid': machine_row.get('businessunitid'),
        'mapleft': machine_row.get('mapleft'),
        'maptop': machine_row.get('maptop'),
        'notes': machine_row.get('notes'),
        'isactive': machine_row.get('isactive', True),
        'createddate': machine_row.get('createddate', datetime.utcnow()),
        'modifieddate': machine_row.get('modifieddate', datetime.utcnow()),
    })

    return machine_row['machineid']


def migrate_equipment(machine_row, asset_id, target_session):
    """Create equipment extension record."""
    target_session.execute(text("""
        INSERT INTO equipment (
            assetid, equipmenttypeid, vendorid, modelnumberid,
            requiresmanualconfig, islocationonly, isactive, createddate
        ) VALUES (
            :assetid, :equipmenttypeid, :vendorid, :modelnumberid,
            :requiresmanualconfig, :islocationonly, :isactive, :createddate
        )
    """), {
        'assetid': asset_id,
        'equipmenttypeid': machine_row.get('machinetypeid'),  # May need mapping
        'vendorid': machine_row.get('vendorid'),
        'modelnumberid': machine_row.get('modelnumberid'),
        'requiresmanualconfig': machine_row.get('requiresmanualconfig', False),
        'islocationonly': machine_row.get('islocationonly', False),
        'isactive': True,
        'createddate': datetime.utcnow(),
    })


def migrate_computer(machine_row, asset_id, target_session):
    """Create computer extension record."""
    target_session.execute(text("""
        INSERT INTO computers (
            assetid, computertypeid, vendorid, operatingsystemid,
            hostname, currentuserid, lastuserid, lastboottime,
            lastzabbixsync, isvnc, isactive, createddate
        ) VALUES (
            :assetid, :computertypeid, :vendorid, :operatingsystemid,
            :hostname, :currentuserid, :lastuserid, :lastboottime,
            :lastzabbixsync, :isvnc, :isactive, :createddate
        )
    """), {
        'assetid': asset_id,
        'computertypeid': machine_row.get('pctypeid'),
        'vendorid': machine_row.get('vendorid'),
        'operatingsystemid': machine_row.get('operatingsystemid'),
        'hostname': machine_row.get('hostname'),
        'currentuserid': machine_row.get('currentuserid'),
        'lastuserid': machine_row.get('lastuserid'),
        'lastboottime': machine_row.get('lastboottime'),
        'lastzabbixsync': machine_row.get('lastzabbixsync'),
        'isvnc': machine_row.get('isvnc', False),
        'isactive': True,
        'createddate': datetime.utcnow(),
    })


def migrate_network_device(machine_row, asset_id, target_session):
    """Create network device extension record."""
    target_session.execute(text("""
        INSERT INTO networkdevices (
            assetid, networkdevicetypeid, vendorid, hostname,
            firmwareversion, portcount, ispoe, ismanaged,
            isactive, createddate
        ) VALUES (
            :assetid, :networkdevicetypeid, :vendorid, :hostname,
            :firmwareversion, :portcount, :ispoe, :ismanaged,
            :isactive, :createddate
        )
    """), {
        'assetid': asset_id,
        'networkdevicetypeid': machine_row.get('machinetypeid'),  # May need mapping
        'vendorid': machine_row.get('vendorid'),
        'hostname': machine_row.get('hostname'),
        'firmwareversion': machine_row.get('firmwareversion'),
        'portcount': machine_row.get('portcount'),
        'ispoe': machine_row.get('ispoe', False),
        'ismanaged': machine_row.get('ismanaged', False),
        'isactive': True,
        'createddate': datetime.utcnow(),
    })


def run_migration(source_conn_str, target_conn_str, dry_run=False):
    """
    Run the full migration from machines to assets.

    Args:
        source_conn_str: Connection string for source (VBScript) database
        target_conn_str: Connection string for target (Flask) database
        dry_run: If True, don't commit changes
    """
    source_engine = create_engine(source_conn_str)
    target_engine = create_engine(target_conn_str)

    SourceSession = sessionmaker(bind=source_engine)
    TargetSession = sessionmaker(bind=target_engine)

    source_session = SourceSession()
    target_session = TargetSession()

    try:
        # Get asset type mappings from target database
        asset_types = {}
        result = target_session.execute(text("SELECT assettypeid, assettype FROM assettypes"))
        for row in result:
            asset_types[row.assettype] = row.assettypeid

        # Get machine type to asset type mapping
        type_mapping = get_machine_type_mapping()

        # Fetch all machines from source
        machines = source_session.execute(text("""
            SELECT m.*, mt.machinetype
            FROM machines m
            LEFT JOIN machinetypes mt ON m.machinetypeid = mt.machinetypeid
        """))

        migrated = 0
        errors = 0

        for machine in machines:
            machine_dict = dict(machine._mapping)

            try:
                # Determine asset type
                machine_type_name = machine_dict.get('machinetype', '')
                asset_type_name = type_mapping.get(machine_type_name, 'equipment')
                asset_type_id = asset_types.get(asset_type_name)

                if not asset_type_id:
                    logger.warning(f"Unknown asset type for machine {machine_dict['machineid']}: {machine_type_name}")
                    errors += 1
                    continue

                # Create asset record
                asset_id = migrate_machine_to_asset(machine_dict, asset_type_id, target_session)

                # Create extension record based on type
                if asset_type_name == 'equipment':
                    migrate_equipment(machine_dict, asset_id, target_session)
                elif asset_type_name == 'computer':
                    migrate_computer(machine_dict, asset_id, target_session)
                elif asset_type_name == 'network_device':
                    migrate_network_device(machine_dict, asset_id, target_session)

                migrated += 1

                if migrated % 100 == 0:
                    logger.info(f"Migrated {migrated} machines...")

            except Exception as e:
                logger.error(f"Error migrating machine {machine_dict.get('machineid')}: {e}")
                errors += 1

        if dry_run:
            logger.info("Dry run - rolling back changes")
            target_session.rollback()
        else:
            target_session.commit()

        logger.info(f"Migration complete: {migrated} migrated, {errors} errors")

    finally:
        source_session.close()
        target_session.close()


def main():
    parser = argparse.ArgumentParser(description='Migrate machines to assets')
    parser.add_argument('--source', required=True, help='Source database connection string')
    parser.add_argument('--target', help='Target database connection string (default: app config)')
    parser.add_argument('--dry-run', action='store_true', help='Dry run without committing')

    args = parser.parse_args()

    target = args.target
    if not target:
        # Load from Flask config
        import os
        import sys
        sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
        from shopdb import create_app
        app = create_app()
        target = app.config['SQLALCHEMY_DATABASE_URI']

    run_migration(args.source, target, args.dry_run)


if __name__ == '__main__':
    main()
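Because run_migration() takes plain connection strings, a rehearsal can also be driven programmatically rather than via the CLI. A sketch with placeholder URLs (the host names and driver are assumptions, not part of this commit):

from scripts.migration.migrate_assets import run_migration

LEGACY = 'mysql+pymysql://user:pass@legacy-host/shopdb'   # placeholder
FLASK = 'mysql+pymysql://user:pass@flask-host/shopdb'     # placeholder

# Exercises the full machines -> assets mapping, then rolls back (dry_run=True),
# so the target database is left untouched while the log reports would-be counts.
run_migration(LEGACY, FLASK, dry_run=True)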
scripts/migration/migrate_communications.py (new file, 113 lines)
@@ -0,0 +1,113 @@
"""
Migrate communications table to use assetid instead of machineid.

This script updates the communications table FK from machineid to assetid.
Since assetid matches the original machineid, this is mostly a schema update.

Usage:
    python -m scripts.migration.migrate_communications
"""

import logging
from sqlalchemy import create_engine, text
from sqlalchemy.orm import sessionmaker

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def run_migration(conn_str, dry_run=False):
    """
    Update communications to use assetid.

    Args:
        conn_str: Database connection string
        dry_run: If True, don't commit changes
    """
    engine = create_engine(conn_str)
    Session = sessionmaker(bind=engine)
    session = Session()

    try:
        # Check if assetid column already exists
        result = session.execute(text("""
            SELECT COUNT(*) FROM information_schema.columns
            WHERE table_name = 'communications' AND column_name = 'assetid'
        """))
        has_assetid = result.scalar() > 0

        if not has_assetid:
            logger.info("Adding assetid column to communications table...")

            # Add assetid column
            session.execute(text("""
                ALTER TABLE communications
                ADD COLUMN assetid INT NULL
            """))

            # Copy machineid values to assetid
            session.execute(text("""
                UPDATE communications
                SET assetid = machineid
                WHERE machineid IS NOT NULL
            """))

            # Add FK constraint (optional, depends on DB)
            try:
                session.execute(text("""
                    ALTER TABLE communications
                    ADD CONSTRAINT fk_comm_asset
                    FOREIGN KEY (assetid) REFERENCES assets(assetid)
                """))
            except Exception as e:
                logger.warning(f"Could not add FK constraint: {e}")

            logger.info("assetid column added and populated")
        else:
            logger.info("assetid column already exists")

        # Count records
        result = session.execute(text("""
            SELECT COUNT(*) FROM communications WHERE assetid IS NOT NULL
        """))
        count = result.scalar()
        logger.info(f"Communications with assetid: {count}")

        if dry_run:
            logger.info("Dry run - rolling back changes")
            session.rollback()
        else:
            session.commit()
            logger.info("Migration complete")

    except Exception as e:
        logger.error(f"Migration error: {e}")
        session.rollback()
        raise
    finally:
        session.close()


def main():
    import argparse
    import os
    import sys

    parser = argparse.ArgumentParser(description='Migrate communications to use assetid')
    parser.add_argument('--connection', help='Database connection string')
    parser.add_argument('--dry-run', action='store_true', help='Dry run without committing')

    args = parser.parse_args()

    conn_str = args.connection
    if not conn_str:
        sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
        from shopdb import create_app
        app = create_app()
        conn_str = app.config['SQLALCHEMY_DATABASE_URI']

    run_migration(conn_str, args.dry_run)


if __name__ == '__main__':
    main()
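Since assetid is copied straight from machineid, the backfill is easy to audit after the fact. A sketch of that check (placeholder URL; not part of this commit):

from sqlalchemy import create_engine, text

engine = create_engine('mysql+pymysql://user:pass@flask-host/shopdb')  # placeholder
with engine.connect() as conn:
    diverged = conn.execute(text(
        "SELECT COUNT(*) FROM communications "
        "WHERE machineid IS NOT NULL AND assetid <> machineid"
    )).scalar()
# Expected to be 0: every populated assetid should mirror its machineid
print(f"communications rows where assetid != machineid: {diverged}")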
scripts/migration/migrate_notifications.py (new file, 139 lines)
@@ -0,0 +1,139 @@
"""
Migrate notifications from legacy database.

This script migrates notification data from the VBScript database
to the new notifications plugin schema.

Usage:
    python -m scripts.migration.migrate_notifications --source <connection_string>
"""

import argparse
import logging
from datetime import datetime
from sqlalchemy import create_engine, text
from sqlalchemy.orm import sessionmaker

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def get_notification_type_mapping(target_session):
    """Get mapping of type names to IDs in target database."""
    result = target_session.execute(text(
        "SELECT notificationtypeid, typename FROM notificationtypes"
    ))
    return {row.typename.lower(): row.notificationtypeid for row in result}


def run_migration(source_conn_str, target_conn_str, dry_run=False):
    """
    Run notification migration.

    Args:
        source_conn_str: Source database connection string
        target_conn_str: Target database connection string
        dry_run: If True, don't commit changes
    """
    source_engine = create_engine(source_conn_str)
    target_engine = create_engine(target_conn_str)

    SourceSession = sessionmaker(bind=source_engine)
    TargetSession = sessionmaker(bind=target_engine)

    source_session = SourceSession()
    target_session = TargetSession()

    try:
        # Get type mappings
        type_mapping = get_notification_type_mapping(target_session)

        # Default type if not found
        default_type_id = type_mapping.get('general', 1)

        # Fetch notifications from source
        # Adjust column names based on actual legacy schema
        notifications = source_session.execute(text("""
            SELECT n.*, nt.typename
            FROM notifications n
            LEFT JOIN notificationtypes nt ON n.notificationtypeid = nt.notificationtypeid
        """))

        migrated = 0
        errors = 0

        for notif in notifications:
            notif_dict = dict(notif._mapping)

            try:
                # Map notification type
                type_name = (notif_dict.get('typename') or 'general').lower()
                type_id = type_mapping.get(type_name, default_type_id)

                # Insert into target
                target_session.execute(text("""
                    INSERT INTO notifications (
                        title, message, notificationtypeid,
                        startdate, enddate, ispinned, showbanner, allday,
                        linkurl, affectedsystems, isactive, createddate
                    ) VALUES (
                        :title, :message, :notificationtypeid,
                        :startdate, :enddate, :ispinned, :showbanner, :allday,
                        :linkurl, :affectedsystems, :isactive, :createddate
                    )
                """), {
                    'title': notif_dict.get('title', 'Untitled'),
                    'message': notif_dict.get('message', ''),
                    'notificationtypeid': type_id,
                    'startdate': notif_dict.get('startdate', datetime.utcnow()),
                    'enddate': notif_dict.get('enddate'),
                    'ispinned': notif_dict.get('ispinned', False),
                    'showbanner': notif_dict.get('showbanner', True),
                    'allday': notif_dict.get('allday', True),
                    'linkurl': notif_dict.get('linkurl'),
                    'affectedsystems': notif_dict.get('affectedsystems'),
                    'isactive': notif_dict.get('isactive', True),
                    'createddate': notif_dict.get('createddate', datetime.utcnow()),
                })

                migrated += 1

            except Exception as e:
                logger.error(f"Error migrating notification: {e}")
                errors += 1

        if dry_run:
            logger.info("Dry run - rolling back changes")
            target_session.rollback()
        else:
            target_session.commit()

        logger.info(f"Migration complete: {migrated} migrated, {errors} errors")

    finally:
        source_session.close()
        target_session.close()


def main():
    parser = argparse.ArgumentParser(description='Migrate notifications')
    parser.add_argument('--source', required=True, help='Source database connection string')
    parser.add_argument('--target', help='Target database connection string')
    parser.add_argument('--dry-run', action='store_true', help='Dry run without committing')

    args = parser.parse_args()

    target = args.target
    if not target:
        import os
        import sys
        sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
        from shopdb import create_app
        app = create_app()
        target = app.config['SQLALCHEMY_DATABASE_URI']

    run_migration(args.source, target, args.dry_run)


if __name__ == '__main__':
    main()
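The type lookup degrades gracefully: an unrecognized or missing legacy type name falls back to 'general' (or ID 1 if even that is absent). Illustrative values only; the IDs below are assumptions:

# Shape of the dict returned by get_notification_type_mapping (values assumed)
type_mapping = {'general': 1, 'outage': 2, 'maintenance': 3}
default_type_id = type_mapping.get('general', 1)

type_name = (None or 'general').lower()            # source row had no typename
assert type_mapping.get(type_name, default_type_id) == 1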
scripts/migration/migrate_usb.py (new file, 199 lines)
@@ -0,0 +1,199 @@
"""
Migrate USB checkout data from legacy database.

This script migrates USB device and checkout data from the VBScript database
to the new USB plugin schema.

Usage:
    python -m scripts.migration.migrate_usb --source <connection_string>
"""

import argparse
import logging
from datetime import datetime
from sqlalchemy import create_engine, text
from sqlalchemy.orm import sessionmaker

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def get_device_type_mapping(target_session):
    """Get mapping of type names to IDs in target database."""
    result = target_session.execute(text(
        "SELECT usbdevicetypeid, typename FROM usbdevicetypes"
    ))
    return {row.typename.lower(): row.usbdevicetypeid for row in result}


def run_migration(source_conn_str, target_conn_str, dry_run=False):
    """
    Run USB device migration.

    Args:
        source_conn_str: Source database connection string
        target_conn_str: Target database connection string
        dry_run: If True, don't commit changes
    """
    source_engine = create_engine(source_conn_str)
    target_engine = create_engine(target_conn_str)

    SourceSession = sessionmaker(bind=source_engine)
    TargetSession = sessionmaker(bind=target_engine)

    source_session = SourceSession()
    target_session = TargetSession()

    try:
        # Get type mappings
        type_mapping = get_device_type_mapping(target_session)
        default_type_id = type_mapping.get('flash drive', 1)

        # Migrate USB devices
        # Adjust table/column names based on actual legacy schema
        logger.info("Migrating USB devices...")

        try:
            devices = source_session.execute(text("""
                SELECT * FROM usbdevices
            """))

            device_id_map = {}  # Map old IDs to new IDs

            for device in devices:
                device_dict = dict(device._mapping)

                # Determine device type
                type_name = (device_dict.get('typename') or 'flash drive').lower()
                type_id = type_mapping.get(type_name, default_type_id)

                result = target_session.execute(text("""
                    INSERT INTO usbdevices (
                        serialnumber, label, assetnumber, usbdevicetypeid,
                        capacitygb, vendorid, productid, manufacturer, productname,
                        ischeckedout, currentuserid, currentusername,
                        storagelocation, notes, isactive, createddate
                    ) VALUES (
                        :serialnumber, :label, :assetnumber, :usbdevicetypeid,
                        :capacitygb, :vendorid, :productid, :manufacturer, :productname,
                        :ischeckedout, :currentuserid, :currentusername,
                        :storagelocation, :notes, :isactive, :createddate
                    )
                """), {
                    'serialnumber': device_dict.get('serialnumber', f"UNKNOWN_{device_dict.get('usbdeviceid', 0)}"),
                    'label': device_dict.get('label'),
                    'assetnumber': device_dict.get('assetnumber'),
                    'usbdevicetypeid': type_id,
                    'capacitygb': device_dict.get('capacitygb'),
                    'vendorid': device_dict.get('vendorid'),
                    'productid': device_dict.get('productid'),
                    'manufacturer': device_dict.get('manufacturer'),
                    'productname': device_dict.get('productname'),
                    'ischeckedout': device_dict.get('ischeckedout', False),
                    'currentuserid': device_dict.get('currentuserid'),
                    'currentusername': device_dict.get('currentusername'),
                    'storagelocation': device_dict.get('storagelocation'),
                    'notes': device_dict.get('notes'),
                    'isactive': device_dict.get('isactive', True),
                    'createddate': device_dict.get('createddate', datetime.utcnow()),
                })

                # Get the new ID
                new_id = target_session.execute(text("SELECT LAST_INSERT_ID()")).scalar()
                device_id_map[device_dict.get('usbdeviceid')] = new_id

            logger.info(f"Migrated {len(device_id_map)} USB devices")

        except Exception as e:
            logger.warning(f"Could not migrate USB devices: {e}")
            device_id_map = {}

        # Migrate checkout history
        logger.info("Migrating USB checkout history...")

        try:
            checkouts = source_session.execute(text("""
                SELECT * FROM usbcheckouts
            """))

            checkout_count = 0

            for checkout in checkouts:
                checkout_dict = dict(checkout._mapping)

                old_device_id = checkout_dict.get('usbdeviceid')
                new_device_id = device_id_map.get(old_device_id)

                if not new_device_id:
                    logger.warning(f"Skipping checkout - device ID {old_device_id} not found in mapping")
                    continue

                target_session.execute(text("""
                    INSERT INTO usbcheckouts (
                        usbdeviceid, userid, username,
                        checkoutdate, checkindate, expectedreturndate,
                        purpose, notes, checkedoutby, checkedinby,
                        isactive, createddate
                    ) VALUES (
                        :usbdeviceid, :userid, :username,
                        :checkoutdate, :checkindate, :expectedreturndate,
                        :purpose, :notes, :checkedoutby, :checkedinby,
                        :isactive, :createddate
                    )
                """), {
                    'usbdeviceid': new_device_id,
                    'userid': checkout_dict.get('userid', 'unknown'),
                    'username': checkout_dict.get('username'),
                    'checkoutdate': checkout_dict.get('checkoutdate', datetime.utcnow()),
                    'checkindate': checkout_dict.get('checkindate'),
                    'expectedreturndate': checkout_dict.get('expectedreturndate'),
                    'purpose': checkout_dict.get('purpose'),
                    'notes': checkout_dict.get('notes'),
                    'checkedoutby': checkout_dict.get('checkedoutby'),
                    'checkedinby': checkout_dict.get('checkedinby'),
                    'isactive': True,
                    'createddate': checkout_dict.get('createddate', datetime.utcnow()),
                })

                checkout_count += 1

            logger.info(f"Migrated {checkout_count} checkout records")

        except Exception as e:
            logger.warning(f"Could not migrate USB checkouts: {e}")

        if dry_run:
            logger.info("Dry run - rolling back changes")
            target_session.rollback()
        else:
            target_session.commit()

        logger.info("USB migration complete")

    finally:
        source_session.close()
        target_session.close()


def main():
    parser = argparse.ArgumentParser(description='Migrate USB devices and checkouts')
    parser.add_argument('--source', required=True, help='Source database connection string')
    parser.add_argument('--target', help='Target database connection string')
    parser.add_argument('--dry-run', action='store_true', help='Dry run without committing')

    args = parser.parse_args()

    target = args.target
    if not target:
        import os
        import sys
        sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
        from shopdb import create_app
        app = create_app()
        target = app.config['SQLALCHEMY_DATABASE_URI']

    run_migration(args.source, target, args.dry_run)


if __name__ == '__main__':
    main()
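Note the contrast with the assets migration: USB devices receive fresh auto-increment IDs rather than keeping their legacy ones, so historical checkouts must be re-pointed through device_id_map before insert. A tiny illustration with assumed values:

# old usbdeviceid -> new LAST_INSERT_ID() captured during the device pass
device_id_map = {17: 3, 42: 4}                     # assumed values

legacy_checkout = {'usbdeviceid': 42, 'userid': 'jdoe'}
new_device_id = device_id_map.get(legacy_checkout['usbdeviceid'])
assert new_device_id == 4                          # checkout re-pointed at the new row

orphan = {'usbdeviceid': 99, 'userid': 'asmith'}
assert device_id_map.get(orphan['usbdeviceid']) is None  # skipped with a warning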
scripts/migration/run_migration.py (new file, 139 lines)
@@ -0,0 +1,139 @@
"""
Migration orchestrator - runs all migration steps in order.

This script coordinates the full migration from VBScript ShopDB to Flask.

Usage:
    python -m scripts.migration.run_migration --source <connection_string>
"""

import argparse
import logging
import sys
from datetime import datetime

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)


def run_full_migration(source_conn_str, target_conn_str, dry_run=False, steps=None):
    """
    Run the full migration process.

    Args:
        source_conn_str: Source database connection string
        target_conn_str: Target database connection string
        dry_run: If True, don't commit changes
        steps: List of specific steps to run, or None for all
    """
    from . import migrate_assets
    from . import migrate_communications
    from . import migrate_notifications
    from . import migrate_usb
    from . import verify_migration

    all_steps = [
        ('assets', 'Migrate machines to assets', migrate_assets.run_migration),
        ('communications', 'Update communications FKs', migrate_communications.run_migration),
        ('notifications', 'Migrate notifications', migrate_notifications.run_migration),
        ('usb', 'Migrate USB devices', migrate_usb.run_migration),
    ]

    logger.info("=" * 60)
    logger.info("SHOPDB MIGRATION")
    logger.info(f"Started: {datetime.utcnow().isoformat()}")
    logger.info(f"Dry Run: {dry_run}")
    logger.info("=" * 60)

    results = {}

    for step_name, description, migration_func in all_steps:
        if steps and step_name not in steps:
            logger.info(f"\nSkipping: {description}")
            continue

        logger.info(f"\n{'=' * 40}")
        logger.info(f"Step: {description}")
        logger.info('=' * 40)

        try:
            # Different migrations have different signatures
            if step_name == 'communications':
                migration_func(target_conn_str, dry_run)
            else:
                migration_func(source_conn_str, target_conn_str, dry_run)

            results[step_name] = 'SUCCESS'
            logger.info(f"Step completed: {step_name}")

        except Exception as e:
            results[step_name] = f'FAILED: {e}'
            logger.error(f"Step failed: {step_name} - {e}")

            # Ask to continue
            if not dry_run:
                response = input("Continue with next step? (y/n): ")
                if response.lower() != 'y':
                    logger.info("Migration aborted by user")
                    break

    # Run verification
    logger.info(f"\n{'=' * 40}")
    logger.info("Running verification...")
    logger.info('=' * 40)

    try:
        verify_migration.run_verification(source_conn_str, target_conn_str)
        results['verification'] = 'SUCCESS'
    except Exception as e:
        results['verification'] = f'FAILED: {e}'
        logger.error(f"Verification failed: {e}")

    # Summary
    logger.info("\n" + "=" * 60)
    logger.info("MIGRATION SUMMARY")
    logger.info("=" * 60)

    for step, result in results.items():
        status = "OK" if result == 'SUCCESS' else "FAILED"
        logger.info(f"  {step}: {status}")
        if result != 'SUCCESS':
            logger.info(f"    {result}")

    logger.info("=" * 60)
    logger.info(f"Completed: {datetime.utcnow().isoformat()}")

    return results


def main():
    parser = argparse.ArgumentParser(description='Run full ShopDB migration')
    parser.add_argument('--source', required=True, help='Source database connection string')
    parser.add_argument('--target', help='Target database connection string')
    parser.add_argument('--dry-run', action='store_true', help='Dry run without committing')
    parser.add_argument('--steps', nargs='+',
                        choices=['assets', 'communications', 'notifications', 'usb'],
                        help='Specific steps to run')

    args = parser.parse_args()

    target = args.target
    if not target:
        import os
        sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
        from shopdb import create_app
        app = create_app()
        target = app.config['SQLALCHEMY_DATABASE_URI']

    results = run_full_migration(args.source, target, args.dry_run, args.steps)

    # Exit with error if any step failed
    if any(r != 'SUCCESS' for r in results.values()):
        sys.exit(1)


if __name__ == '__main__':
    main()
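The orchestrator can also be driven from Python, which is handy for rehearsing a partial migration. A sketch with placeholder URLs (not part of this commit):

from scripts.migration.run_migration import run_full_migration

results = run_full_migration(
    'mysql+pymysql://user:pass@legacy-host/shopdb',   # placeholder
    'mysql+pymysql://user:pass@flask-host/shopdb',    # placeholder
    dry_run=True,
    steps=['assets', 'usb'],   # communications and notifications are skipped
)
# e.g. {'assets': 'SUCCESS', 'usb': 'SUCCESS', 'verification': 'SUCCESS'}
print(results)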
scripts/migration/verify_migration.py (new file, 174 lines)
@@ -0,0 +1,174 @@
"""
Verify data migration integrity.

This script compares record counts between source and target databases
and performs spot checks on data integrity.

Usage:
    python -m scripts.migration.verify_migration --source <connection_string>
"""

import argparse
import logging
from sqlalchemy import create_engine, text
from sqlalchemy.orm import sessionmaker

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def verify_counts(source_session, target_session):
    """Compare record counts between source and target."""
    results = {}

    # Define table mappings (source -> target)
    table_mappings = [
        ('machines', 'assets', 'Machine to Asset'),
        ('communications', 'communications', 'Communications'),
        ('vendors', 'vendors', 'Vendors'),
        ('locations', 'locations', 'Locations'),
        ('businessunits', 'businessunits', 'Business Units'),
    ]

    for source_table, target_table, description in table_mappings:
        try:
            source_count = source_session.execute(text(f"SELECT COUNT(*) FROM {source_table}")).scalar()
        except Exception as e:
            source_count = f"Error: {e}"

        try:
            target_count = target_session.execute(text(f"SELECT COUNT(*) FROM {target_table}")).scalar()
        except Exception as e:
            target_count = f"Error: {e}"

        match = source_count == target_count if isinstance(source_count, int) and isinstance(target_count, int) else False

        results[description] = {
            'source': source_count,
            'target': target_count,
            'match': match
        }

    return results


def verify_sample_records(source_session, target_session, sample_size=10):
    """Spot-check sample records for data integrity."""
    issues = []

    # Sample machine -> asset migration
    try:
        sample_machines = source_session.execute(text(f"""
            SELECT machineid, machinenumber, serialnumber, alias
            FROM machines
            ORDER BY RAND()
            LIMIT {sample_size}
        """))

        for machine in sample_machines:
            machine_dict = dict(machine._mapping)

            # Check if asset exists with same ID
            asset = target_session.execute(text("""
                SELECT assetid, assetnumber, serialnumber, name
                FROM assets
                WHERE assetid = :assetid
            """), {'assetid': machine_dict['machineid']}).fetchone()

            if not asset:
                issues.append(f"Machine {machine_dict['machineid']} not found in assets")
                continue

            asset_dict = dict(asset._mapping)

            # Verify data matches
            if machine_dict['machinenumber'] != asset_dict['assetnumber']:
                issues.append(f"Asset {asset_dict['assetid']}: machinenumber mismatch")
            if machine_dict.get('serialnumber') != asset_dict.get('serialnumber'):
                issues.append(f"Asset {asset_dict['assetid']}: serialnumber mismatch")

    except Exception as e:
        issues.append(f"Could not verify machines: {e}")

    return issues


def run_verification(source_conn_str, target_conn_str):
    """
    Run migration verification.

    Args:
        source_conn_str: Source database connection string
        target_conn_str: Target database connection string
    """
    source_engine = create_engine(source_conn_str)
    target_engine = create_engine(target_conn_str)

    SourceSession = sessionmaker(bind=source_engine)
    TargetSession = sessionmaker(bind=target_engine)

    source_session = SourceSession()
    target_session = TargetSession()

    try:
        logger.info("=" * 60)
        logger.info("MIGRATION VERIFICATION REPORT")
        logger.info("=" * 60)

        # Verify counts
        logger.info("\nRecord Count Comparison:")
        logger.info("-" * 40)
        counts = verify_counts(source_session, target_session)

        all_match = True
        for table, result in counts.items():
            status = "OK" if result['match'] else "MISMATCH"
            if not result['match']:
                all_match = False
            logger.info(f"  {table}: Source={result['source']}, Target={result['target']} [{status}]")

        # Verify sample records
        logger.info("\nSample Record Verification:")
        logger.info("-" * 40)
        issues = verify_sample_records(source_session, target_session)

        if issues:
            for issue in issues:
                logger.warning(f"  ! {issue}")
        else:
            logger.info("  All sample records verified OK")

        # Summary
        logger.info("\n" + "=" * 60)
        if all_match and not issues:
            logger.info("VERIFICATION PASSED - Migration looks good!")
        else:
            logger.warning("VERIFICATION FOUND ISSUES - Review above")
        logger.info("=" * 60)

    finally:
        source_session.close()
        target_session.close()


def main():
    parser = argparse.ArgumentParser(description='Verify migration integrity')
    parser.add_argument('--source', required=True, help='Source database connection string')
    parser.add_argument('--target', help='Target database connection string')

    args = parser.parse_args()

    target = args.target
    if not target:
        import os
        import sys
        sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
        from shopdb import create_app
        app = create_app()
        target = app.config['SQLALCHEMY_DATABASE_URI']

    run_verification(args.source, target)


if __name__ == '__main__':
    main()
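Verification only issues SELECTs, so it can be rerun at any point after cutover, independent of the orchestrator. A sketch with placeholder URLs (not part of this commit):

from scripts.migration.verify_migration import run_verification

run_verification(
    'mysql+pymysql://user:pass@legacy-host/shopdb',   # placeholder
    'mysql+pymysql://user:pass@flask-host/shopdb',    # placeholder
)
# Logs a per-table count comparison plus any sample-record mismatches.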