diff --git a/guacamole_test_11_26/.dockerignore b/guacamole_test_11_26/.dockerignore
new file mode 100755
index 00000000..4d8045f0
--- /dev/null
+++ b/guacamole_test_11_26/.dockerignore
@@ -0,0 +1,54 @@
+# Docker ignore file for Remote Access API
+
+# Git
+.git
+.gitignore
+
+# Documentation
+*.md
+docs/
+
+# Environment files
+.env
+.env.local
+.env.*.local
+
+# Logs
+logs/
+*.log
+nginx/logs/
+
+# SSL certificates (will be mounted as volumes)
+nginx/ssl/
+
+# IDE files
+.vscode/
+.idea/
+*.swp
+*.swo
+
+# OS files
+.DS_Store
+Thumbs.db
+
+# Python cache
+__pycache__/
+*.pyc
+*.pyo
+*.pyd
+.Python
+*.so
+
+# Virtual environments
+venv/
+env/
+ENV/
+
+# Testing
+.pytest_cache/
+.coverage
+htmlcov/
+
+# Temporary files
+tmp/
+temp/
diff --git a/guacamole_test_11_26/.gitignore b/guacamole_test_11_26/.gitignore
new file mode 100755
index 00000000..4fbc09b4
--- /dev/null
+++ b/guacamole_test_11_26/.gitignore
@@ -0,0 +1,47 @@
+# Docker
+.env
+docker-compose.override.yml
+
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.pyc
+venv/
+env/
+.venv/
+
+# Logs
+*.log
+logs/
+
+# Database
+*.db
+*.sqlite
+
+# Generated SQL files with passwords (SECURITY)
+*custom*.sql
+*-admin-user.sql
+# Except the default template (gitignore does not support inline comments)
+!002-create-admin-user.sql
+*-GENERATED.sql
+*-DEFAULT-BACKUP.sql
+update-*.sql
+create-user-*.sql
+
+# IDE
+.vscode/
+.idea/
+*.swp
+*.swo
+
+# OS
+.DS_Store
+Thumbs.db
+
+# Temporary files
+*.tmp
+*.temp
+
+# PostgreSQL data (if running locally)
+data/
\ No newline at end of file
diff --git a/guacamole_test_11_26/001-create-schema.sql b/guacamole_test_11_26/001-create-schema.sql
new file mode 100755
index 00000000..9bcf1c51
--- /dev/null
+++ b/guacamole_test_11_26/001-create-schema.sql
@@ -0,0 +1,736 @@
+--
+-- Licensed to the Apache Software Foundation (ASF) under one
+-- or more contributor license agreements. See the NOTICE file
+-- distributed with this work for additional information
+-- regarding copyright ownership. The ASF licenses this file
+-- to you under the Apache License, Version 2.0 (the
+-- "License"); you may not use this file except in compliance
+-- with the License. You may obtain a copy of the License at
+--
+--   http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing,
+-- software distributed under the License is distributed on an
+-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+-- KIND, either express or implied. See the License for the
+-- specific language governing permissions and limitations
+-- under the License.
+--
+
+--
+-- Connection group types
+--
+
+CREATE TYPE guacamole_connection_group_type AS ENUM(
+    'ORGANIZATIONAL',
+    'BALANCING'
+);
+
+--
+-- Entity types
+--
+
+CREATE TYPE guacamole_entity_type AS ENUM(
+    'USER',
+    'USER_GROUP'
+);
+
+--
+-- Object permission types
+--
+
+CREATE TYPE guacamole_object_permission_type AS ENUM(
+    'READ',
+    'UPDATE',
+    'DELETE',
+    'ADMINISTER'
+);
+
+--
+-- System permission types
+--
+
+CREATE TYPE guacamole_system_permission_type AS ENUM(
+    'CREATE_CONNECTION',
+    'CREATE_CONNECTION_GROUP',
+    'CREATE_SHARING_PROFILE',
+    'CREATE_USER',
+    'CREATE_USER_GROUP',
+    'ADMINISTER'
+);
+
+--
+-- Guacamole proxy (guacd) encryption methods
+--
+
+CREATE TYPE guacamole_proxy_encryption_method AS ENUM(
+    'NONE',
+    'SSL'
+);
+
+--
+-- Table of connection groups. Each connection group has a name.
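+-- Groups form a tree via the self-referencing parent_id column; purely
+-- for illustration (not part of the schema), the hierarchy can be walked
+-- with a recursive CTE such as:
+--
+--   WITH RECURSIVE tree AS (
+--     SELECT connection_group_id, connection_group_name, parent_id
+--       FROM guacamole_connection_group WHERE parent_id IS NULL
+--     UNION ALL
+--     SELECT g.connection_group_id, g.connection_group_name, g.parent_id
+--       FROM guacamole_connection_group g
+--       JOIN tree t ON g.parent_id = t.connection_group_id
+--   )
+--   SELECT * FROM tree;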
+--
+
+CREATE TABLE guacamole_connection_group (
+
+  connection_group_id   serial       NOT NULL,
+  parent_id             integer,
+  connection_group_name varchar(128) NOT NULL,
+  type                  guacamole_connection_group_type
+                        NOT NULL DEFAULT 'ORGANIZATIONAL',
+
+  -- Concurrency limits
+  max_connections          integer,
+  max_connections_per_user integer,
+  enable_session_affinity  boolean NOT NULL DEFAULT FALSE,
+
+  PRIMARY KEY (connection_group_id),
+
+  CONSTRAINT connection_group_name_parent
+    UNIQUE (connection_group_name, parent_id),
+
+  CONSTRAINT guacamole_connection_group_ibfk_1
+    FOREIGN KEY (parent_id)
+    REFERENCES guacamole_connection_group (connection_group_id)
+    ON DELETE CASCADE
+
+);
+
+CREATE INDEX guacamole_connection_group_parent_id
+    ON guacamole_connection_group(parent_id);
+
+--
+-- Table of connections. Each connection has a name, protocol, and
+-- associated set of parameters.
+-- A connection may belong to a connection group.
+--
+
+CREATE TABLE guacamole_connection (
+
+  connection_id   serial       NOT NULL,
+  connection_name varchar(128) NOT NULL,
+  parent_id       integer,
+  protocol        varchar(32)  NOT NULL,
+
+  -- Concurrency limits
+  max_connections          integer,
+  max_connections_per_user integer,
+
+  -- Connection Weight
+  connection_weight integer,
+  failover_only     boolean NOT NULL DEFAULT FALSE,
+
+  -- Guacamole proxy (guacd) overrides
+  proxy_port              integer,
+  proxy_hostname          varchar(512),
+  proxy_encryption_method guacamole_proxy_encryption_method,
+
+  PRIMARY KEY (connection_id),
+
+  CONSTRAINT connection_name_parent
+    UNIQUE (connection_name, parent_id),
+
+  CONSTRAINT guacamole_connection_ibfk_1
+    FOREIGN KEY (parent_id)
+    REFERENCES guacamole_connection_group (connection_group_id)
+    ON DELETE CASCADE
+
+);
+
+CREATE INDEX guacamole_connection_parent_id
+    ON guacamole_connection(parent_id);
+
+--
+-- Table of base entities which may each be either a user or user group. Other
+-- tables which represent qualities shared by both users and groups will point
+-- to guacamole_entity, while tables which represent qualities specific to
+-- users or groups will point to guacamole_user or guacamole_user_group.
+--
+
+CREATE TABLE guacamole_entity (
+
+  entity_id serial                NOT NULL,
+  name      varchar(128)          NOT NULL,
+  type      guacamole_entity_type NOT NULL,
+
+  PRIMARY KEY (entity_id),
+
+  CONSTRAINT guacamole_entity_name_scope
+    UNIQUE (type, name)
+
+);
+
+--
+-- Table of users. Each user has a unique username and a hashed password
+-- with corresponding salt. Although the authentication system will always set
+-- salted passwords, other systems may set unsalted passwords by simply not
+-- providing the salt.
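+-- For reference (per the Guacamole JDBC documentation examples), the salted
+-- hash is SHA-256 over the password concatenated with the hex-encoded salt;
+-- with no salt stored, the hash is simply SHA-256 of the password.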
+--
+
+CREATE TABLE guacamole_user (
+
+  user_id   serial  NOT NULL,
+  entity_id integer NOT NULL,
+
+  -- Optionally-salted password
+  password_hash bytea       NOT NULL,
+  password_salt bytea,
+  password_date timestamptz NOT NULL,
+
+  -- Account disabled/expired status
+  disabled boolean NOT NULL DEFAULT FALSE,
+  expired  boolean NOT NULL DEFAULT FALSE,
+
+  -- Time-based access restriction
+  access_window_start time,
+  access_window_end   time,
+
+  -- Date-based access restriction
+  valid_from  date,
+  valid_until date,
+
+  -- Timezone used for all date/time comparisons and interpretation
+  timezone varchar(64),
+
+  -- Profile information
+  full_name           varchar(256),
+  email_address       varchar(256),
+  organization        varchar(256),
+  organizational_role varchar(256),
+
+  PRIMARY KEY (user_id),
+
+  CONSTRAINT guacamole_user_single_entity
+    UNIQUE (entity_id),
+
+  CONSTRAINT guacamole_user_entity
+    FOREIGN KEY (entity_id)
+    REFERENCES guacamole_entity (entity_id)
+    ON DELETE CASCADE
+
+);
+
+--
+-- Table of user groups. Each user group may have an arbitrary set of member
+-- users and member groups, with those members inheriting the permissions
+-- granted to that group.
+--
+
+CREATE TABLE guacamole_user_group (
+
+  user_group_id serial  NOT NULL,
+  entity_id     integer NOT NULL,
+
+  -- Group disabled status
+  disabled boolean NOT NULL DEFAULT FALSE,
+
+  PRIMARY KEY (user_group_id),
+
+  CONSTRAINT guacamole_user_group_single_entity
+    UNIQUE (entity_id),
+
+  CONSTRAINT guacamole_user_group_entity
+    FOREIGN KEY (entity_id)
+    REFERENCES guacamole_entity (entity_id)
+    ON DELETE CASCADE
+
+);
+
+--
+-- Table of users which are members of given user groups.
+--
+
+CREATE TABLE guacamole_user_group_member (
+
+  user_group_id    integer NOT NULL,
+  member_entity_id integer NOT NULL,
+
+  PRIMARY KEY (user_group_id, member_entity_id),
+
+  -- Parent must be a user group
+  CONSTRAINT guacamole_user_group_member_parent
+    FOREIGN KEY (user_group_id)
+    REFERENCES guacamole_user_group (user_group_id) ON DELETE CASCADE,
+
+  -- Member may be either a user or a user group (any entity)
+  CONSTRAINT guacamole_user_group_member_entity
+    FOREIGN KEY (member_entity_id)
+    REFERENCES guacamole_entity (entity_id) ON DELETE CASCADE
+
+);
+
+--
+-- Table of sharing profiles. Each sharing profile has a name, associated set
+-- of parameters, and a primary connection. The primary connection is the
+-- connection that the sharing profile shares, and the parameters dictate the
+-- restrictions/features which apply to the user joining the connection via the
+-- sharing profile.
+--
+
+CREATE TABLE guacamole_sharing_profile (
+
+  sharing_profile_id    serial       NOT NULL,
+  sharing_profile_name  varchar(128) NOT NULL,
+  primary_connection_id integer      NOT NULL,
+
+  PRIMARY KEY (sharing_profile_id),
+
+  CONSTRAINT sharing_profile_name_primary
+    UNIQUE (sharing_profile_name, primary_connection_id),
+
+  CONSTRAINT guacamole_sharing_profile_ibfk_1
+    FOREIGN KEY (primary_connection_id)
+    REFERENCES guacamole_connection (connection_id)
+    ON DELETE CASCADE
+
+);
+
+CREATE INDEX guacamole_sharing_profile_primary_connection_id
+    ON guacamole_sharing_profile(primary_connection_id);
+
+--
+-- Table of connection parameters. Each parameter is simply a name/value pair
+-- associated with a connection.
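+-- For example (illustrative values only), an RDP connection might carry:
+--
+--   INSERT INTO guacamole_connection_parameter
+--   VALUES (1, 'hostname', '192.168.1.100'),
+--          (1, 'port', '3389');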
+--
+
+CREATE TABLE guacamole_connection_parameter (
+
+  connection_id   integer       NOT NULL,
+  parameter_name  varchar(128)  NOT NULL,
+  parameter_value varchar(4096) NOT NULL,
+
+  PRIMARY KEY (connection_id,parameter_name),
+
+  CONSTRAINT guacamole_connection_parameter_ibfk_1
+    FOREIGN KEY (connection_id)
+    REFERENCES guacamole_connection (connection_id) ON DELETE CASCADE
+
+);
+
+CREATE INDEX guacamole_connection_parameter_connection_id
+    ON guacamole_connection_parameter(connection_id);
+
+--
+-- Table of sharing profile parameters. Each parameter is simply a name/value
+-- pair associated with a sharing profile. These parameters dictate
+-- the restrictions/features which apply to the user joining the associated
+-- connection via the sharing profile.
+--
+
+CREATE TABLE guacamole_sharing_profile_parameter (
+
+  sharing_profile_id integer       NOT NULL,
+  parameter_name     varchar(128)  NOT NULL,
+  parameter_value    varchar(4096) NOT NULL,
+
+  PRIMARY KEY (sharing_profile_id, parameter_name),
+
+  CONSTRAINT guacamole_sharing_profile_parameter_ibfk_1
+    FOREIGN KEY (sharing_profile_id)
+    REFERENCES guacamole_sharing_profile (sharing_profile_id) ON DELETE CASCADE
+
+);
+
+CREATE INDEX guacamole_sharing_profile_parameter_sharing_profile_id
+    ON guacamole_sharing_profile_parameter(sharing_profile_id);
+
+--
+-- Table of arbitrary user attributes. Each attribute is simply a name/value
+-- pair associated with a user. Arbitrary attributes are defined by other
+-- extensions. Attributes defined by this extension will be mapped to
+-- properly-typed columns of a specific table.
+--
+
+CREATE TABLE guacamole_user_attribute (
+
+  user_id         integer       NOT NULL,
+  attribute_name  varchar(128)  NOT NULL,
+  attribute_value varchar(4096) NOT NULL,
+
+  PRIMARY KEY (user_id, attribute_name),
+
+  CONSTRAINT guacamole_user_attribute_ibfk_1
+    FOREIGN KEY (user_id)
+    REFERENCES guacamole_user (user_id) ON DELETE CASCADE
+
+);
+
+CREATE INDEX guacamole_user_attribute_user_id
+    ON guacamole_user_attribute(user_id);
+
+--
+-- Table of arbitrary user group attributes. Each attribute is simply a
+-- name/value pair associated with a user group. Arbitrary attributes are
+-- defined by other extensions. Attributes defined by this extension will be
+-- mapped to properly-typed columns of a specific table.
+--
+
+CREATE TABLE guacamole_user_group_attribute (
+
+  user_group_id   integer       NOT NULL,
+  attribute_name  varchar(128)  NOT NULL,
+  attribute_value varchar(4096) NOT NULL,
+
+  PRIMARY KEY (user_group_id, attribute_name),
+
+  CONSTRAINT guacamole_user_group_attribute_ibfk_1
+    FOREIGN KEY (user_group_id)
+    REFERENCES guacamole_user_group (user_group_id) ON DELETE CASCADE
+
+);
+
+CREATE INDEX guacamole_user_group_attribute_user_group_id
+    ON guacamole_user_group_attribute(user_group_id);
+
+--
+-- Table of arbitrary connection attributes. Each attribute is simply a
+-- name/value pair associated with a connection. Arbitrary attributes are
+-- defined by other extensions. Attributes defined by this extension will be
+-- mapped to properly-typed columns of a specific table.
+--
+
+CREATE TABLE guacamole_connection_attribute (
+
+  connection_id   integer       NOT NULL,
+  attribute_name  varchar(128)  NOT NULL,
+  attribute_value varchar(4096) NOT NULL,
+
+  PRIMARY KEY (connection_id, attribute_name),
+
+  CONSTRAINT guacamole_connection_attribute_ibfk_1
+    FOREIGN KEY (connection_id)
+    REFERENCES guacamole_connection (connection_id) ON DELETE CASCADE
+
+);
+
+CREATE INDEX guacamole_connection_attribute_connection_id
+    ON guacamole_connection_attribute(connection_id);
+
+--
+-- Table of arbitrary connection group attributes. Each attribute is simply a
+-- name/value pair associated with a connection group. Arbitrary attributes are
+-- defined by other extensions. Attributes defined by this extension will be
+-- mapped to properly-typed columns of a specific table.
+--
+
+CREATE TABLE guacamole_connection_group_attribute (
+
+  connection_group_id integer       NOT NULL,
+  attribute_name      varchar(128)  NOT NULL,
+  attribute_value     varchar(4096) NOT NULL,
+
+  PRIMARY KEY (connection_group_id, attribute_name),
+
+  CONSTRAINT guacamole_connection_group_attribute_ibfk_1
+    FOREIGN KEY (connection_group_id)
+    REFERENCES guacamole_connection_group (connection_group_id) ON DELETE CASCADE
+
+);
+
+CREATE INDEX guacamole_connection_group_attribute_connection_group_id
+    ON guacamole_connection_group_attribute(connection_group_id);
+
+--
+-- Table of arbitrary sharing profile attributes. Each attribute is simply a
+-- name/value pair associated with a sharing profile. Arbitrary attributes are
+-- defined by other extensions. Attributes defined by this extension will be
+-- mapped to properly-typed columns of a specific table.
+--
+
+CREATE TABLE guacamole_sharing_profile_attribute (
+
+  sharing_profile_id integer       NOT NULL,
+  attribute_name     varchar(128)  NOT NULL,
+  attribute_value    varchar(4096) NOT NULL,
+
+  PRIMARY KEY (sharing_profile_id, attribute_name),
+
+  CONSTRAINT guacamole_sharing_profile_attribute_ibfk_1
+    FOREIGN KEY (sharing_profile_id)
+    REFERENCES guacamole_sharing_profile (sharing_profile_id) ON DELETE CASCADE
+
+);
+
+CREATE INDEX guacamole_sharing_profile_attribute_sharing_profile_id
+    ON guacamole_sharing_profile_attribute(sharing_profile_id);
+
+--
+-- Table of connection permissions. Each connection permission grants a user or
+-- user group specific access to a connection.
+--
+
+CREATE TABLE guacamole_connection_permission (
+
+  entity_id     integer NOT NULL,
+  connection_id integer NOT NULL,
+  permission    guacamole_object_permission_type NOT NULL,
+
+  PRIMARY KEY (entity_id, connection_id, permission),
+
+  CONSTRAINT guacamole_connection_permission_ibfk_1
+    FOREIGN KEY (connection_id)
+    REFERENCES guacamole_connection (connection_id) ON DELETE CASCADE,
+
+  CONSTRAINT guacamole_connection_permission_entity
+    FOREIGN KEY (entity_id)
+    REFERENCES guacamole_entity (entity_id) ON DELETE CASCADE
+
+);
+
+CREATE INDEX guacamole_connection_permission_connection_id
+    ON guacamole_connection_permission(connection_id);
+
+CREATE INDEX guacamole_connection_permission_entity_id
+    ON guacamole_connection_permission(entity_id);
+
+--
+-- Table of connection group permissions. Each group permission grants a user
+-- or user group specific access to a connection group.
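+-- For example (illustrative IDs), granting an entity read access to a
+-- connection group:
+--
+--   INSERT INTO guacamole_connection_group_permission
+--   VALUES (5, 1, 'READ');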
+--
+
+CREATE TABLE guacamole_connection_group_permission (
+
+  entity_id           integer NOT NULL,
+  connection_group_id integer NOT NULL,
+  permission          guacamole_object_permission_type NOT NULL,
+
+  PRIMARY KEY (entity_id, connection_group_id, permission),
+
+  CONSTRAINT guacamole_connection_group_permission_ibfk_1
+    FOREIGN KEY (connection_group_id)
+    REFERENCES guacamole_connection_group (connection_group_id) ON DELETE CASCADE,
+
+  CONSTRAINT guacamole_connection_group_permission_entity
+    FOREIGN KEY (entity_id)
+    REFERENCES guacamole_entity (entity_id) ON DELETE CASCADE
+
+);
+
+CREATE INDEX guacamole_connection_group_permission_connection_group_id
+    ON guacamole_connection_group_permission(connection_group_id);
+
+CREATE INDEX guacamole_connection_group_permission_entity_id
+    ON guacamole_connection_group_permission(entity_id);
+
+--
+-- Table of sharing profile permissions. Each sharing profile permission grants
+-- a user or user group specific access to a sharing profile.
+--
+
+CREATE TABLE guacamole_sharing_profile_permission (
+
+  entity_id          integer NOT NULL,
+  sharing_profile_id integer NOT NULL,
+  permission         guacamole_object_permission_type NOT NULL,
+
+  PRIMARY KEY (entity_id, sharing_profile_id, permission),
+
+  CONSTRAINT guacamole_sharing_profile_permission_ibfk_1
+    FOREIGN KEY (sharing_profile_id)
+    REFERENCES guacamole_sharing_profile (sharing_profile_id) ON DELETE CASCADE,
+
+  CONSTRAINT guacamole_sharing_profile_permission_entity
+    FOREIGN KEY (entity_id)
+    REFERENCES guacamole_entity (entity_id) ON DELETE CASCADE
+
+);
+
+CREATE INDEX guacamole_sharing_profile_permission_sharing_profile_id
+    ON guacamole_sharing_profile_permission(sharing_profile_id);
+
+CREATE INDEX guacamole_sharing_profile_permission_entity_id
+    ON guacamole_sharing_profile_permission(entity_id);
+
+--
+-- Table of system permissions. Each system permission grants a user or user
+-- group a system-level privilege of some kind.
+--
+
+CREATE TABLE guacamole_system_permission (
+
+  entity_id  integer NOT NULL,
+  permission guacamole_system_permission_type NOT NULL,
+
+  PRIMARY KEY (entity_id, permission),
+
+  CONSTRAINT guacamole_system_permission_entity
+    FOREIGN KEY (entity_id)
+    REFERENCES guacamole_entity (entity_id) ON DELETE CASCADE
+
+);
+
+CREATE INDEX guacamole_system_permission_entity_id
+    ON guacamole_system_permission(entity_id);
+
+--
+-- Table of user permissions. Each user permission grants a user or user group
+-- access to another user (the "affected" user) for a specific type of
+-- operation.
+--
+
+CREATE TABLE guacamole_user_permission (
+
+  entity_id        integer NOT NULL,
+  affected_user_id integer NOT NULL,
+  permission       guacamole_object_permission_type NOT NULL,
+
+  PRIMARY KEY (entity_id, affected_user_id, permission),
+
+  CONSTRAINT guacamole_user_permission_ibfk_1
+    FOREIGN KEY (affected_user_id)
+    REFERENCES guacamole_user (user_id) ON DELETE CASCADE,
+
+  CONSTRAINT guacamole_user_permission_entity
+    FOREIGN KEY (entity_id)
+    REFERENCES guacamole_entity (entity_id) ON DELETE CASCADE
+
+);
+
+CREATE INDEX guacamole_user_permission_affected_user_id
+    ON guacamole_user_permission(affected_user_id);
+
+CREATE INDEX guacamole_user_permission_entity_id
+    ON guacamole_user_permission(entity_id);
+
+--
+-- Table of user group permissions. Each user group permission grants a user
+-- or user group access to another user group (the "affected" user group) for
+-- a specific type of operation.
+--
+
+CREATE TABLE guacamole_user_group_permission (
+
+  entity_id              integer NOT NULL,
+  affected_user_group_id integer NOT NULL,
+  permission             guacamole_object_permission_type NOT NULL,
+
+  PRIMARY KEY (entity_id, affected_user_group_id, permission),
+
+  CONSTRAINT guacamole_user_group_permission_affected_user_group
+    FOREIGN KEY (affected_user_group_id)
+    REFERENCES guacamole_user_group (user_group_id) ON DELETE CASCADE,
+
+  CONSTRAINT guacamole_user_group_permission_entity
+    FOREIGN KEY (entity_id)
+    REFERENCES guacamole_entity (entity_id) ON DELETE CASCADE
+
+);
+
+CREATE INDEX guacamole_user_group_permission_affected_user_group_id
+    ON guacamole_user_group_permission(affected_user_group_id);
+
+CREATE INDEX guacamole_user_group_permission_entity_id
+    ON guacamole_user_group_permission(entity_id);
+
+--
+-- Table of connection history records. Each record defines a specific user's
+-- session, including the connection used, the start time, and the end time
+-- (if any).
+--
+
+CREATE TABLE guacamole_connection_history (
+
+  history_id           serial       NOT NULL,
+  user_id              integer      DEFAULT NULL,
+  username             varchar(128) NOT NULL,
+  remote_host          varchar(256) DEFAULT NULL,
+  connection_id        integer      DEFAULT NULL,
+  connection_name      varchar(128) NOT NULL,
+  sharing_profile_id   integer      DEFAULT NULL,
+  sharing_profile_name varchar(128) DEFAULT NULL,
+  start_date           timestamptz  NOT NULL,
+  end_date             timestamptz  DEFAULT NULL,
+
+  PRIMARY KEY (history_id),
+
+  CONSTRAINT guacamole_connection_history_ibfk_1
+    FOREIGN KEY (user_id)
+    REFERENCES guacamole_user (user_id) ON DELETE SET NULL,
+
+  CONSTRAINT guacamole_connection_history_ibfk_2
+    FOREIGN KEY (connection_id)
+    REFERENCES guacamole_connection (connection_id) ON DELETE SET NULL,
+
+  CONSTRAINT guacamole_connection_history_ibfk_3
+    FOREIGN KEY (sharing_profile_id)
+    REFERENCES guacamole_sharing_profile (sharing_profile_id) ON DELETE SET NULL
+
+);
+
+CREATE INDEX guacamole_connection_history_user_id
+    ON guacamole_connection_history(user_id);
+
+CREATE INDEX guacamole_connection_history_connection_id
+    ON guacamole_connection_history(connection_id);
+
+CREATE INDEX guacamole_connection_history_sharing_profile_id
+    ON guacamole_connection_history(sharing_profile_id);
+
+CREATE INDEX guacamole_connection_history_start_date
+    ON guacamole_connection_history(start_date);
+
+CREATE INDEX guacamole_connection_history_end_date
+    ON guacamole_connection_history(end_date);
+
+CREATE INDEX guacamole_connection_history_connection_id_start_date
+    ON guacamole_connection_history(connection_id, start_date);
+
+--
+-- User login/logout history
+--
+
+CREATE TABLE guacamole_user_history (
+
+  history_id  serial       NOT NULL,
+  user_id     integer      DEFAULT NULL,
+  username    varchar(128) NOT NULL,
+  remote_host varchar(256) DEFAULT NULL,
+  start_date  timestamptz  NOT NULL,
+  end_date    timestamptz  DEFAULT NULL,
+
+  PRIMARY KEY (history_id),
+
+  CONSTRAINT guacamole_user_history_ibfk_1
+    FOREIGN KEY (user_id)
+    REFERENCES guacamole_user (user_id) ON DELETE SET NULL
+
+);
+
+CREATE INDEX guacamole_user_history_user_id
+    ON guacamole_user_history(user_id);
+
+CREATE INDEX guacamole_user_history_start_date
+    ON guacamole_user_history(start_date);
+
+CREATE INDEX guacamole_user_history_end_date
+    ON guacamole_user_history(end_date);
+
+CREATE INDEX guacamole_user_history_user_id_start_date
+    ON guacamole_user_history(user_id, start_date);
+
+--
+-- User password history
+--
+
+CREATE TABLE guacamole_user_password_history (
+
+  password_history_id serial  NOT NULL,
+  user_id             integer NOT NULL,
+
+  -- Salted password
+  password_hash bytea       NOT NULL,
+  password_salt bytea,
+  password_date timestamptz NOT NULL,
+
+  PRIMARY KEY (password_history_id),
+
+  CONSTRAINT guacamole_user_password_history_ibfk_1
+    FOREIGN KEY (user_id)
+    REFERENCES guacamole_user (user_id) ON DELETE CASCADE
+
+);
+
+CREATE INDEX guacamole_user_password_history_user_id
+    ON guacamole_user_password_history(user_id);
+
diff --git a/guacamole_test_11_26/003-create-api-schema.sql b/guacamole_test_11_26/003-create-api-schema.sql
new file mode 100755
index 00000000..44b0fcf9
--- /dev/null
+++ b/guacamole_test_11_26/003-create-api-schema.sql
@@ -0,0 +1,135 @@
+--
+-- API Schema for Remote Access Platform
+-- Isolated from Guacamole schema for security
+--
+
+-- Create API schema
+CREATE SCHEMA IF NOT EXISTS api;
+
+-- User saved machines
+CREATE TABLE api.user_saved_machines (
+    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+
+    -- Guacamole user reference (via username, linked to guacamole_entity.name)
+    user_id VARCHAR(255) NOT NULL,
+
+    -- Machine data
+    name VARCHAR(255) NOT NULL,
+    hostname VARCHAR(255) NOT NULL,
+    port INTEGER NOT NULL,
+    protocol VARCHAR(50) NOT NULL, -- rdp, ssh, vnc, telnet
+    os VARCHAR(255), -- OS (e.g., Windows Server 2019, Ubuntu 22.04)
+
+    -- Optional credentials (passwords NOT stored, provided per-connection via HTTPS)
+    username VARCHAR(255),
+
+    -- Metadata
+    description TEXT,
+    tags TEXT[], -- Tag array for grouping
+    is_favorite BOOLEAN DEFAULT FALSE,
+
+    -- Timestamps
+    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
+    updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
+    last_connected_at TIMESTAMP WITH TIME ZONE,
+
+    -- Integrity constraints
+    CONSTRAINT valid_protocol CHECK (protocol IN ('rdp', 'ssh', 'vnc', 'telnet')),
+    CONSTRAINT valid_port CHECK (port > 0 AND port < 65536),
+    CONSTRAINT valid_hostname CHECK (char_length(hostname) > 0),
+    CONSTRAINT valid_name CHECK (char_length(name) > 0)
+);
+
+-- Search indexes
+CREATE INDEX idx_api_user_machines_user_id ON api.user_saved_machines(user_id);
+CREATE INDEX idx_api_user_machines_protocol ON api.user_saved_machines(protocol);
+CREATE INDEX idx_api_user_machines_tags ON api.user_saved_machines USING GIN(tags);
+CREATE INDEX idx_api_user_machines_favorite ON api.user_saved_machines(is_favorite) WHERE is_favorite = TRUE;
+CREATE INDEX idx_api_user_machines_created ON api.user_saved_machines(created_at DESC);
+
+-- Auto-update updated_at function
+CREATE OR REPLACE FUNCTION api.update_modified_column()
+RETURNS TRIGGER AS $$
+BEGIN
+    NEW.updated_at = NOW();
+    RETURN NEW;
+END;
+$$ LANGUAGE plpgsql;
+
+-- Auto-update updated_at trigger
+CREATE TRIGGER update_user_saved_machines_modtime
+    BEFORE UPDATE ON api.user_saved_machines
+    FOR EACH ROW
+    EXECUTE FUNCTION api.update_modified_column();
+
+-- Connection history (statistics and audit)
+CREATE TABLE api.connection_history (
+    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+    user_id VARCHAR(255) NOT NULL,
+    machine_id UUID REFERENCES api.user_saved_machines(id) ON DELETE CASCADE,
+
+    -- Connection data
+    connected_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
+    disconnected_at TIMESTAMP WITH TIME ZONE,
+    duration_seconds INTEGER,
+
+    -- Status
+    success BOOLEAN DEFAULT TRUE,
+    error_message TEXT,
+
+    -- Metadata
+    client_ip VARCHAR(45), -- IPv4/IPv6
+    user_agent TEXT
+);
+
+-- History indexes
+CREATE INDEX idx_api_connection_history_user_id ON api.connection_history(user_id);
+CREATE INDEX idx_api_connection_history_machine_id ON api.connection_history(machine_id);
+CREATE INDEX idx_api_connection_history_connected_at ON api.connection_history(connected_at DESC);
+
+-- User statistics view
+CREATE OR REPLACE VIEW api.user_machine_stats AS
+SELECT
+    m.user_id,
+    m.id as machine_id,
+    m.name,
+    m.hostname,
+    m.protocol,
+    COUNT(h.id) as total_connections,
+    MAX(h.connected_at) as last_connection,
+    AVG(h.duration_seconds) as avg_duration_seconds,
+    SUM(CASE WHEN h.success = TRUE THEN 1 ELSE 0 END) as successful_connections,
+    SUM(CASE WHEN h.success = FALSE THEN 1 ELSE 0 END) as failed_connections
+FROM
+    api.user_saved_machines m
+LEFT JOIN
+    api.connection_history h ON m.id = h.machine_id
+GROUP BY
+    m.user_id, m.id, m.name, m.hostname, m.protocol;
+
+-- Documentation comments
+COMMENT ON SCHEMA api IS 'API-specific tables, isolated from Guacamole schema';
+COMMENT ON TABLE api.user_saved_machines IS 'User-saved machines for quick access. Passwords are NOT stored - provided per-connection via HTTPS.';
+COMMENT ON TABLE api.connection_history IS 'Audit log of all connections to saved machines';
+COMMENT ON COLUMN api.user_saved_machines.tags IS 'Array of tags for categorization (e.g., ["production", "web-servers"])';
+COMMENT ON VIEW api.user_machine_stats IS 'Aggregated statistics per machine per user';
+
+-- Application grants (if using separate user)
+-- GRANT USAGE ON SCHEMA api TO guacamole_user;
+-- GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA api TO guacamole_user;
+-- GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA api TO guacamole_user;
+-- GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA api TO guacamole_user;
+
+-- Test data (remove in production)
+-- INSERT INTO api.user_saved_machines (user_id, name, hostname, port, protocol, os, username, description, tags) VALUES
+-- ('guacadmin', 'Test Windows Server', '192.168.1.100', 3389, 'rdp', 'Windows Server 2019', 'Administrator', 'Windows test machine', ARRAY['test', 'windows']),
+-- ('guacadmin', 'Test Linux Server', '192.168.1.101', 22, 'ssh', 'Ubuntu 22.04 LTS', 'root', 'Ubuntu server for testing', ARRAY['test', 'linux']);
+
+-- Migration completion
+DO $$
+BEGIN
+    RAISE NOTICE 'API schema created successfully';
+    RAISE NOTICE 'Tables: user_saved_machines, connection_history';
+    RAISE NOTICE 'View: user_machine_stats';
+END $$;
+
diff --git a/guacamole_test_11_26/api/Dockerfile b/guacamole_test_11_26/api/Dockerfile
new file mode 100755
index 00000000..dd2df03e
--- /dev/null
+++ b/guacamole_test_11_26/api/Dockerfile
@@ -0,0 +1,18 @@
+FROM python:3.11-slim
+
+WORKDIR /app
+
+# Copy the dependency manifest
+COPY requirements.txt .
+
+# Install dependencies
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Copy the application code
+COPY . .
+
+# Expose the API port
+EXPOSE 8000
+
+# Start the application
+CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
\ No newline at end of file
diff --git a/guacamole_test_11_26/api/core/__init__.py b/guacamole_test_11_26/api/core/__init__.py
new file mode 100755
index 00000000..204b6632
--- /dev/null
+++ b/guacamole_test_11_26/api/core/__init__.py
@@ -0,0 +1,35 @@
+"""
+Core module for Remote Access API.
+
+Provides:
+- Authentication and authorization (JWT, Guacamole integration)
+- Security features (CSRF, rate limiting, brute force protection)
+- Storage and session management (Redis, PostgreSQL)
+- Audit logging and WebSocket notifications
+- Role and permission system
+"""
+
+from .guacamole_auth import GuacamoleAuthenticator
+from .models import (
+    ConnectionRequest,
+    ConnectionResponse,
+    LoginRequest,
+    LoginResponse,
+    UserInfo,
+    UserRole,
+)
+from .permissions import PermissionChecker
+from .utils import create_jwt_token, verify_jwt_token
+
+__all__ = [
+    "ConnectionRequest",
+    "ConnectionResponse",
+    "GuacamoleAuthenticator",
+    "LoginRequest",
+    "LoginResponse",
+    "PermissionChecker",
+    "UserInfo",
+    "UserRole",
+    "create_jwt_token",
+    "verify_jwt_token",
+]
\ No newline at end of file
diff --git a/guacamole_test_11_26/api/core/audit_logger.py b/guacamole_test_11_26/api/core/audit_logger.py
new file mode 100755
index 00000000..66c4088b
--- /dev/null
+++ b/guacamole_test_11_26/api/core/audit_logger.py
@@ -0,0 +1,380 @@
+"""
+Immutable audit logging with HMAC signatures.
+"""
+
+import hashlib
+import hmac
+import json
+import os
+from collections import Counter
+from datetime import datetime, timezone
+from pathlib import Path
+from typing import Any, Dict, Optional, Union
+
+import structlog
+
+logger = structlog.get_logger(__name__)
+
+
+class ImmutableAuditLogger:
+    """Immutable audit logger with HMAC signatures to prevent log tampering."""
+
+    def __init__(self) -> None:
+        """Initialize the immutable audit logger."""
+        self.hmac_secret = os.getenv(
+            "AUDIT_HMAC_SECRET", "default_audit_secret_change_me"
+        )
+        log_path_str = os.getenv(
+            "AUDIT_LOG_PATH", "/var/log/remote_access_audit.log"
+        )
+        self.audit_log_path = Path(log_path_str)
+        self.audit_log_path.parent.mkdir(parents=True, exist_ok=True)
+        self.audit_logger = structlog.get_logger("audit")
+
+        logger.info(
+            "Immutable audit logger initialized",
+            audit_log_path=str(self.audit_log_path),
+        )
+
+    def _generate_hmac_signature(self, data: str) -> str:
+        """
+        Generate HMAC signature for data.
+
+        Args:
+            data: Data to sign.
+
+        Returns:
+            HMAC signature in hex format.
+        """
+        return hmac.new(
+            self.hmac_secret.encode("utf-8"),
+            data.encode("utf-8"),
+            hashlib.sha256,
+        ).hexdigest()
+
+    def _verify_hmac_signature(self, data: str, signature: str) -> bool:
+        """
+        Verify HMAC signature.
+
+        Args:
+            data: Data to verify.
+            signature: Signature to verify.
+
+        Returns:
+            True if signature is valid.
+        """
+        expected_signature = self._generate_hmac_signature(data)
+        return hmac.compare_digest(expected_signature, signature)
+
+    def log_security_event(
+        self,
+        event_type: str,
+        client_ip: str,
+        user_agent: Optional[str] = None,
+        details: Optional[Dict[str, Any]] = None,
+        severity: str = "info",
+        username: Optional[str] = None,
+    ) -> bool:
+        """
+        Log security event with immutable record.
+
+        Args:
+            event_type: Event type.
+            client_ip: Client IP address.
+            user_agent: Client user agent.
+            details: Additional details.
+            severity: Severity level.
+            username: Username if applicable.
+
+        Returns:
+            True if logging succeeded.
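+
+        Example (illustrative values only)::
+
+            immutable_audit_logger.log_security_event(
+                event_type="csrf_validation_failed",
+                client_ip="203.0.113.10",
+                severity="warning",
+                username="alice",
+            )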
+ """ + try: + event_data = { + "event_type": "security_event", + "security_event_type": event_type, + "timestamp": datetime.now(timezone.utc).isoformat(), + "client_ip": client_ip, + "user_agent": user_agent or "unknown", + "severity": severity, + "username": username, + "details": details or {}, + } + return self._write_immutable_log(event_data) + except Exception as e: + logger.error("Failed to log security event", error=str(e)) + return False + + def log_audit_event( + self, + action: str, + resource: str, + client_ip: str, + user_agent: Optional[str] = None, + result: str = "success", + details: Optional[Dict[str, Any]] = None, + username: Optional[str] = None, + ) -> bool: + """ + Log audit event with immutable record. + + Args: + action: Action performed. + resource: Resource affected. + client_ip: Client IP address. + user_agent: Client user agent. + result: Action result. + details: Additional details. + username: Username. + + Returns: + True if logging succeeded. + """ + try: + event_data = { + "event_type": "audit_event", + "action": action, + "resource": resource, + "timestamp": datetime.now(timezone.utc).isoformat(), + "client_ip": client_ip, + "user_agent": user_agent or "unknown", + "result": result, + "username": username, + "details": details or {}, + } + return self._write_immutable_log(event_data) + except Exception as e: + logger.error("Failed to log audit event", error=str(e)) + return False + + def log_authentication_event( + self, + event_type: str, + username: str, + client_ip: str, + success: bool, + details: Optional[Dict[str, Any]] = None, + ) -> bool: + """ + Log authentication event. + + Args: + event_type: Event type (login, logout, failed_login, etc.). + username: Username. + client_ip: Client IP address. + success: Operation success status. + details: Additional details. + + Returns: + True if logging succeeded. + """ + try: + event_data = { + "event_type": "authentication_event", + "auth_event_type": event_type, + "timestamp": datetime.now(timezone.utc).isoformat(), + "username": username, + "client_ip": client_ip, + "success": success, + "details": details or {}, + } + return self._write_immutable_log(event_data) + except Exception as e: + logger.error("Failed to log authentication event", error=str(e)) + return False + + def log_connection_event( + self, + event_type: str, + connection_id: str, + username: str, + client_ip: str, + details: Optional[Dict[str, Any]] = None, + ) -> bool: + """ + Log connection event. + + Args: + event_type: Event type (created, deleted, expired, etc.). + connection_id: Connection ID. + username: Username. + client_ip: Client IP address. + details: Additional details. + + Returns: + True if logging succeeded. + """ + try: + event_data = { + "event_type": "connection_event", + "connection_event_type": event_type, + "timestamp": datetime.now(timezone.utc).isoformat(), + "connection_id": connection_id, + "username": username, + "client_ip": client_ip, + "details": details or {}, + } + return self._write_immutable_log(event_data) + except Exception as e: + logger.error("Failed to log connection event", error=str(e)) + return False + + def _write_immutable_log(self, event_data: Dict[str, Any]) -> bool: + """ + Write immutable log entry with HMAC signature. + + Args: + event_data: Event data. + + Returns: + True if write succeeded. 
+ """ + try: + json_data = json.dumps(event_data, ensure_ascii=False, sort_keys=True) + signature = self._generate_hmac_signature(json_data) + log_entry = { + "data": event_data, + "signature": signature, + "log_timestamp": datetime.now(timezone.utc).isoformat(), + } + + with self.audit_log_path.open("a", encoding="utf-8") as f: + f.write(json.dumps(log_entry, ensure_ascii=False) + "\n") + f.flush() + + self.audit_logger.info( + "Audit event logged", + event_type=event_data.get("event_type"), + signature=signature[:16] + "...", + ) + return True + except Exception as e: + logger.error("Failed to write immutable log", error=str(e)) + return False + + def verify_log_integrity( + self, log_file_path: Optional[Union[str, Path]] = None + ) -> Dict[str, Any]: + """ + Verify audit log integrity. + + Args: + log_file_path: Path to log file (defaults to main log file). + + Returns: + Integrity verification result. + """ + try: + file_path = ( + Path(log_file_path) if log_file_path else self.audit_log_path + ) + + if not file_path.exists(): + return { + "status": "error", + "message": "Log file does not exist", + "file_path": str(file_path), + } + + valid_entries = 0 + invalid_entries = 0 + total_entries = 0 + + with file_path.open("r", encoding="utf-8") as f: + for line in f: + if not line.strip(): + continue + + total_entries += 1 + + try: + log_entry = json.loads(line) + + if "data" not in log_entry or "signature" not in log_entry: + invalid_entries += 1 + continue + + json_data = json.dumps( + log_entry["data"], ensure_ascii=False, sort_keys=True + ) + + if self._verify_hmac_signature( + json_data, log_entry["signature"] + ): + valid_entries += 1 + else: + invalid_entries += 1 + except (json.JSONDecodeError, KeyError, ValueError): + invalid_entries += 1 + + return { + "status": "success", + "file_path": str(file_path), + "total_entries": total_entries, + "valid_entries": valid_entries, + "invalid_entries": invalid_entries, + "integrity_percentage": ( + (valid_entries / total_entries * 100) if total_entries > 0 else 0 + ), + } + except Exception as e: + logger.error("Failed to verify log integrity", error=str(e)) + return { + "status": "error", + "message": str(e), + "file_path": str(log_file_path or self.audit_log_path), + } + + def get_audit_stats(self) -> Dict[str, Any]: + """ + Get audit log statistics. + + Returns: + Audit log statistics. 
+ """ + try: + if not self.audit_log_path.exists(): + return { + "status": "no_log_file", + "file_path": str(self.audit_log_path), + } + + file_size = self.audit_log_path.stat().st_size + event_types: Counter[str] = Counter() + total_entries = 0 + + with self.audit_log_path.open("r", encoding="utf-8") as f: + for line in f: + if not line.strip(): + continue + + try: + log_entry = json.loads(line) + if ( + "data" in log_entry + and "event_type" in log_entry["data"] + ): + event_type = log_entry["data"]["event_type"] + event_types[event_type] += 1 + total_entries += 1 + except (json.JSONDecodeError, KeyError): + continue + + return { + "status": "success", + "file_path": str(self.audit_log_path), + "file_size_bytes": file_size, + "total_entries": total_entries, + "event_types": dict(event_types), + "hmac_secret_configured": bool( + self.hmac_secret + and self.hmac_secret != "default_audit_secret_change_me" + ), + } + except Exception as e: + logger.error("Failed to get audit stats", error=str(e)) + return {"status": "error", "message": str(e)} + + +# Global instance for use in API +immutable_audit_logger = ImmutableAuditLogger() diff --git a/guacamole_test_11_26/api/core/brute_force_protection.py b/guacamole_test_11_26/api/core/brute_force_protection.py new file mode 100755 index 00000000..69cfc0cb --- /dev/null +++ b/guacamole_test_11_26/api/core/brute_force_protection.py @@ -0,0 +1,327 @@ +"""Brute-force protection for login endpoint.""" + +from typing import Any, Dict, Tuple + +import structlog + +from .rate_limiter import redis_rate_limiter + +logger = structlog.get_logger(__name__) + +# Backoff constants +MAX_BACKOFF_SECONDS = 300 +MIN_FAILED_ATTEMPTS_FOR_BACKOFF = 2 +EXPONENTIAL_BACKOFF_BASE = 2 + +# Default limits +DEFAULT_MAX_LOGIN_ATTEMPTS_PER_IP = 5 +DEFAULT_MAX_LOGIN_ATTEMPTS_PER_USER = 10 +DEFAULT_LOGIN_WINDOW_MINUTES = 15 +DEFAULT_USER_LOCKOUT_MINUTES = 60 + +# Block types +BLOCK_TYPE_RATE_LIMIT = "rate_limit" +BLOCK_TYPE_IP_BLOCKED = "ip_blocked" +BLOCK_TYPE_USER_LOCKED = "user_locked" +BLOCK_TYPE_EXPONENTIAL_BACKOFF = "exponential_backoff" +BLOCK_TYPE_ALLOWED = "allowed" +BLOCK_TYPE_ERROR_FALLBACK = "error_fallback" + +# Response messages +MSG_RATE_LIMIT_EXCEEDED = "Rate limit exceeded" +MSG_IP_BLOCKED = "Too many failed attempts from this IP" +MSG_USER_LOCKED = "User account temporarily locked" +MSG_LOGIN_ALLOWED = "Login allowed" +MSG_LOGIN_ALLOWED_ERROR = "Login allowed (protection error)" + +# Default failure reason +DEFAULT_FAILURE_REASON = "invalid_credentials" + +# Empty string for clearing +EMPTY_USERNAME = "" +EMPTY_IP = "" + + +class BruteForceProtection: + """Protection against brute-force attacks on login endpoint.""" + + def __init__(self) -> None: + """Initialize brute-force protection.""" + self.max_login_attempts_per_ip = DEFAULT_MAX_LOGIN_ATTEMPTS_PER_IP + self.max_login_attempts_per_user = DEFAULT_MAX_LOGIN_ATTEMPTS_PER_USER + self.login_window_minutes = DEFAULT_LOGIN_WINDOW_MINUTES + self.user_lockout_minutes = DEFAULT_USER_LOCKOUT_MINUTES + self.exponential_backoff_base = EXPONENTIAL_BACKOFF_BASE + + def check_login_allowed( + self, client_ip: str, username: str + ) -> Tuple[bool, str, Dict[str, Any]]: + """ + Check if login is allowed for given IP and user. + + Args: + client_ip: Client IP address. + username: Username. + + Returns: + Tuple of (allowed: bool, reason: str, details: Dict[str, Any]). 
+ """ + try: + allowed, headers = redis_rate_limiter.check_login_rate_limit( + client_ip, username + ) + + if not allowed: + return ( + False, + MSG_RATE_LIMIT_EXCEEDED, + { + "type": BLOCK_TYPE_RATE_LIMIT, + "client_ip": client_ip, + "username": username, + "headers": headers, + }, + ) + + failed_counts = redis_rate_limiter.get_failed_login_count( + client_ip, username, self.user_lockout_minutes + ) + + if failed_counts["ip_failed_count"] >= self.max_login_attempts_per_ip: + return ( + False, + MSG_IP_BLOCKED, + { + "type": BLOCK_TYPE_IP_BLOCKED, + "client_ip": client_ip, + "failed_count": failed_counts["ip_failed_count"], + "max_attempts": self.max_login_attempts_per_ip, + "window_minutes": self.login_window_minutes, + }, + ) + + if failed_counts["user_failed_count"] >= self.max_login_attempts_per_user: + return ( + False, + MSG_USER_LOCKED, + { + "type": BLOCK_TYPE_USER_LOCKED, + "username": username, + "failed_count": failed_counts["user_failed_count"], + "max_attempts": self.max_login_attempts_per_user, + "lockout_minutes": self.user_lockout_minutes, + }, + ) + + backoff_seconds = self._calculate_backoff_time( + client_ip, username, failed_counts + ) + if backoff_seconds > 0: + return ( + False, + f"Please wait {backoff_seconds} seconds before next attempt", + { + "type": BLOCK_TYPE_EXPONENTIAL_BACKOFF, + "wait_seconds": backoff_seconds, + "client_ip": client_ip, + "username": username, + }, + ) + + return ( + True, + MSG_LOGIN_ALLOWED, + { + "type": BLOCK_TYPE_ALLOWED, + "client_ip": client_ip, + "username": username, + "failed_counts": failed_counts, + }, + ) + + except Exception as e: + logger.error( + "Error checking login permission", + client_ip=client_ip, + username=username, + error=str(e), + ) + return ( + True, + MSG_LOGIN_ALLOWED_ERROR, + {"type": BLOCK_TYPE_ERROR_FALLBACK, "error": str(e)}, + ) + + def record_failed_login( + self, + client_ip: str, + username: str, + failure_reason: str = DEFAULT_FAILURE_REASON, + ) -> None: + """ + Record failed login attempt. + + Args: + client_ip: Client IP address. + username: Username. + failure_reason: Failure reason. + """ + try: + redis_rate_limiter.record_failed_login(client_ip, username) + + logger.warning( + "Failed login attempt recorded", + client_ip=client_ip, + username=username, + failure_reason=failure_reason, + ) + + except Exception as e: + logger.error( + "Failed to record failed login attempt", + client_ip=client_ip, + username=username, + error=str(e), + ) + + def record_successful_login(self, client_ip: str, username: str) -> None: + """ + Record successful login (clear failed attempts). + + Args: + client_ip: Client IP address. + username: Username. + """ + try: + redis_rate_limiter.clear_failed_logins(client_ip, username) + + logger.info( + "Successful login recorded, failed attempts cleared", + client_ip=client_ip, + username=username, + ) + + except Exception as e: + logger.error( + "Failed to record successful login", + client_ip=client_ip, + username=username, + error=str(e), + ) + + def _calculate_backoff_time( + self, client_ip: str, username: str, failed_counts: Dict[str, int] + ) -> int: + """ + Calculate wait time for exponential backoff. + + Args: + client_ip: Client IP address. + username: Username. + failed_counts: Failed attempt counts. + + Returns: + Wait time in seconds. 
+ """ + try: + max_failed = max( + failed_counts["ip_failed_count"], + failed_counts["user_failed_count"], + ) + + if max_failed <= MIN_FAILED_ATTEMPTS_FOR_BACKOFF: + return 0 + + backoff_seconds = min( + self.exponential_backoff_base + ** (max_failed - MIN_FAILED_ATTEMPTS_FOR_BACKOFF), + MAX_BACKOFF_SECONDS, + ) + + return backoff_seconds + + except Exception as e: + logger.error("Error calculating backoff time", error=str(e)) + return 0 + + def get_protection_stats(self) -> Dict[str, Any]: + """ + Get brute-force protection statistics. + + Returns: + Protection statistics dictionary. + """ + try: + rate_limit_stats = redis_rate_limiter.get_rate_limit_stats() + + return { + "max_login_attempts_per_ip": self.max_login_attempts_per_ip, + "max_login_attempts_per_user": self.max_login_attempts_per_user, + "login_window_minutes": self.login_window_minutes, + "user_lockout_minutes": self.user_lockout_minutes, + "exponential_backoff_base": self.exponential_backoff_base, + "rate_limit_stats": rate_limit_stats, + } + + except Exception as e: + logger.error("Failed to get protection stats", error=str(e)) + return {"error": str(e)} + + def force_unlock_user(self, username: str, unlocked_by: str) -> bool: + """ + Force unlock user (for administrators). + + Args: + username: Username to unlock. + unlocked_by: Who unlocked the user. + + Returns: + True if unlock successful. + """ + try: + redis_rate_limiter.clear_failed_logins(EMPTY_IP, username) + + logger.info("User force unlocked", username=username, unlocked_by=unlocked_by) + + return True + + except Exception as e: + logger.error( + "Failed to force unlock user", + username=username, + unlocked_by=unlocked_by, + error=str(e), + ) + return False + + def force_unlock_ip(self, client_ip: str, unlocked_by: str) -> bool: + """ + Force unlock IP (for administrators). + + Args: + client_ip: IP address to unlock. + unlocked_by: Who unlocked the IP. + + Returns: + True if unlock successful. 
+ """ + try: + redis_rate_limiter.clear_failed_logins(client_ip, EMPTY_USERNAME) + + logger.info( + "IP force unlocked", client_ip=client_ip, unlocked_by=unlocked_by + ) + + return True + + except Exception as e: + logger.error( + "Failed to force unlock IP", + client_ip=client_ip, + unlocked_by=unlocked_by, + error=str(e), + ) + return False + + +brute_force_protection = BruteForceProtection() diff --git a/guacamole_test_11_26/api/core/csrf_protection.py b/guacamole_test_11_26/api/core/csrf_protection.py new file mode 100755 index 00000000..2281d5ba --- /dev/null +++ b/guacamole_test_11_26/api/core/csrf_protection.py @@ -0,0 +1,361 @@ +"""CSRF protection using Double Submit Cookie pattern.""" + +import hashlib +import json +import os +import secrets +import time +from datetime import datetime, timedelta +from typing import Any, Dict, FrozenSet + +import redis +import structlog + +logger = structlog.get_logger(__name__) + +# Redis configuration +REDIS_DEFAULT_HOST = "redis" +REDIS_DEFAULT_PORT = "6379" +REDIS_DEFAULT_DB = 0 + +# Token configuration +REDIS_KEY_PREFIX = "csrf:token:" +CSRF_TOKEN_TTL_SECONDS = 3600 +TOKEN_SIZE_BYTES = 32 +SECRET_KEY_SIZE_BYTES = 32 +TOKEN_PARTS_COUNT = 3 +TOKEN_PREVIEW_LENGTH = 16 +SCAN_BATCH_SIZE = 100 + +# Redis TTL special values +TTL_KEY_NOT_EXISTS = -2 +TTL_KEY_NO_EXPIRY = -1 + +# Protected HTTP methods +PROTECTED_METHODS: FrozenSet[str] = frozenset({"POST", "PUT", "DELETE", "PATCH"}) + +# Excluded endpoints (no CSRF protection) +EXCLUDED_ENDPOINTS: FrozenSet[str] = frozenset({ + "/auth/login", + "/health", + "/health/detailed", + "/health/ready", + "/health/live", + "/health/routing", + "/metrics", + "/docs", + "/openapi.json", +}) + + +class CSRFProtection: + """CSRF protection with Double Submit Cookie pattern.""" + + def __init__(self) -> None: + """ + Initialize CSRF protection. + + Raises: + RuntimeError: If Redis connection fails. + """ + self._redis_client = redis.Redis( + host=os.getenv("REDIS_HOST", REDIS_DEFAULT_HOST), + port=int(os.getenv("REDIS_PORT", REDIS_DEFAULT_PORT)), + password=os.getenv("REDIS_PASSWORD"), + db=REDIS_DEFAULT_DB, + decode_responses=True, + ) + + self._csrf_token_ttl = CSRF_TOKEN_TTL_SECONDS + self._token_size = TOKEN_SIZE_BYTES + self._secret_key = secrets.token_bytes(SECRET_KEY_SIZE_BYTES) + + try: + self._redis_client.ping() + logger.info("CSRF Protection connected to Redis successfully") + except Exception as e: + logger.error("Failed to connect to Redis for CSRF", error=str(e)) + raise RuntimeError(f"Redis connection failed: {e}") from e + + self._protected_methods: FrozenSet[str] = PROTECTED_METHODS + self._excluded_endpoints: FrozenSet[str] = EXCLUDED_ENDPOINTS + + def generate_csrf_token(self, user_id: str) -> str: + """ + Generate CSRF token for user. + + Args: + user_id: User ID. + + Returns: + CSRF token. 
+ """ + try: + random_bytes = secrets.token_bytes(self._token_size) + + timestamp = str(int(time.time())) + data_to_sign = f"{user_id}:{timestamp}:{random_bytes.hex()}" + signature = hashlib.sha256( + f"{data_to_sign}:{self._secret_key.hex()}".encode() + ).hexdigest() + + csrf_token = f"{random_bytes.hex()}:{timestamp}:{signature}" + + now = datetime.now() + expires_at = now + timedelta(seconds=self._csrf_token_ttl) + token_data = { + "user_id": user_id, + "created_at": now.isoformat(), + "expires_at": expires_at.isoformat(), + "used": False, + } + + redis_key = f"{REDIS_KEY_PREFIX}{csrf_token}" + self._redis_client.setex( + redis_key, self._csrf_token_ttl, json.dumps(token_data) + ) + + logger.debug( + "CSRF token generated in Redis", + user_id=user_id, + token_preview=csrf_token[:TOKEN_PREVIEW_LENGTH] + "...", + expires_at=expires_at.isoformat(), + ) + + return csrf_token + + except Exception as e: + logger.error("Failed to generate CSRF token", user_id=user_id, error=str(e)) + raise + + def validate_csrf_token(self, token: str, user_id: str) -> bool: + """ + Validate CSRF token. + + Args: + token: CSRF token. + user_id: User ID. + + Returns: + True if token is valid. + """ + try: + if not token or not user_id: + return False + + redis_key = f"{REDIS_KEY_PREFIX}{token}" + token_json = self._redis_client.get(redis_key) + + if not token_json: + logger.warning( + "CSRF token not found in Redis", + token_preview=token[:TOKEN_PREVIEW_LENGTH] + "...", + user_id=user_id, + ) + return False + + token_data = json.loads(token_json) + + expires_at = datetime.fromisoformat(token_data["expires_at"]) + if datetime.now() > expires_at: + logger.warning( + "CSRF token expired", + token_preview=token[:TOKEN_PREVIEW_LENGTH] + "...", + user_id=user_id, + ) + self._redis_client.delete(redis_key) + return False + + if token_data["user_id"] != user_id: + logger.warning( + "CSRF token user mismatch", + token_preview=token[:TOKEN_PREVIEW_LENGTH] + "...", + expected_user=user_id, + actual_user=token_data["user_id"], + ) + return False + + if not self._verify_token_signature(token, user_id): + logger.warning( + "CSRF token signature invalid", + token_preview=token[:TOKEN_PREVIEW_LENGTH] + "...", + user_id=user_id, + ) + self._redis_client.delete(redis_key) + return False + + token_data["used"] = True + ttl = self._redis_client.ttl(redis_key) + if ttl > 0: + self._redis_client.setex(redis_key, ttl, json.dumps(token_data)) + + logger.debug( + "CSRF token validated successfully", + token_preview=token[:TOKEN_PREVIEW_LENGTH] + "...", + user_id=user_id, + ) + + return True + + except Exception as e: + logger.error( + "Error validating CSRF token", + token_preview=token[:TOKEN_PREVIEW_LENGTH] + "..." if token else "none", + user_id=user_id, + error=str(e), + ) + return False + + def _verify_token_signature(self, token: str, user_id: str) -> bool: + """ + Verify token signature. + + Args: + token: CSRF token. + user_id: User ID. + + Returns: + True if signature is valid. + """ + try: + parts = token.split(":") + if len(parts) != TOKEN_PARTS_COUNT: + return False + + random_hex, timestamp, signature = parts + + data_to_sign = f"{user_id}:{timestamp}:{random_hex}" + expected_signature = hashlib.sha256( + f"{data_to_sign}:{self._secret_key.hex()}".encode() + ).hexdigest() + + return signature == expected_signature + + except Exception: + return False + + def should_protect_endpoint(self, method: str, path: str) -> bool: + """ + Check if endpoint needs CSRF protection. + + Args: + method: HTTP method. + path: Endpoint path. 
+
+        Returns:
+            True if CSRF protection is needed.
+        """
+        if method not in self._protected_methods:
+            return False
+
+        if path in self._excluded_endpoints:
+            return False
+
+        for excluded_path in self._excluded_endpoints:
+            if path.startswith(excluded_path):
+                return False
+
+        return True
+
+    def cleanup_expired_tokens(self) -> None:
+        """
+        Clean up expired CSRF tokens from Redis.
+
+        Note: Redis automatically removes keys with expired TTL.
+        """
+        try:
+            pattern = f"{REDIS_KEY_PREFIX}*"
+            keys = list(self._redis_client.scan_iter(match=pattern, count=SCAN_BATCH_SIZE))
+
+            cleaned_count = 0
+            for key in keys:
+                ttl = self._redis_client.ttl(key)
+                if ttl == TTL_KEY_NOT_EXISTS:
+                    cleaned_count += 1
+                elif ttl == TTL_KEY_NO_EXPIRY:
+                    self._redis_client.delete(key)
+                    cleaned_count += 1
+
+            if cleaned_count > 0:
+                logger.info(
+                    "CSRF tokens cleanup completed",
+                    cleaned_count=cleaned_count,
+                    remaining_count=len(keys) - cleaned_count,
+                )
+        except Exception as e:
+            logger.error("Failed to cleanup expired CSRF tokens", error=str(e))
+
+    def get_csrf_stats(self) -> Dict[str, Any]:
+        """
+        Get CSRF token statistics from Redis.
+
+        Returns:
+            Dictionary with CSRF statistics.
+        """
+        try:
+            pattern = f"{REDIS_KEY_PREFIX}*"
+            keys = list(self._redis_client.scan_iter(match=pattern, count=SCAN_BATCH_SIZE))
+
+            active_tokens = 0
+            used_tokens = 0
+
+            for key in keys:
+                try:
+                    token_json = self._redis_client.get(key)
+                    if token_json:
+                        token_data = json.loads(token_json)
+                        active_tokens += 1
+                        if token_data.get("used", False):
+                            used_tokens += 1
+                except Exception:
+                    continue
+
+            return {
+                "total_tokens": len(keys),
+                "active_tokens": active_tokens,
+                "used_tokens": used_tokens,
+                "token_ttl_seconds": self._csrf_token_ttl,
+                "protected_methods": sorted(self._protected_methods),
+                "excluded_endpoints": sorted(self._excluded_endpoints),
+                "storage": "Redis",
+            }
+        except Exception as e:
+            logger.error("Failed to get CSRF stats", error=str(e))
+            return {"error": str(e), "storage": "Redis"}
+
+    def revoke_user_tokens(self, user_id: str) -> None:
+        """
+        Revoke all user tokens from Redis.
+
+        Args:
+            user_id: User ID.
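+
+        Example (illustrative; typically called on logout or password
+        change)::
+
+            csrf_protection.revoke_user_tokens("alice")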
+ """ + try: + pattern = f"{REDIS_KEY_PREFIX}*" + keys = list(self._redis_client.scan_iter(match=pattern, count=SCAN_BATCH_SIZE)) + + revoked_count = 0 + for key in keys: + try: + token_json = self._redis_client.get(key) + if token_json: + token_data = json.loads(token_json) + if token_data.get("user_id") == user_id: + self._redis_client.delete(key) + revoked_count += 1 + except Exception: + continue + + if revoked_count > 0: + logger.info( + "Revoked user CSRF tokens from Redis", + user_id=user_id, + count=revoked_count, + ) + except Exception as e: + logger.error( + "Failed to revoke user CSRF tokens", user_id=user_id, error=str(e) + ) + + +csrf_protection = CSRFProtection() diff --git a/guacamole_test_11_26/api/core/guacamole_auth.py b/guacamole_test_11_26/api/core/guacamole_auth.py new file mode 100755 index 00000000..ff4bfc9c --- /dev/null +++ b/guacamole_test_11_26/api/core/guacamole_auth.py @@ -0,0 +1,485 @@ +"""Integration with Guacamole API for authentication and user management.""" + +import os +from datetime import datetime, timedelta +from typing import Any, Dict, List, Optional + +import requests +import structlog + +from .models import UserRole +from .permissions import PermissionChecker +from .session_storage import session_storage +from .utils import create_jwt_token + +logger = structlog.get_logger(__name__) + + +class GuacamoleAuthenticator: + """Class for authentication via Guacamole API.""" + + def __init__(self) -> None: + """ + Initialize Guacamole authenticator. + + Raises: + ValueError: If system credentials are not set in environment variables. + """ + self.base_url = os.getenv("GUACAMOLE_URL", "http://guacamole:8080") + self.session = requests.Session() + + self._system_token: Optional[str] = None + self._system_token_expires: Optional[datetime] = None + + self._system_username = os.getenv("SYSTEM_ADMIN_USERNAME") + self._system_password = os.getenv("SYSTEM_ADMIN_PASSWORD") + + if not self._system_username or not self._system_password: + raise ValueError( + "SYSTEM_ADMIN_USERNAME and SYSTEM_ADMIN_PASSWORD environment " + "variables are required. Set these in your .env or " + "production.env file for security. Never use default " + "credentials in production!" + ) + + def get_system_token(self) -> str: + """ + Get system user token for administrative operations. + + Returns: + System user token. + + Raises: + RuntimeError: If system user authentication fails. 
+ """ + if ( + self._system_token is None + or self._system_token_expires is None + or self._system_token_expires <= datetime.now() + ): + logger.debug("Refreshing system token", username=self._system_username) + + auth_url = f"{self.base_url}/guacamole/api/tokens" + auth_data = { + "username": self._system_username, + "password": self._system_password, + } + + try: + response = self.session.post(auth_url, data=auth_data, timeout=10) + response.raise_for_status() + + auth_result = response.json() + self._system_token = auth_result.get("authToken") + + if not self._system_token: + raise RuntimeError("No authToken in response") + + self._system_token_expires = datetime.now() + timedelta(hours=7) + + logger.info( + "System token refreshed successfully", + username=self._system_username, + expires_at=self._system_token_expires.isoformat(), + ) + + except requests.exceptions.RequestException as e: + logger.error( + "Failed to authenticate system user", + username=self._system_username, + error=str(e), + ) + raise RuntimeError( + f"Failed to authenticate system user: {e}" + ) from e + except Exception as e: + logger.error( + "Unexpected error during system authentication", + username=self._system_username, + error=str(e), + ) + raise + + return self._system_token + + def authenticate_user( + self, username: str, password: str + ) -> Optional[Dict[str, Any]]: + """ + Authenticate user via Guacamole API. + + Args: + username: Username in Guacamole. + password: Password in Guacamole. + + Returns: + Dictionary with user information or None if authentication fails. + """ + auth_url = f"{self.base_url}/guacamole/api/tokens" + auth_data = {"username": username, "password": password} + + try: + logger.debug("Attempting user authentication", username=username) + + response = self.session.post(auth_url, data=auth_data, timeout=10) + + if response.status_code != 200: + logger.info( + "Authentication failed", + username=username, + status_code=response.status_code, + response=response.text[:200], + ) + return None + + auth_result = response.json() + auth_token = auth_result.get("authToken") + + if not auth_token: + logger.warning( + "No authToken in successful response", + username=username, + response=auth_result, + ) + return None + + user_info = self.get_user_info(auth_token) + if not user_info: + logger.warning( + "Failed to get user info after authentication", username=username + ) + return None + + system_permissions = user_info.get("systemPermissions", []) + user_role = PermissionChecker.determine_role_from_permissions( + system_permissions + ) + + result = { + "username": username, + "auth_token": auth_token, + "role": user_role.value, + "permissions": system_permissions, + "full_name": user_info.get("fullName"), + "email": user_info.get("emailAddress"), + "organization": user_info.get("organization"), + "organizational_role": user_info.get("organizationalRole"), + } + + logger.info( + "User authenticated successfully", + username=username, + role=user_role.value, + permissions_count=len(system_permissions), + ) + + return result + + except requests.exceptions.RequestException as e: + logger.error( + "Network error during authentication", username=username, error=str(e) + ) + return None + except Exception as e: + logger.error( + "Unexpected error during authentication", username=username, error=str(e) + ) + return None + + def get_user_info(self, auth_token: str) -> Optional[Dict[str, Any]]: + """ + Get user information via Guacamole API. + + Args: + auth_token: User authentication token. 
+ + Returns: + Dictionary with user information or None. + """ + user_url = f"{self.base_url}/guacamole/api/session/data/postgresql/self" + headers = {"Guacamole-Token": auth_token} + + try: + response = self.session.get(user_url, headers=headers, timeout=10) + + if response.status_code != 200: + logger.warning( + "Failed to get user info", + status_code=response.status_code, + response=response.text[:200], + ) + return None + + user_data = response.json() + username = user_data.get("username") + + if not username: + logger.warning("No username in user info response") + return None + + permissions_url = ( + f"{self.base_url}/guacamole/api/session/data/postgresql/" + f"users/{username}/permissions" + ) + + try: + perm_response = self.session.get( + permissions_url, headers=headers, timeout=10 + ) + + if perm_response.status_code == 200: + permissions_data = perm_response.json() + system_permissions = permissions_data.get("systemPermissions", []) + + logger.info( + "System permissions retrieved", + username=username, + system_permissions=system_permissions, + permissions_count=len(system_permissions), + ) + else: + logger.warning( + "Failed to get user permissions", + username=username, + status_code=perm_response.status_code, + response=perm_response.text[:200], + ) + system_permissions = [] + + except Exception as e: + logger.warning( + "Error getting user permissions", username=username, error=str(e) + ) + system_permissions = [] + + user_data["systemPermissions"] = system_permissions + + attributes = user_data.get("attributes", {}) + user_data.update( + { + "fullName": attributes.get("guac-full-name"), + "emailAddress": attributes.get("guac-email-address"), + "organization": attributes.get("guac-organization"), + "organizationalRole": attributes.get("guac-organizational-role"), + } + ) + + logger.info( + "User info retrieved successfully", + username=username, + system_permissions=system_permissions, + permissions_count=len(system_permissions), + ) + + return user_data + + except requests.exceptions.RequestException as e: + logger.error("Network error getting user info", error=str(e)) + return None + except Exception as e: + logger.error("Unexpected error getting user info", error=str(e)) + return None + + def create_jwt_for_user(self, user_info: Dict[str, Any]) -> str: + """ + Create JWT token for user with session storage. + + Args: + user_info: User information from authenticate_user. + + Returns: + JWT token. + """ + session_id = session_storage.create_session( + user_info=user_info, + guac_token=user_info["auth_token"], + expires_in_minutes=int( + os.getenv("JWT_ACCESS_TOKEN_EXPIRE_MINUTES", "60") + ), + ) + + return create_jwt_token(user_info, session_id) + + def get_user_connections(self, auth_token: str) -> List[Dict[str, Any]]: + """ + Get list of user connections. + + Args: + auth_token: User authentication token. + + Returns: + List of connections. 
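+
+        Example (illustrative; ``auth`` is a configured instance and
+        ``user_token`` a valid Guacamole token):
+
+            >>> for conn in auth.get_user_connections(user_token):
+            ...     print(conn.get("name"), conn.get("protocol"))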
+ """ + connections_url = ( + f"{self.base_url}/guacamole/api/session/data/postgresql/connections" + ) + headers = {"Guacamole-Token": auth_token} + + try: + response = self.session.get(connections_url, headers=headers, timeout=10) + + if response.status_code != 200: + logger.warning( + "Failed to get user connections", status_code=response.status_code + ) + return [] + + connections_data = response.json() + + if isinstance(connections_data, dict): + connections = list(connections_data.values()) + else: + connections = connections_data + + logger.debug("Retrieved user connections", count=len(connections)) + + return connections + + except requests.exceptions.RequestException as e: + logger.error("Network error getting connections", error=str(e)) + return [] + except Exception as e: + logger.error("Unexpected error getting connections", error=str(e)) + return [] + + def create_connection_with_token( + self, connection_config: Dict[str, Any], auth_token: str + ) -> Optional[Dict[str, Any]]: + """ + Create connection using user token. + + Args: + connection_config: Connection configuration. + auth_token: User authentication token. + + Returns: + Information about created connection or None. + """ + create_url = ( + f"{self.base_url}/guacamole/api/session/data/postgresql/connections" + ) + headers = { + "Content-Type": "application/json", + "Guacamole-Token": auth_token, + } + + try: + response = self.session.post( + create_url, headers=headers, json=connection_config, timeout=30 + ) + + if response.status_code not in [200, 201]: + logger.error( + "Failed to create connection", + status_code=response.status_code, + response=response.text[:500], + ) + return None + + created_connection = response.json() + connection_id = created_connection.get("identifier") + + if not connection_id: + logger.error( + "No connection ID in response", response=created_connection + ) + return None + + logger.info( + "Connection created successfully", + connection_id=connection_id, + protocol=connection_config.get("protocol"), + hostname=connection_config.get("parameters", {}).get("hostname"), + ) + + return created_connection + + except requests.exceptions.RequestException as e: + logger.error("Network error creating connection", error=str(e)) + return None + except Exception as e: + logger.error("Unexpected error creating connection", error=str(e)) + return None + + def delete_connection_with_token( + self, connection_id: str, auth_token: str + ) -> bool: + """ + Delete connection using user token. + + Args: + connection_id: Connection ID to delete. + auth_token: User authentication token. + + Returns: + True if deletion successful, False otherwise. 
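+
+        Example (illustrative; the connection ID and token are hypothetical):
+
+            >>> auth.delete_connection_with_token("42", user_token)
+            True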
+ """ + delete_url = ( + f"{self.base_url}/guacamole/api/session/data/postgresql/" + f"connections/{connection_id}" + ) + headers = {"Guacamole-Token": auth_token} + + try: + response = self.session.delete(delete_url, headers=headers, timeout=10) + + if response.status_code == 204: + logger.info("Connection deleted successfully", connection_id=connection_id) + return True + + logger.warning( + "Failed to delete connection", + connection_id=connection_id, + status_code=response.status_code, + response=response.text[:200], + ) + return False + + except requests.exceptions.RequestException as e: + logger.error( + "Network error deleting connection", + connection_id=connection_id, + error=str(e), + ) + return False + except Exception as e: + logger.error( + "Unexpected error deleting connection", + connection_id=connection_id, + error=str(e), + ) + return False + + def validate_token(self, auth_token: str) -> bool: + """ + Validate Guacamole token. + + Args: + auth_token: Token to validate. + + Returns: + True if token is valid, False otherwise. + """ + try: + user_info = self.get_user_info(auth_token) + return user_info is not None + except Exception: + return False + + def refresh_user_token( + self, username: str, current_token: str + ) -> Optional[str]: + """ + Refresh user token (if supported by Guacamole). + + Args: + username: Username. + current_token: Current token. + + Returns: + New token or None. + """ + logger.debug( + "Token refresh requested but not supported by Guacamole", username=username + ) + return None \ No newline at end of file diff --git a/guacamole_test_11_26/api/core/kms_provider.py b/guacamole_test_11_26/api/core/kms_provider.py new file mode 100755 index 00000000..08f58dc7 --- /dev/null +++ b/guacamole_test_11_26/api/core/kms_provider.py @@ -0,0 +1,474 @@ +"""Module for working with real KMS/HSM systems.""" + +import base64 +import logging +import os +from abc import ABC, abstractmethod +from typing import Any, Dict + +import boto3 +import requests +from botocore.exceptions import ClientError + +logger = logging.getLogger(__name__) + + +class KMSProvider(ABC): + """Abstract class for KMS providers.""" + + @abstractmethod + def encrypt(self, plaintext: bytes, key_id: str) -> bytes: + """ + Encrypt data using KMS. + + Args: + plaintext: Data to encrypt. + key_id: Key identifier. + + Returns: + Encrypted data. + """ + pass + + @abstractmethod + def decrypt(self, ciphertext: bytes, key_id: str) -> bytes: + """ + Decrypt data using KMS. + + Args: + ciphertext: Encrypted data. + key_id: Key identifier. + + Returns: + Decrypted data. + """ + pass + + @abstractmethod + def generate_data_key( + self, key_id: str, key_spec: str = "AES_256" + ) -> Dict[str, bytes]: + """ + Generate data encryption key. + + Args: + key_id: Key identifier. + key_spec: Key specification. + + Returns: + Dictionary with 'plaintext' and 'ciphertext' keys. + """ + pass + +class AWSKMSProvider(KMSProvider): + """AWS KMS provider.""" + + def __init__(self, region_name: str = "us-east-1") -> None: + """ + Initialize AWS KMS provider. + + Args: + region_name: AWS region name. 
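+
+        Example (illustrative; assumes AWS credentials are configured and a
+        key with alias ``alias/session-keys`` exists in the account):
+
+            >>> kms = AWSKMSProvider(region_name="eu-west-1")
+            >>> blob = kms.encrypt(b"secret", "alias/session-keys")
+            >>> kms.decrypt(blob, "alias/session-keys")
+            b'secret'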
+ """ + self.kms_client = boto3.client("kms", region_name=region_name) + self.region_name = region_name + + def encrypt(self, plaintext: bytes, key_id: str) -> bytes: + """Encrypt data using AWS KMS.""" + try: + response = self.kms_client.encrypt(KeyId=key_id, Plaintext=plaintext) + return response["CiphertextBlob"] + except ClientError as e: + logger.error("AWS KMS encryption failed: %s", e) + raise + + def decrypt(self, ciphertext: bytes, key_id: str) -> bytes: + """Decrypt data using AWS KMS.""" + try: + response = self.kms_client.decrypt( + CiphertextBlob=ciphertext, KeyId=key_id + ) + return response["Plaintext"] + except ClientError as e: + logger.error("AWS KMS decryption failed: %s", e) + raise + + def generate_data_key( + self, key_id: str, key_spec: str = "AES_256" + ) -> Dict[str, bytes]: + """Generate data encryption key.""" + try: + response = self.kms_client.generate_data_key( + KeyId=key_id, KeySpec=key_spec + ) + return { + "plaintext": response["Plaintext"], + "ciphertext": response["CiphertextBlob"], + } + except ClientError as e: + logger.error("AWS KMS data key generation failed: %s", e) + raise + +class GoogleCloudKMSProvider(KMSProvider): + """Google Cloud KMS provider.""" + + def __init__(self, project_id: str, location: str = "global") -> None: + """ + Initialize Google Cloud KMS provider. + + Args: + project_id: Google Cloud project ID. + location: Key location. + """ + self.project_id = project_id + self.location = location + self.base_url = ( + f"https://cloudkms.googleapis.com/v1/projects/{project_id}" + f"/locations/{location}" + ) + + def encrypt(self, plaintext: bytes, key_id: str) -> bytes: + """Encrypt data using Google Cloud KMS.""" + try: + url = ( + f"{self.base_url}/keyRings/default/cryptoKeys/{key_id}:encrypt" + ) + + response = requests.post( + url, + json={"plaintext": base64.b64encode(plaintext).decode()}, + headers={ + "Authorization": f"Bearer {self._get_access_token()}" + }, + timeout=30, + ) + response.raise_for_status() + + return base64.b64decode(response.json()["ciphertext"]) + + except requests.RequestException as e: + logger.error("Google Cloud KMS encryption failed: %s", e) + raise RuntimeError( + f"Google Cloud KMS encryption failed: {e}" + ) from e + + def decrypt(self, ciphertext: bytes, key_id: str) -> bytes: + """Decrypt data using Google Cloud KMS.""" + try: + url = ( + f"{self.base_url}/keyRings/default/cryptoKeys/{key_id}:decrypt" + ) + + response = requests.post( + url, + json={"ciphertext": base64.b64encode(ciphertext).decode()}, + headers={ + "Authorization": f"Bearer {self._get_access_token()}" + }, + timeout=30, + ) + response.raise_for_status() + + return base64.b64decode(response.json()["plaintext"]) + + except requests.RequestException as e: + logger.error("Google Cloud KMS decryption failed: %s", e) + raise RuntimeError( + f"Google Cloud KMS decryption failed: {e}" + ) from e + + def generate_data_key( + self, key_id: str, key_spec: str = "AES_256" + ) -> Dict[str, bytes]: + """Generate data encryption key.""" + try: + url = ( + f"{self.base_url}/keyRings/default/cryptoKeys/{key_id}" + ":generateDataKey" + ) + + response = requests.post( + url, + json={"keySpec": key_spec}, + headers={ + "Authorization": f"Bearer {self._get_access_token()}" + }, + timeout=30, + ) + response.raise_for_status() + + data = response.json() + return { + "plaintext": base64.b64decode(data["plaintext"]), + "ciphertext": base64.b64decode(data["ciphertext"]), + } + + except requests.RequestException as e: + logger.error("Google Cloud KMS data key 
generation failed: %s", e) + raise RuntimeError( + f"Google Cloud KMS data key generation failed: {e}" + ) from e + + def _get_access_token(self) -> str: + """ + Get access token for Google Cloud API. + + Note: In production, use service account or metadata server. + """ + return os.getenv("GOOGLE_CLOUD_ACCESS_TOKEN", "") + +class YubiHSMProvider(KMSProvider): + """YubiHSM provider (hardware security module).""" + + def __init__(self, hsm_url: str, auth_key_id: int) -> None: + """ + Initialize YubiHSM provider. + + Args: + hsm_url: YubiHSM URL. + auth_key_id: Authentication key ID. + """ + self.hsm_url = hsm_url + self.auth_key_id = auth_key_id + + def encrypt(self, plaintext: bytes, key_id: str) -> bytes: + """Encrypt data using YubiHSM.""" + try: + response = requests.post( + f"{self.hsm_url}/api/v1/encrypt", + json={"key_id": key_id, "plaintext": plaintext.hex()}, + headers={"Authorization": f"Bearer {self._get_hsm_token()}"}, + timeout=30, + ) + response.raise_for_status() + + return bytes.fromhex(response.json()["ciphertext"]) + + except requests.RequestException as e: + logger.error("YubiHSM encryption failed: %s", e) + raise RuntimeError(f"YubiHSM encryption failed: {e}") from e + + def decrypt(self, ciphertext: bytes, key_id: str) -> bytes: + """Decrypt data using YubiHSM.""" + try: + response = requests.post( + f"{self.hsm_url}/api/v1/decrypt", + json={"key_id": key_id, "ciphertext": ciphertext.hex()}, + headers={"Authorization": f"Bearer {self._get_hsm_token()}"}, + timeout=30, + ) + response.raise_for_status() + + return bytes.fromhex(response.json()["plaintext"]) + + except requests.RequestException as e: + logger.error("YubiHSM decryption failed: %s", e) + raise RuntimeError(f"YubiHSM decryption failed: {e}") from e + + def generate_data_key( + self, key_id: str, key_spec: str = "AES_256" + ) -> Dict[str, bytes]: + """Generate data encryption key.""" + try: + response = requests.post( + f"{self.hsm_url}/api/v1/generate-data-key", + json={"key_id": key_id, "key_spec": key_spec}, + headers={"Authorization": f"Bearer {self._get_hsm_token()}"}, + timeout=30, + ) + response.raise_for_status() + + data = response.json() + return { + "plaintext": bytes.fromhex(data["plaintext"]), + "ciphertext": bytes.fromhex(data["ciphertext"]), + } + + except requests.RequestException as e: + logger.error("YubiHSM data key generation failed: %s", e) + raise RuntimeError( + f"YubiHSM data key generation failed: {e}" + ) from e + + def _get_hsm_token(self) -> str: + """ + Get token for YubiHSM. + + Note: In production, use proper YubiHSM authentication. + """ + return os.getenv("YUBIHSM_TOKEN", "") + +class SecureKeyManager: + """Key manager using real KMS/HSM systems.""" + + def __init__(self, kms_provider: KMSProvider, master_key_id: str) -> None: + """ + Initialize secure key manager. + + Args: + kms_provider: KMS provider instance. + master_key_id: Master key identifier. + """ + self.kms_provider = kms_provider + self.master_key_id = master_key_id + self.key_cache: Dict[str, bytes] = {} + + def encrypt_session_key(self, session_key: bytes, session_id: str) -> bytes: + """ + Encrypt session key using KMS/HSM. + + Args: + session_key: Session key to encrypt. + session_id: Session ID for context. + + Returns: + Encrypted session key. 
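+
+        Example (illustrative envelope-encryption flow; the alias and
+        session ID are hypothetical):
+
+            >>> manager = SecureKeyManager(AWSKMSProvider(), "alias/session-keys")
+            >>> wrapped = manager.encrypt_session_key(os.urandom(32), "sess-1")
+            >>> # Persist only ``wrapped``; the plaintext key stays in memory.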
+ """ + try: + encrypted_key = self.kms_provider.encrypt( + session_key, self.master_key_id + ) + + logger.info( + "Session key encrypted with KMS/HSM", + extra={ + "session_id": session_id, + "key_length": len(session_key), + "encrypted_length": len(encrypted_key), + }, + ) + + return encrypted_key + + except Exception as e: + logger.error( + "Failed to encrypt session key with KMS/HSM", + extra={"session_id": session_id, "error": str(e)}, + ) + raise + + def decrypt_session_key( + self, encrypted_session_key: bytes, session_id: str + ) -> bytes: + """ + Decrypt session key using KMS/HSM. + + Args: + encrypted_session_key: Encrypted session key. + session_id: Session ID for context. + + Returns: + Decrypted session key. + """ + try: + decrypted_key = self.kms_provider.decrypt( + encrypted_session_key, self.master_key_id + ) + + logger.info( + "Session key decrypted with KMS/HSM", + extra={"session_id": session_id, "key_length": len(decrypted_key)}, + ) + + return decrypted_key + + except Exception as e: + logger.error( + "Failed to decrypt session key with KMS/HSM", + extra={"session_id": session_id, "error": str(e)}, + ) + raise + + def generate_encryption_key(self, session_id: str) -> Dict[str, bytes]: + """ + Generate encryption key using KMS/HSM. + + Args: + session_id: Session ID. + + Returns: + Dictionary with 'plaintext' and 'ciphertext' keys. + """ + try: + key_data = self.kms_provider.generate_data_key(self.master_key_id) + + logger.info( + "Encryption key generated with KMS/HSM", + extra={ + "session_id": session_id, + "key_length": len(key_data["plaintext"]), + }, + ) + + return key_data + + except Exception as e: + logger.error( + "Failed to generate encryption key with KMS/HSM", + extra={"session_id": session_id, "error": str(e)}, + ) + raise + + +class KMSProviderFactory: + """Factory for creating KMS providers.""" + + @staticmethod + def create_provider(provider_type: str, **kwargs: Any) -> KMSProvider: + """ + Create KMS provider by type. + + Args: + provider_type: Provider type ('aws', 'gcp', 'yubihsm'). + **kwargs: Provider-specific arguments. + + Returns: + KMS provider instance. + + Raises: + ValueError: If provider type is unsupported. + """ + if provider_type == "aws": + return AWSKMSProvider(**kwargs) + if provider_type == "gcp": + return GoogleCloudKMSProvider(**kwargs) + if provider_type == "yubihsm": + return YubiHSMProvider(**kwargs) + + raise ValueError(f"Unsupported KMS provider: {provider_type}") + + +def get_secure_key_manager() -> SecureKeyManager: + """ + Get configured secure key manager. + + Returns: + Configured SecureKeyManager instance. + + Raises: + ValueError: If provider type is unsupported. 
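+
+    Example (illustrative environment configuration; the values shown are
+    hypothetical):
+
+        KMS_PROVIDER=aws
+        KMS_MASTER_KEY_ID=alias/session-keys
+        AWS_REGION=eu-west-1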
+    """
+    provider_type = os.getenv("KMS_PROVIDER", "aws")
+    master_key_id = os.getenv("KMS_MASTER_KEY_ID", "alias/session-keys")
+
+    if provider_type == "aws":
+        provider = AWSKMSProvider(
+            region_name=os.getenv("AWS_REGION", "us-east-1")
+        )
+    elif provider_type == "gcp":
+        provider = GoogleCloudKMSProvider(
+            project_id=os.getenv("GCP_PROJECT_ID", ""),
+            location=os.getenv("GCP_LOCATION", "global"),
+        )
+    elif provider_type == "yubihsm":
+        provider = YubiHSMProvider(
+            hsm_url=os.getenv("YUBIHSM_URL", ""),
+            auth_key_id=int(os.getenv("YUBIHSM_AUTH_KEY_ID", "0")),
+        )
+    else:
+        raise ValueError(f"Unsupported KMS provider: {provider_type}")
+
+    return SecureKeyManager(provider, master_key_id)
+
+
+secure_key_manager = get_secure_key_manager()
diff --git a/guacamole_test_11_26/api/core/log_sanitizer.py b/guacamole_test_11_26/api/core/log_sanitizer.py
new file mode 100755
index 00000000..99f2fe32
--- /dev/null
+++ b/guacamole_test_11_26/api/core/log_sanitizer.py
@@ -0,0 +1,286 @@
+"""Log sanitization for removing sensitive information from logs."""
+
+import json
+import re
+from typing import Any, Dict
+
+import structlog
+
+logger = structlog.get_logger(__name__)
+
+
+class LogSanitizer:
+    """Class for cleaning logs from sensitive information."""
+
+    def __init__(self) -> None:
+        """Initialize LogSanitizer with sensitive fields and patterns."""
+        self.sensitive_fields = {
+            "password",
+            "passwd",
+            "pwd",
+            "secret",
+            "token",
+            "key",
+            "auth_token",
+            "guac_token",
+            "jwt_token",
+            "access_token",
+            "refresh_token",
+            "api_key",
+            "private_key",
+            "encryption_key",
+            "session_id",
+            "cookie",
+            "authorization",
+            "credential",
+            "credentials",
+            "global_credentials",
+            "machine_credentials",
+            "ssh_password",
+            "ssh_username",
+            "credential_hash",
+            "password_hash",
+            "password_salt",
+            "encrypted_password",
+        }
+
+        # The capture group excludes quotes and whitespace (\s) so the whole
+        # value is captured and masked.
+        self.sensitive_patterns = [
+            r'password["\']?\s*[:=]\s*["\']?([^"\'\s]+)["\']?',
+            r'token["\']?\s*[:=]\s*["\']?([^"\'\s]+)["\']?',
+            r'key["\']?\s*[:=]\s*["\']?([^"\'\s]+)["\']?',
+            r'secret["\']?\s*[:=]\s*["\']?([^"\'\s]+)["\']?',
+            r'authorization["\']?\s*[:=]\s*["\']?([^"\'\s]+)["\']?',
+        ]
+
+        self.jwt_pattern = re.compile(
+            r"\b[A-Za-z0-9_-]+\.[A-Za-z0-9_-]+\.[A-Za-z0-9_-]+\b"
+        )
+        self.api_key_pattern = re.compile(r"\b[A-Za-z0-9]{32,}\b")
+
+    def mask_sensitive_value(self, value: str, mask_char: str = "*") -> str:
+        """
+        Mask sensitive value.
+
+        Args:
+            value: Value to mask.
+            mask_char: Character to use for masking.
+
+        Returns:
+            Masked value.
+        """
+        if not value or len(value) <= 4:
+            return mask_char * 4
+
+        if len(value) <= 8:
+            return value[:2] + mask_char * (len(value) - 4) + value[-2:]
+
+        return value[:4] + mask_char * (len(value) - 8) + value[-4:]
+
+    def sanitize_string(self, text: str) -> str:
+        """
+        Clean string from sensitive information.
+
+        Args:
+            text: Text to clean.
+
+        Returns:
+            Cleaned text.
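+
+        Example (masking keeps the first and last four characters of longer
+        values):
+
+            >>> LogSanitizer().sanitize_string("password=hunter2secret")
+            'password=hunt*****cret'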
+ """ + if not isinstance(text, str): + return text + + sanitized = text + + sanitized = self.jwt_pattern.sub( + lambda m: self.mask_sensitive_value(m.group(0)), sanitized + ) + + sanitized = self.api_key_pattern.sub( + lambda m: self.mask_sensitive_value(m.group(0)), sanitized + ) + + for pattern in self.sensitive_patterns: + sanitized = re.sub( + pattern, + lambda m: m.group(0).replace( + m.group(1), self.mask_sensitive_value(m.group(1)) + ), + sanitized, + flags=re.IGNORECASE, + ) + + return sanitized + + def sanitize_dict( + self, data: Dict[str, Any], max_depth: int = 10 + ) -> Dict[str, Any]: + """ + Recursively clean dictionary from sensitive information. + + Args: + data: Dictionary to clean. + max_depth: Maximum recursion depth. + + Returns: + Cleaned dictionary. + """ + if max_depth <= 0: + return {"error": "max_depth_exceeded"} + + if not isinstance(data, dict): + return data + + sanitized = {} + + for key, value in data.items(): + key_lower = key.lower() + is_sensitive_key = any( + sensitive_field in key_lower + for sensitive_field in self.sensitive_fields + ) + + if is_sensitive_key: + if isinstance(value, str): + sanitized[key] = self.mask_sensitive_value(value) + elif isinstance(value, (dict, list)): + sanitized[key] = self.sanitize_value(value, max_depth - 1) + else: + sanitized[key] = "[MASKED]" + else: + sanitized[key] = self.sanitize_value(value, max_depth - 1) + + return sanitized + + def sanitize_value(self, value: Any, max_depth: int = 10) -> Any: + """ + Clean value of any type. + + Args: + value: Value to clean. + max_depth: Maximum recursion depth. + + Returns: + Cleaned value. + """ + if max_depth <= 0: + return "[max_depth_exceeded]" + + if isinstance(value, str): + return self.sanitize_string(value) + + if isinstance(value, dict): + return self.sanitize_dict(value, max_depth) + + if isinstance(value, list): + return [ + self.sanitize_value(item, max_depth - 1) for item in value + ] + + if isinstance(value, (int, float, bool, type(None))): + return value + + return self.sanitize_string(str(value)) + + def sanitize_log_event( + self, event_dict: Dict[str, Any] + ) -> Dict[str, Any]: + """ + Clean log event from sensitive information. + + Args: + event_dict: Log event dictionary. + + Returns: + Cleaned event dictionary. + """ + try: + sanitized_event = event_dict.copy() + sanitized_event = self.sanitize_dict(sanitized_event) + + special_fields = ["request_body", "response_body", "headers"] + for field in special_fields: + if field in sanitized_event: + sanitized_event[field] = self.sanitize_value( + sanitized_event[field] + ) + + return sanitized_event + + except Exception as e: + logger.error("Error sanitizing log event", error=str(e)) + return { + "error": "sanitization_failed", + "original_error": str(e), + } + + def sanitize_json_string(self, json_string: str) -> str: + """ + Clean JSON string from sensitive information. + + Args: + json_string: JSON string to clean. + + Returns: + Cleaned JSON string. + """ + try: + data = json.loads(json_string) + sanitized_data = self.sanitize_value(data) + return json.dumps(sanitized_data, ensure_ascii=False) + + except json.JSONDecodeError: + return self.sanitize_string(json_string) + except Exception as e: + logger.error("Error sanitizing JSON string", error=str(e)) + return json_string + + def is_sensitive_field(self, field_name: str) -> bool: + """ + Check if field is sensitive. + + Args: + field_name: Field name. + + Returns: + True if field is sensitive. 
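+
+        Example:
+
+            >>> LogSanitizer().is_sensitive_field("ssh_password")
+            True
+            >>> LogSanitizer().is_sensitive_field("hostname")
+            False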
+ """ + field_lower = field_name.lower() + return any( + sensitive_field in field_lower + for sensitive_field in self.sensitive_fields + ) + + def get_sanitization_stats(self) -> Dict[str, Any]: + """ + Get sanitization statistics. + + Returns: + Sanitization statistics dictionary. + """ + return { + "sensitive_fields_count": len(self.sensitive_fields), + "sensitive_patterns_count": len(self.sensitive_patterns), + "sensitive_fields": list(self.sensitive_fields), + "jwt_pattern_active": bool(self.jwt_pattern), + "api_key_pattern_active": bool(self.api_key_pattern), + } + + +log_sanitizer = LogSanitizer() + + +def sanitize_log_processor( + logger: Any, name: str, event_dict: Dict[str, Any] +) -> Dict[str, Any]: + """ + Processor for structlog for automatic log sanitization. + + Args: + logger: Logger instance. + name: Logger name. + event_dict: Event dictionary. + + Returns: + Sanitized event dictionary. + """ + return log_sanitizer.sanitize_log_event(event_dict) diff --git a/guacamole_test_11_26/api/core/middleware.py b/guacamole_test_11_26/api/core/middleware.py new file mode 100755 index 00000000..dc6e5046 --- /dev/null +++ b/guacamole_test_11_26/api/core/middleware.py @@ -0,0 +1,296 @@ +"""Authentication and authorization middleware.""" + +from typing import Any, Awaitable, Callable, Dict, Optional + +from fastapi import HTTPException, Request, Response +from fastapi.responses import JSONResponse +import structlog + +from .models import UserRole +from .permissions import PermissionChecker +from .session_storage import session_storage +from .utils import extract_token_from_header, verify_jwt_token + +logger = structlog.get_logger(__name__) + +# Public endpoints that don't require authentication +PUBLIC_PATHS = { + "/", + "/api/health", + "/api/docs", + "/api/openapi.json", + "/api/redoc", + "/favicon.ico", + "/api/auth/login", +} + +# Static file prefixes for FastAPI (Swagger UI) +STATIC_PREFIXES = [ + "/static/", + "/docs/", + "/redoc/", + "/api/static/", + "/api/docs/", + "/api/redoc/", +] + + +async def jwt_auth_middleware( + request: Request, call_next: Callable[[Request], Awaitable[Response]] +) -> Response: + """ + Middleware for JWT token verification and user authentication. 
+ + Supports JWT token in Authorization header: Bearer + """ + if request.method == "OPTIONS": + return await call_next(request) + + path = request.url.path + if path in PUBLIC_PATHS: + return await call_next(request) + + if any(path.startswith(prefix) for prefix in STATIC_PREFIXES): + return await call_next(request) + + user_token: Optional[str] = None + user_info: Optional[Dict[str, Any]] = None + auth_method: Optional[str] = None + + try: + auth_header = request.headers.get("Authorization") + if auth_header: + jwt_token = extract_token_from_header(auth_header) + if jwt_token: + jwt_payload = verify_jwt_token(jwt_token) + if jwt_payload: + session_id = jwt_payload.get("session_id") + if session_id: + session_data = session_storage.get_session(session_id) + if session_data: + user_token = session_data.get("guac_token") + else: + logger.warning( + "Session not found in Redis", + session_id=session_id, + username=jwt_payload.get("username"), + ) + else: + user_token = jwt_payload.get("guac_token") + + user_info = { + "username": jwt_payload["username"], + "role": jwt_payload["role"], + "permissions": jwt_payload.get("permissions", []), + "full_name": jwt_payload.get("full_name"), + "email": jwt_payload.get("email"), + "organization": jwt_payload.get("organization"), + "organizational_role": jwt_payload.get("organizational_role"), + } + auth_method = "jwt" + + logger.debug( + "JWT authentication successful", + username=user_info["username"], + role=user_info["role"], + has_session=session_id is not None, + has_token=user_token is not None, + ) + + if not user_token or not user_info: + logger.info( + "Authentication required", + path=path, + method=request.method, + client_ip=request.client.host if request.client else "unknown", + ) + + return JSONResponse( + status_code=401, + content={ + "error": "Authentication required", + "message": ( + "Provide JWT token in Authorization header. 
" + "Get token via /auth/login" + ), + "login_endpoint": "/auth/login", + }, + ) + + user_role = UserRole(user_info["role"]) + allowed, reason = PermissionChecker.check_endpoint_access( + user_role, request.method, path + ) + + if not allowed: + logger.warning( + "Access denied to endpoint", + username=user_info["username"], + role=user_info["role"], + endpoint=f"{request.method} {path}", + reason=reason, + ) + + return JSONResponse( + status_code=403, + content={ + "error": "Access denied", + "message": reason, + "required_role": "Higher privileges required", + }, + ) + + request.state.user_token = user_token + request.state.user_info = user_info + request.state.auth_method = auth_method + + logger.debug( + "Authentication and authorization successful", + username=user_info["username"], + role=user_info["role"], + auth_method=auth_method, + endpoint=f"{request.method} {path}", + ) + + response = await call_next(request) + + if hasattr(request.state, "user_info"): + response.headers["X-User"] = request.state.user_info["username"] + response.headers["X-User-Role"] = request.state.user_info["role"] + response.headers["X-Auth-Method"] = request.state.auth_method + + return response + + except HTTPException: + raise + except Exception as e: + logger.error( + "Unexpected error in auth middleware", + error=str(e), + path=path, + method=request.method, + ) + + return JSONResponse( + status_code=500, + content={ + "error": "Internal server error", + "message": "Authentication system error", + }, + ) + + +def get_current_user(request: Request) -> Optional[Dict[str, Any]]: + """ + Get current user information from request.state. + + Args: + request: FastAPI Request object. + + Returns: + User information dictionary or None. + """ + return getattr(request.state, "user_info", None) + + +def get_current_user_token(request: Request) -> Optional[str]: + """ + Get current user token from request.state. + + Args: + request: FastAPI Request object. + + Returns: + User token string or None. + """ + return getattr(request.state, "user_token", None) + + +def require_role(required_role: UserRole) -> Callable: + """ + Decorator to check user role. + + Args: + required_role: Required user role. + + Returns: + Function decorator. + """ + def decorator(func: Callable) -> Callable: + async def wrapper(request: Request, *args: Any, **kwargs: Any) -> Any: + user_info = get_current_user(request) + if not user_info: + raise HTTPException( + status_code=401, detail="Authentication required" + ) + + user_role = UserRole(user_info["role"]) + permission = f"role_{required_role.value}" + if not PermissionChecker.check_permission(user_role, permission): + raise HTTPException( + status_code=403, + detail=f"Role {required_role.value} required", + ) + + return await func(request, *args, **kwargs) + + return wrapper + + return decorator + + +def require_permission(permission: str) -> Callable: + """ + Decorator to check specific permission. + + Args: + permission: Required permission string. + + Returns: + Function decorator. 
+ """ + def decorator(func: Callable) -> Callable: + async def wrapper(request: Request, *args: Any, **kwargs: Any) -> Any: + user_info = get_current_user(request) + if not user_info: + raise HTTPException( + status_code=401, detail="Authentication required" + ) + + user_role = UserRole(user_info["role"]) + if not PermissionChecker.check_permission(user_role, permission): + raise HTTPException( + status_code=403, + detail=f"Permission '{permission}' required", + ) + + return await func(request, *args, **kwargs) + + return wrapper + + return decorator + + +async def validate_connection_ownership( + request: Request, connection_id: str +) -> bool: + """ + Check user permissions for connection management. + + Args: + request: FastAPI Request object. + connection_id: Connection ID. + + Returns: + True if user can manage the connection, False otherwise. + """ + user_info = get_current_user(request) + if not user_info: + return False + + user_role = UserRole(user_info["role"]) + + if PermissionChecker.can_delete_any_connection(user_role): + return True + + return True \ No newline at end of file diff --git a/guacamole_test_11_26/api/core/models.py b/guacamole_test_11_26/api/core/models.py new file mode 100755 index 00000000..8fdb4f65 --- /dev/null +++ b/guacamole_test_11_26/api/core/models.py @@ -0,0 +1,343 @@ +""" +Pydantic models for authentication system. +""" + +from enum import Enum +from typing import Any, Dict, List, Optional + +from pydantic import BaseModel, Field + + +class UserRole(str, Enum): + """User roles in the system.""" + + GUEST = "GUEST" + USER = "USER" + ADMIN = "ADMIN" + SUPER_ADMIN = "SUPER_ADMIN" + + +class LoginRequest(BaseModel): + """Authentication request.""" + + username: str = Field(..., description="Username in Guacamole") + password: str = Field(..., description="Password in Guacamole") + + +class LoginResponse(BaseModel): + """Successful authentication response.""" + + access_token: str = Field(..., description="JWT access token") + token_type: str = Field(default="bearer", description="Token type") + expires_in: int = Field(..., description="Token lifetime in seconds") + user_info: Dict[str, Any] = Field(..., description="User information") + + +class UserInfo(BaseModel): + """User information.""" + + username: str = Field(..., description="Username") + role: UserRole = Field(..., description="User role") + permissions: List[str] = Field( + default_factory=list, description="System permissions" + ) + full_name: Optional[str] = Field(None, description="Full name") + email: Optional[str] = Field(None, description="Email address") + organization: Optional[str] = Field(None, description="Organization") + organizational_role: Optional[str] = Field(None, description="Job title") + + +class ConnectionRequest(BaseModel): + """Connection creation request. 
+ + Requires JWT token in Authorization header: Bearer + Get token via /auth/login + """ + + hostname: str = Field(..., description="IP address or hostname") + protocol: str = Field( + default="rdp", description="Connection protocol (rdp, vnc, ssh)" + ) + username: Optional[str] = Field( + None, description="Username for remote machine connection" + ) + password: Optional[str] = Field( + None, + description="Encrypted password for remote machine connection (Base64 AES-256-GCM)", + ) + port: Optional[int] = Field( + None, description="Port (default used if not specified)" + ) + ttl_minutes: Optional[int] = Field( + default=60, description="Connection lifetime in minutes" + ) + + enable_sftp: Optional[bool] = Field( + default=True, description="Enable SFTP for SSH (file browser with drag'n'drop)" + ) + sftp_root_directory: Optional[str] = Field( + default="/", description="Root directory for SFTP (default: /)" + ) + sftp_server_alive_interval: Optional[int] = Field( + default=0, description="SFTP keep-alive interval in seconds (0 = disabled)" + ) + + +class ConnectionResponse(BaseModel): + """Connection creation response.""" + + connection_id: str = Field(..., description="Created connection ID") + connection_url: str = Field(..., description="URL to access connection") + status: str = Field(..., description="Connection status") + expires_at: str = Field(..., description="Connection expiration time") + ttl_minutes: int = Field(..., description="TTL in minutes") + + +class RefreshTokenRequest(BaseModel): + """Token refresh request.""" + + refresh_token: str = Field(..., description="Refresh token") + + +class LogoutRequest(BaseModel): + """Logout request.""" + + token: str = Field(..., description="Token to revoke") + + +class PermissionCheckRequest(BaseModel): + """Permission check request.""" + + action: str = Field(..., description="Action to check") + resource: Optional[str] = Field(None, description="Resource (optional)") + + +class PermissionCheckResponse(BaseModel): + """Permission check response.""" + + allowed: bool = Field(..., description="Whether action is allowed") + reason: Optional[str] = Field(None, description="Denial reason (if applicable)") + + +class SavedMachineCreate(BaseModel): + """Saved machine create/update request.""" + + name: str = Field(..., min_length=1, max_length=255, description="Machine name") + hostname: str = Field( + ..., min_length=1, max_length=255, description="IP address or hostname" + ) + port: int = Field(..., gt=0, lt=65536, description="Connection port") + protocol: str = Field( + ..., description="Connection protocol (rdp, ssh, vnc, telnet)" + ) + os: Optional[str] = Field( + None, + max_length=255, + description="Operating system (e.g., Windows Server 2019, Ubuntu 22.04)", + ) + description: Optional[str] = Field(None, description="Machine description") + tags: Optional[List[str]] = Field( + default_factory=list, description="Tags for grouping" + ) + is_favorite: bool = Field(default=False, description="Favorite machine") + + class Config: + json_schema_extra = { + "example": { + "name": "Production Web Server", + "hostname": "192.168.1.100", + "port": 3389, + "protocol": "rdp", + "os": "Windows Server 2019", + "description": "Main production web server", + "tags": ["production", "web"], + "is_favorite": True, + } + } + + +class SavedMachineUpdate(BaseModel): + """Saved machine partial update request.""" + + name: Optional[str] = Field(None, min_length=1, max_length=255) + hostname: Optional[str] = Field(None, min_length=1, max_length=255) + 
port: Optional[int] = Field(None, gt=0, lt=65536) + protocol: Optional[str] = None + os: Optional[str] = Field(None, max_length=255, description="Operating system") + description: Optional[str] = None + tags: Optional[List[str]] = None + is_favorite: Optional[bool] = None + + +class SavedMachineResponse(BaseModel): + """Saved machine information response.""" + + id: str = Field(..., description="Machine UUID") + user_id: str = Field(..., description="Owner user ID") + name: str = Field(..., description="Machine name") + hostname: str = Field(..., description="IP address or hostname") + port: int = Field(..., description="Connection port") + protocol: str = Field(..., description="Connection protocol") + os: Optional[str] = Field(None, description="Operating system") + description: Optional[str] = Field(None, description="Description") + tags: List[str] = Field(default_factory=list, description="Tags") + is_favorite: bool = Field(default=False, description="Favorite") + created_at: str = Field(..., description="Creation date (ISO 8601)") + updated_at: str = Field(..., description="Update date (ISO 8601)") + last_connected_at: Optional[str] = Field( + None, description="Last connection (ISO 8601)" + ) + connection_stats: Optional[Dict[str, Any]] = Field( + None, description="Connection statistics" + ) + + class Config: + from_attributes = True + + +class SavedMachineList(BaseModel): + """Saved machines list.""" + + total: int = Field(..., description="Total number of machines") + machines: List[SavedMachineResponse] = Field(..., description="List of machines") + + +class ConnectionHistoryCreate(BaseModel): + """Connection history record creation request.""" + + machine_id: str = Field(..., description="Machine UUID") + success: bool = Field(default=True, description="Successful connection") + error_message: Optional[str] = Field(None, description="Error message") + duration_seconds: Optional[int] = Field( + None, description="Connection duration in seconds" + ) + + +class ConnectionHistoryResponse(BaseModel): + """Connection history record information response.""" + + id: str = Field(..., description="Record UUID") + user_id: str = Field(..., description="User ID") + machine_id: str = Field(..., description="Machine UUID") + connected_at: str = Field(..., description="Connection time (ISO 8601)") + disconnected_at: Optional[str] = Field( + None, description="Disconnection time (ISO 8601)" + ) + duration_seconds: Optional[int] = Field(None, description="Duration") + success: bool = Field(..., description="Successful connection") + error_message: Optional[str] = Field(None, description="Error message") + client_ip: Optional[str] = Field(None, description="Client IP") + + class Config: + from_attributes = True + + +class BulkHealthCheckRequest(BaseModel): + """Bulk machine availability check request.""" + + machine_ids: List[str] = Field( + ..., min_items=1, max_items=200, description="List of machine IDs to check" + ) + timeout: int = Field( + default=5, ge=1, le=30, description="Timeout for each check in seconds" + ) + check_port: bool = Field(default=True, description="Check connection port") + + +class BulkHealthCheckResult(BaseModel): + """Single machine check result.""" + + machine_id: str = Field(..., description="Machine ID") + machine_name: str = Field(..., description="Machine name") + hostname: str = Field(..., description="Hostname/IP") + status: str = Field(..., description="success, failed, timeout") + available: bool = Field(..., description="Machine is available") + 
response_time_ms: Optional[int] = Field( + None, description="Response time in milliseconds" + ) + error: Optional[str] = Field(None, description="Error message") + checked_at: str = Field(..., description="Check time (ISO 8601)") + + +class BulkHealthCheckResponse(BaseModel): + """Bulk availability check response.""" + + total: int = Field(..., description="Total number of machines") + success: int = Field(..., description="Number of successful checks") + failed: int = Field(..., description="Number of failed checks") + available: int = Field(..., description="Number of available machines") + unavailable: int = Field(..., description="Number of unavailable machines") + results: List[BulkHealthCheckResult] = Field(..., description="Detailed results") + execution_time_ms: int = Field( + ..., description="Total execution time in milliseconds" + ) + started_at: str = Field(..., description="Start time (ISO 8601)") + completed_at: str = Field(..., description="Completion time (ISO 8601)") + + +class SSHCredentials(BaseModel): + """SSH credentials for machine.""" + + username: str = Field(..., min_length=1, max_length=255, description="SSH username") + password: str = Field( + ..., min_length=1, description="SSH password (will be encrypted in transit)" + ) + + +class BulkSSHCommandRequest(BaseModel): + """Bulk SSH command execution request.""" + + machine_ids: List[str] = Field( + ..., min_items=1, max_items=100, description="List of machine IDs" + ) + machine_hostnames: Optional[Dict[str, str]] = Field( + None, + description="Optional hostname/IP for non-saved machines {machine_id: hostname}", + ) + command: str = Field( + ..., min_length=1, max_length=500, description="SSH command to execute" + ) + credentials_mode: str = Field( + ..., description="Credentials mode: 'global' (same for all), 'custom' (per-machine)" + ) + global_credentials: Optional[SSHCredentials] = Field( + None, description="Shared credentials for all machines (mode 'global')" + ) + machine_credentials: Optional[Dict[str, SSHCredentials]] = Field( + None, description="Individual credentials (mode 'custom')" + ) + timeout: int = Field( + default=30, ge=5, le=300, description="Command execution timeout (seconds)" + ) + + +class BulkSSHCommandResult(BaseModel): + """Single machine SSH command execution result.""" + + machine_id: str = Field(..., description="Machine ID") + machine_name: str = Field(..., description="Machine name") + hostname: str = Field(..., description="Hostname/IP") + status: str = Field(..., description="success, failed, timeout, no_credentials") + exit_code: Optional[int] = Field(None, description="Command exit code") + stdout: Optional[str] = Field(None, description="Stdout output") + stderr: Optional[str] = Field(None, description="Stderr output") + error: Optional[str] = Field(None, description="Error message") + execution_time_ms: Optional[int] = Field( + None, description="Execution time in milliseconds" + ) + executed_at: str = Field(..., description="Execution time (ISO 8601)") + + +class BulkSSHCommandResponse(BaseModel): + """Bulk SSH command execution response.""" + + total: int = Field(..., description="Total number of machines") + success: int = Field(..., description="Number of successful executions") + failed: int = Field(..., description="Number of failed executions") + results: List[BulkSSHCommandResult] = Field(..., description="Detailed results") + execution_time_ms: int = Field( + ..., description="Total execution time in milliseconds" + ) + command: str = Field(..., 
description="Executed command") + started_at: str = Field(..., description="Start time (ISO 8601)") + completed_at: str = Field(..., description="Completion time (ISO 8601)") \ No newline at end of file diff --git a/guacamole_test_11_26/api/core/permissions.py b/guacamole_test_11_26/api/core/permissions.py new file mode 100755 index 00000000..7f7c6dbd --- /dev/null +++ b/guacamole_test_11_26/api/core/permissions.py @@ -0,0 +1,292 @@ +"""Permission and role system for Remote Access API.""" + +from typing import Dict, FrozenSet, List, Optional, Tuple + +import structlog + +from .models import UserRole + +logger = structlog.get_logger(__name__) + + +class PermissionChecker: + """User permission checker class.""" + + ROLE_MAPPING: Dict[str, UserRole] = { + "ADMINISTER": UserRole.SUPER_ADMIN, + "CREATE_USER": UserRole.ADMIN, + "CREATE_CONNECTION": UserRole.USER, + } + + ROLE_PERMISSIONS: Dict[UserRole, FrozenSet[str]] = { + UserRole.GUEST: frozenset({ + "view_own_connections", + "view_own_profile" + }), + UserRole.USER: frozenset({ + "view_own_connections", + "view_own_profile", + "create_connections", + "delete_own_connections", + }), + UserRole.ADMIN: frozenset({ + "view_own_connections", + "view_own_profile", + "create_connections", + "delete_own_connections", + "view_all_connections", + "delete_any_connection", + "view_system_stats", + "view_system_metrics", + }), + UserRole.SUPER_ADMIN: frozenset({ + "view_own_connections", + "view_own_profile", + "create_connections", + "delete_own_connections", + "view_all_connections", + "delete_any_connection", + "view_system_stats", + "view_system_metrics", + "reset_system_stats", + "manage_users", + "view_system_logs", + "change_system_config", + }), + } + + ENDPOINT_PERMISSIONS = { + "POST /connect": "create_connections", + "GET /connections": "view_own_connections", + "DELETE /connections": "delete_own_connections", + "GET /stats": "view_system_stats", + "GET /metrics": "view_system_metrics", + "POST /stats/reset": "reset_system_stats", + "GET /auth/profile": "view_own_profile", + "GET /auth/permissions": "view_own_profile", + } + + @classmethod + def determine_role_from_permissions(cls, guacamole_permissions: List[str]) -> UserRole: + """ + Determine user role based on Guacamole system permissions. + + Args: + guacamole_permissions: List of system permissions from Guacamole. + + Returns: + User role. + """ + for permission, role in cls.ROLE_MAPPING.items(): + if permission in guacamole_permissions: + logger.debug( + "Role determined from permission", + permission=permission, + role=role.value, + all_permissions=guacamole_permissions, + ) + return role + + logger.debug( + "No system permissions found, assigning GUEST role", + permissions=guacamole_permissions, + ) + return UserRole.GUEST + + @classmethod + def get_role_permissions(cls, role: UserRole) -> FrozenSet[str]: + """ + Get all permissions for a role. + + Args: + role: User role. + + Returns: + Frozen set of permissions. + """ + return cls.ROLE_PERMISSIONS.get(role, frozenset()) + + @classmethod + def check_permission(cls, user_role: UserRole, permission: str) -> bool: + """ + Check if role has specific permission. + + Args: + user_role: User role. + permission: Permission to check. + + Returns: + True if permission exists, False otherwise. 
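+
+        Example (follows directly from the ROLE_PERMISSIONS table above):
+
+            >>> PermissionChecker.check_permission(UserRole.USER, "create_connections")
+            True
+            >>> PermissionChecker.check_permission(UserRole.GUEST, "create_connections")
+            False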
+ """ + role_permissions = cls.get_role_permissions(user_role) + has_permission = permission in role_permissions + + logger.debug( + "Permission check", + role=user_role.value, + permission=permission, + allowed=has_permission, + ) + + return has_permission + + @classmethod + def check_endpoint_access( + cls, user_role: UserRole, method: str, path: str + ) -> Tuple[bool, Optional[str]]: + """ + Check endpoint access. + + Args: + user_role: User role. + method: HTTP method (GET, POST, DELETE, etc.). + path: Endpoint path. + + Returns: + Tuple of (allowed: bool, reason: Optional[str]). + """ + endpoint_key = f"{method} {path}" + + required_permission = cls.ENDPOINT_PERMISSIONS.get(endpoint_key) + + if not required_permission: + for pattern, permission in cls.ENDPOINT_PERMISSIONS.items(): + if cls._match_endpoint_pattern(endpoint_key, pattern): + required_permission = permission + break + + if not required_permission: + return True, None + + has_permission = cls.check_permission(user_role, required_permission) + + if not has_permission: + reason = ( + f"Required permission '{required_permission}' " + f"not granted to role '{user_role.value}'" + ) + logger.info( + "Endpoint access denied", + role=user_role.value, + endpoint=endpoint_key, + required_permission=required_permission, + reason=reason, + ) + return False, reason + + logger.debug( + "Endpoint access granted", + role=user_role.value, + endpoint=endpoint_key, + required_permission=required_permission, + ) + + return True, None + + @classmethod + def _match_endpoint_pattern(cls, endpoint: str, pattern: str) -> bool: + """ + Check if endpoint matches pattern. + + Args: + endpoint: Endpoint to check (e.g., "DELETE /connections/123"). + pattern: Pattern (e.g., "DELETE /connections"). + + Returns: + True if matches. + """ + if pattern.endswith("/connections"): + base_pattern = pattern.replace("/connections", "/connections/") + return endpoint.startswith(base_pattern) + + return False + + @classmethod + def check_connection_ownership( + cls, user_role: UserRole, username: str, connection_owner: str + ) -> Tuple[bool, Optional[str]]: + """ + Check connection management rights. + + Args: + user_role: User role. + username: Username. + connection_owner: Connection owner. + + Returns: + Tuple of (allowed: bool, reason: Optional[str]). + """ + if user_role in (UserRole.ADMIN, UserRole.SUPER_ADMIN): + return True, None + + if username == connection_owner: + return True, None + + reason = ( + f"User '{username}' cannot manage connection owned by '{connection_owner}'" + ) + logger.info( + "Connection ownership check failed", + user=username, + owner=connection_owner, + role=user_role.value, + reason=reason, + ) + + return False, reason + + @classmethod + def can_view_all_connections(cls, user_role: UserRole) -> bool: + """ + Check if user can view all connections. + + Args: + user_role: User role. + + Returns: + True if can view all connections. + """ + return cls.check_permission(user_role, "view_all_connections") + + @classmethod + def can_delete_any_connection(cls, user_role: UserRole) -> bool: + """ + Check if user can delete any connection. + + Args: + user_role: User role. + + Returns: + True if can delete any connection. + """ + return cls.check_permission(user_role, "delete_any_connection") + + @classmethod + def get_user_permissions_list(cls, user_role: UserRole) -> List[str]: + """ + Get sorted list of user permissions for API response. + + Args: + user_role: User role. + + Returns: + Sorted list of permissions. 
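+
+        Example:
+
+            >>> PermissionChecker.get_user_permissions_list(UserRole.GUEST)
+            ['view_own_connections', 'view_own_profile']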
+        """
+        permissions = cls.get_role_permissions(user_role)
+        return sorted(permissions)
+
+    @classmethod
+    def validate_role_hierarchy(
+        cls, current_user_role: UserRole, target_user_role: UserRole
+    ) -> bool:
+        """
+        Validate role hierarchy for user management.
+
+        Args:
+            current_user_role: Current user role.
+            target_user_role: Target user role.
+
+        Returns:
+            True if current user can manage target user.
+        """
+        return current_user_role == UserRole.SUPER_ADMIN
\ No newline at end of file
diff --git a/guacamole_test_11_26/api/core/pki_certificate_verifier.py b/guacamole_test_11_26/api/core/pki_certificate_verifier.py
new file mode 100755
index 00000000..de66b90f
--- /dev/null
+++ b/guacamole_test_11_26/api/core/pki_certificate_verifier.py
@@ -0,0 +1,259 @@
+"""
+Module for PKI/CA certificate handling for server key signature verification.
+"""
+
+import logging
+import ssl
+from datetime import datetime, timezone
+from typing import Dict, List, Optional
+
+import requests
+from cryptography import x509
+from cryptography.hazmat.primitives.asymmetric import ed25519
+
+logger = logging.getLogger(__name__)
+
+class PKICertificateVerifier:
+    """PKI/CA certificate verifier for server key signature verification."""
+
+    def __init__(self, ca_cert_path: str, crl_urls: Optional[List[str]] = None) -> None:
+        """Initialize PKI certificate verifier.
+
+        Args:
+            ca_cert_path: Path to CA certificate file.
+            crl_urls: List of CRL URLs (optional).
+        """
+        self.ca_cert_path = ca_cert_path
+        self.crl_urls = crl_urls or []
+        self.ca_cert = self._load_ca_certificate()
+        self.cert_store = self._build_cert_store()
+
+    def _load_ca_certificate(self) -> x509.Certificate:
+        """Load CA certificate.
+
+        Returns:
+            Loaded CA certificate.
+
+        Raises:
+            Exception: If certificate cannot be loaded.
+        """
+        try:
+            with open(self.ca_cert_path, "rb") as f:
+                ca_cert_data = f.read()
+            return x509.load_pem_x509_certificate(ca_cert_data)
+        except Exception as e:
+            logger.error("Failed to load CA certificate", extra={"error": str(e)})
+            raise
+
+    def _build_cert_store(self) -> List[x509.Certificate]:
+        """Build certificate store.
+
+        Returns:
+            List of trusted CA certificates.
+        """
+        # The cryptography x509 module has no CertificateStore class;
+        # trust anchors are kept as a plain list of certificates.
+        return [self.ca_cert]
+
+    def verify_server_certificate(self, server_cert_pem: bytes) -> bool:
+        """
+        Verify server certificate through PKI/CA.
+
+        Args:
+            server_cert_pem: PEM-encoded server certificate.
+
+        Returns:
+            True if certificate is valid, False otherwise.
+        """
+        try:
+            server_cert = x509.load_pem_x509_certificate(server_cert_pem)
+
+            if not self._verify_certificate_chain(server_cert):
+                logger.warning("Certificate chain verification failed")
+                return False
+
+            if not self._check_certificate_revocation(server_cert):
+                logger.warning("Certificate is revoked")
+                return False
+
+            if not self._check_certificate_validity(server_cert):
+                logger.warning("Certificate is expired or not yet valid")
+                return False
+
+            logger.info("Server certificate verified successfully")
+            return True
+
+        except Exception as e:
+            logger.error("Certificate verification error", extra={"error": str(e)})
+            return False
+
+    def _verify_certificate_chain(self, server_cert: x509.Certificate) -> bool:
+        """Verify certificate chain.
+
+        Args:
+            server_cert: Server certificate to verify.
+
+        Returns:
+            True if chain is valid, False otherwise.
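+
+        Example (illustrative; the paths are hypothetical and an
+        Ed25519-signed server certificate is assumed):
+
+            >>> verifier = PKICertificateVerifier("/etc/pki/ca.pem")
+            >>> with open("/etc/pki/server.pem", "rb") as f:
+            ...     cert = x509.load_pem_x509_certificate(f.read())
+            >>> verifier._verify_certificate_chain(cert)
+            True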
+ """ + try: + ca_public_key = self.ca_cert.public_key() + ca_public_key.verify( + server_cert.signature, + server_cert.tbs_certificate_bytes, + server_cert.signature_algorithm_oid, + ) + return True + except Exception as e: + logger.error( + "Certificate chain verification failed", extra={"error": str(e)} + ) + return False + + def _check_certificate_revocation(self, server_cert: x509.Certificate) -> bool: + """Check certificate revocation via CRL. + + Args: + server_cert: Server certificate to check. + + Returns: + True if certificate is not revoked, False otherwise. + """ + try: + crl_dps = server_cert.extensions.get_extension_for_oid( + x509.ExtensionOID.CRL_DISTRIBUTION_POINTS + ).value + + for crl_dp in crl_dps: + for crl_url in crl_dp.full_name: + if self._check_crl(server_cert, crl_url.value): + return False + + return True + + except Exception as e: + logger.warning("CRL check failed", extra={"error": str(e)}) + return True + + def _check_crl(self, server_cert: x509.Certificate, crl_url: str) -> bool: + """Check specific CRL for certificate revocation. + + Args: + server_cert: Server certificate to check. + crl_url: CRL URL. + + Returns: + True if certificate is revoked, False otherwise. + """ + try: + response = requests.get(crl_url, timeout=10) + if response.status_code == 200: + crl_data = response.content + crl = x509.load_der_x509_crl(crl_data) + return server_cert.serial_number in [revoked.serial_number for revoked in crl] + return False + except Exception as e: + logger.warning("Failed to check CRL", extra={"crl_url": crl_url, "error": str(e)}) + return False + + def _check_certificate_validity(self, server_cert: x509.Certificate) -> bool: + """Check certificate validity period. + + Args: + server_cert: Server certificate to check. + + Returns: + True if certificate is valid, False otherwise. + """ + now = datetime.now(timezone.utc) + return server_cert.not_valid_before <= now <= server_cert.not_valid_after + + def extract_public_key_from_certificate( + self, server_cert_pem: bytes + ) -> ed25519.Ed25519PublicKey: + """Extract public key from certificate. + + Args: + server_cert_pem: PEM-encoded server certificate. + + Returns: + Extracted Ed25519 public key. + """ + server_cert = x509.load_pem_x509_certificate(server_cert_pem) + public_key = server_cert.public_key() + if not isinstance(public_key, ed25519.Ed25519PublicKey): + raise ValueError("Certificate does not contain Ed25519 public key") + return public_key + +class ServerCertificateManager: + """Server certificate manager.""" + + def __init__(self, pki_verifier: PKICertificateVerifier) -> None: + """Initialize server certificate manager. + + Args: + pki_verifier: PKI certificate verifier instance. + """ + self.pki_verifier = pki_verifier + self.server_certificates: Dict[str, bytes] = {} + + def get_server_certificate(self, server_hostname: str) -> Optional[bytes]: + """Get server certificate via TLS handshake. + + Args: + server_hostname: Server hostname. + + Returns: + PEM-encoded server certificate or None if failed. 
+ """ + try: + context = ssl.create_default_context() + context.check_hostname = True + context.verify_mode = ssl.CERT_REQUIRED + + with ssl.create_connection((server_hostname, 443)) as sock: + with context.wrap_socket(sock, server_hostname=server_hostname) as ssock: + cert_der = ssock.getpeercert_chain()[0] + cert_pem = ssl.DER_cert_to_PEM_cert(cert_der) + cert_bytes = cert_pem.encode() + + if self.pki_verifier.verify_server_certificate(cert_bytes): + self.server_certificates[server_hostname] = cert_bytes + return cert_bytes + else: + logger.error( + "Server certificate verification failed", + extra={"server_hostname": server_hostname}, + ) + return None + + except Exception as e: + logger.error( + "Failed to get server certificate", + extra={"server_hostname": server_hostname, "error": str(e)}, + ) + return None + + def get_trusted_public_key( + self, server_hostname: str + ) -> Optional[ed25519.Ed25519PublicKey]: + """Get trusted public key from server certificate. + + Args: + server_hostname: Server hostname. + + Returns: + Ed25519 public key or None if failed. + """ + cert_pem = self.get_server_certificate(server_hostname) + if cert_pem: + return self.pki_verifier.extract_public_key_from_certificate(cert_pem) + return None + + +pki_verifier = PKICertificateVerifier( + ca_cert_path="/etc/ssl/certs/ca-certificates.crt", + crl_urls=["http://crl.example.com/crl.pem"], +) + +certificate_manager = ServerCertificateManager(pki_verifier) diff --git a/guacamole_test_11_26/api/core/rate_limiter.py b/guacamole_test_11_26/api/core/rate_limiter.py new file mode 100755 index 00000000..7087e2ee --- /dev/null +++ b/guacamole_test_11_26/api/core/rate_limiter.py @@ -0,0 +1,342 @@ +"""Redis-based thread-safe rate limiting.""" + +import os +import time +from typing import Any, Dict, Optional, Tuple + +import redis +import structlog + +logger = structlog.get_logger(__name__) + +# Redis connection constants +REDIS_DEFAULT_HOST = "localhost" +REDIS_DEFAULT_PORT = "6379" +REDIS_DEFAULT_DB = "0" +REDIS_SOCKET_TIMEOUT = 5 + +# Rate limiting constants +DEFAULT_RATE_LIMIT_REQUESTS = 10 +DEFAULT_RATE_LIMIT_WINDOW_SECONDS = 60 +FAILED_LOGIN_RETENTION_SECONDS = 3600 # 1 hour +LOGIN_RATE_LIMIT_REQUESTS = 5 +LOGIN_RATE_LIMIT_WINDOW_SECONDS = 900 # 15 minutes +DEFAULT_FAILED_LOGIN_WINDOW_MINUTES = 60 +SECONDS_PER_MINUTE = 60 + +# Redis key prefixes +RATE_LIMIT_KEY_PREFIX = "rate_limit:" +FAILED_LOGINS_IP_PREFIX = "failed_logins:ip:" +FAILED_LOGINS_USER_PREFIX = "failed_logins:user:" +LOGIN_LIMIT_PREFIX = "login_limit:" + +# Rate limit headers +HEADER_RATE_LIMIT = "X-RateLimit-Limit" +HEADER_RATE_LIMIT_WINDOW = "X-RateLimit-Window" +HEADER_RATE_LIMIT_USED = "X-RateLimit-Used" +HEADER_RATE_LIMIT_REMAINING = "X-RateLimit-Remaining" +HEADER_RATE_LIMIT_RESET = "X-RateLimit-Reset" +HEADER_RATE_LIMIT_STATUS = "X-RateLimit-Status" + + +class RedisRateLimiter: + """Thread-safe Redis-based rate limiter with sliding window algorithm.""" + + def __init__(self) -> None: + """Initialize Redis rate limiter.""" + self.redis_client = redis.Redis( + host=os.getenv("REDIS_HOST", REDIS_DEFAULT_HOST), + port=int(os.getenv("REDIS_PORT", REDIS_DEFAULT_PORT)), + password=os.getenv("REDIS_PASSWORD"), + db=int(os.getenv("REDIS_DB", REDIS_DEFAULT_DB)), + decode_responses=True, + socket_connect_timeout=REDIS_SOCKET_TIMEOUT, + socket_timeout=REDIS_SOCKET_TIMEOUT, + retry_on_timeout=True, + ) + + try: + self.redis_client.ping() + logger.info("Rate limiter Redis connection established") + except redis.ConnectionError as e: + logger.error("Failed to 
+    def check_rate_limit(
+        self,
+        client_ip: str,
+        requests_limit: int = DEFAULT_RATE_LIMIT_REQUESTS,
+        window_seconds: int = DEFAULT_RATE_LIMIT_WINDOW_SECONDS,
+    ) -> Tuple[bool, Dict[str, int]]:
+        """
+        Check rate limit using sliding window algorithm.
+
+        Args:
+            client_ip: Client IP address.
+            requests_limit: Maximum number of requests.
+            window_seconds: Time window in seconds.
+
+        Returns:
+            Tuple of (allowed: bool, headers: Dict[str, int]).
+        """
+        try:
+            current_time = int(time.time())
+            window_start = current_time - window_seconds
+
+            key = f"{RATE_LIMIT_KEY_PREFIX}{client_ip}"
+            # Each request gets a unique sorted-set member; reusing the bare
+            # timestamp would collapse all requests within the same second
+            # into one entry and undercount traffic.
+            member = f"{current_time}:{uuid.uuid4().hex}"
+
+            lua_script = """
+            local key = KEYS[1]
+            local window_start = tonumber(ARGV[1])
+            local current_time = tonumber(ARGV[2])
+            local requests_limit = tonumber(ARGV[3])
+            local window_seconds = tonumber(ARGV[4])
+            local member = ARGV[5]
+
+            -- Remove old entries (outside window)
+            redis.call('ZREMRANGEBYSCORE', key, '-inf', window_start)
+
+            -- Count current requests
+            local current_requests = redis.call('ZCARD', key)
+
+            -- Check limit
+            if current_requests >= requests_limit then
+                -- Return blocking information
+                local oldest_request = redis.call('ZRANGE', key, 0, 0, 'WITHSCORES')
+                local reset_time = tonumber(oldest_request[2]) + window_seconds
+                return {0, current_requests, reset_time}
+            else
+                -- Add current request
+                redis.call('ZADD', key, current_time, member)
+                redis.call('EXPIRE', key, window_seconds)
+
+                -- Count updated requests
+                local new_count = redis.call('ZCARD', key)
+                return {1, new_count, 0}
+            end
+            """
+
+            result = self.redis_client.eval(
+                lua_script,
+                1,
+                key,
+                window_start,
+                current_time,
+                requests_limit,
+                window_seconds,
+                member,
+            )
+
+            allowed = bool(result[0])
+            current_requests = result[1]
+            reset_time = result[2] if result[2] > 0 else 0
+
+            headers = {
+                HEADER_RATE_LIMIT: requests_limit,
+                HEADER_RATE_LIMIT_WINDOW: window_seconds,
+                HEADER_RATE_LIMIT_USED: current_requests,
+                HEADER_RATE_LIMIT_REMAINING: max(0, requests_limit - current_requests),
+            }
+
+            if reset_time > 0:
+                headers[HEADER_RATE_LIMIT_RESET] = reset_time
+
+            if allowed:
+                logger.debug(
+                    "Rate limit check passed",
+                    client_ip=client_ip,
+                    current_requests=current_requests,
+                    limit=requests_limit,
+                )
+            else:
+                logger.warning(
+                    "Rate limit exceeded",
+                    client_ip=client_ip,
+                    current_requests=current_requests,
+                    limit=requests_limit,
+                    reset_time=reset_time,
+                )
+
+            return allowed, headers
+
+        except redis.RedisError as e:
+            # Fail open: if Redis is unavailable, allow the request but flag
+            # the degraded state in the headers.
+            logger.error(
+                "Redis error during rate limit check", client_ip=client_ip, error=str(e)
+            )
+            return True, {
+                HEADER_RATE_LIMIT: requests_limit,
+                HEADER_RATE_LIMIT_WINDOW: window_seconds,
+                HEADER_RATE_LIMIT_USED: 0,
+                HEADER_RATE_LIMIT_REMAINING: requests_limit,
+                HEADER_RATE_LIMIT_STATUS: "redis_error",
+            }
+        except Exception as e:
+            logger.error(
+                "Unexpected error during rate limit check",
+                client_ip=client_ip,
+                error=str(e),
+            )
+            return True, {
+                HEADER_RATE_LIMIT: requests_limit,
+                HEADER_RATE_LIMIT_WINDOW: window_seconds,
+                HEADER_RATE_LIMIT_USED: 0,
+                HEADER_RATE_LIMIT_REMAINING: requests_limit,
+                HEADER_RATE_LIMIT_STATUS: "error",
+            }
+
+    def check_login_rate_limit(
+        self, client_ip: str, username: Optional[str] = None
+    ) -> Tuple[bool, Dict[str, int]]:
+        """
+        Special rate limit for login endpoint.
+
+        Args:
+            client_ip: Client IP address.
+            username: Username (optional).
+
+        Returns:
+            Tuple of (allowed: bool, headers: Dict[str, int]).
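+
+        Example (illustrative; ``redis_rate_limiter`` is the module-level
+        instance defined at the bottom of this file)::
+
+            allowed, headers = redis_rate_limiter.check_login_rate_limit(
+                "203.0.113.7", username="alice"
+            )
+            if not allowed:
+                # respond with HTTP 429; headers carry the X-RateLimit-* data
+                ...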
+ """ + allowed, headers = self.check_rate_limit( + client_ip, LOGIN_RATE_LIMIT_REQUESTS, LOGIN_RATE_LIMIT_WINDOW_SECONDS + ) + + if username and allowed: + user_key = f"{LOGIN_LIMIT_PREFIX}{username}" + user_allowed, user_headers = self.check_rate_limit( + user_key, LOGIN_RATE_LIMIT_REQUESTS, LOGIN_RATE_LIMIT_WINDOW_SECONDS + ) + + if not user_allowed: + logger.warning( + "Login rate limit exceeded for user", + username=username, + client_ip=client_ip, + ) + return False, user_headers + + return allowed, headers + + def record_failed_login(self, client_ip: str, username: str) -> None: + """ + Record failed login attempt for brute-force attack tracking. + + Args: + client_ip: Client IP address. + username: Username. + """ + try: + current_time = int(time.time()) + + ip_key = f"{FAILED_LOGINS_IP_PREFIX}{client_ip}" + self.redis_client.zadd(ip_key, {current_time: current_time}) + self.redis_client.expire(ip_key, FAILED_LOGIN_RETENTION_SECONDS) + + user_key = f"{FAILED_LOGINS_USER_PREFIX}{username}" + self.redis_client.zadd(user_key, {current_time: current_time}) + self.redis_client.expire(user_key, FAILED_LOGIN_RETENTION_SECONDS) + + logger.debug("Failed login recorded", client_ip=client_ip, username=username) + + except Exception as e: + logger.error( + "Failed to record failed login attempt", + client_ip=client_ip, + username=username, + error=str(e), + ) + + def get_failed_login_count( + self, + client_ip: str, + username: Optional[str] = None, + window_minutes: int = DEFAULT_FAILED_LOGIN_WINDOW_MINUTES, + ) -> Dict[str, int]: + """ + Get count of failed login attempts. + + Args: + client_ip: Client IP address. + username: Username (optional). + window_minutes: Time window in minutes. + + Returns: + Dictionary with failed login counts. + """ + try: + current_time = int(time.time()) + window_start = current_time - (window_minutes * SECONDS_PER_MINUTE) + + result = {"ip_failed_count": 0, "user_failed_count": 0} + + ip_key = f"{FAILED_LOGINS_IP_PREFIX}{client_ip}" + ip_count = self.redis_client.zcount(ip_key, window_start, current_time) + result["ip_failed_count"] = ip_count + + if username: + user_key = f"{FAILED_LOGINS_USER_PREFIX}{username}" + user_count = self.redis_client.zcount(user_key, window_start, current_time) + result["user_failed_count"] = user_count + + return result + + except Exception as e: + logger.error( + "Failed to get failed login count", + client_ip=client_ip, + username=username, + error=str(e), + ) + return {"ip_failed_count": 0, "user_failed_count": 0} + + def clear_failed_logins( + self, client_ip: str, username: Optional[str] = None + ) -> None: + """ + Clear failed login attempt records. + + Args: + client_ip: Client IP address. + username: Username (optional). + """ + try: + ip_key = f"{FAILED_LOGINS_IP_PREFIX}{client_ip}" + self.redis_client.delete(ip_key) + + if username: + user_key = f"{FAILED_LOGINS_USER_PREFIX}{username}" + self.redis_client.delete(user_key) + + logger.debug( + "Failed login records cleared", client_ip=client_ip, username=username + ) + + except Exception as e: + logger.error( + "Failed to clear failed login records", + client_ip=client_ip, + username=username, + error=str(e), + ) + + def get_rate_limit_stats(self) -> Dict[str, Any]: + """ + Get rate limiting statistics. + + Returns: + Rate limiting statistics dictionary. 
+ """ + try: + rate_limit_keys = self.redis_client.keys(f"{RATE_LIMIT_KEY_PREFIX}*") + failed_login_keys = self.redis_client.keys(f"{FAILED_LOGINS_IP_PREFIX}*") + + return { + "active_rate_limits": len(rate_limit_keys), + "failed_login_trackers": len(failed_login_keys), + "redis_memory_usage": ( + self.redis_client.memory_usage(f"{RATE_LIMIT_KEY_PREFIX}*") + + self.redis_client.memory_usage(f"{FAILED_LOGINS_IP_PREFIX}*") + ), + } + + except Exception as e: + logger.error("Failed to get rate limit stats", error=str(e)) + return { + "active_rate_limits": 0, + "failed_login_trackers": 0, + "redis_memory_usage": 0, + "error": str(e), + } + + +redis_rate_limiter = RedisRateLimiter() diff --git a/guacamole_test_11_26/api/core/redis_storage.py b/guacamole_test_11_26/api/core/redis_storage.py new file mode 100755 index 00000000..c486fc72 --- /dev/null +++ b/guacamole_test_11_26/api/core/redis_storage.py @@ -0,0 +1,266 @@ +""" +Redis Storage Helper for storing shared state in cluster. +""" + +import json +import os +from typing import Any, Dict, List, Optional + +import redis +import structlog + +logger = structlog.get_logger(__name__) + + +class RedisConnectionStorage: + """ + Redis storage for active connections. + + Supports cluster operation with automatic TTL. + """ + + def __init__(self) -> None: + """Initialize Redis connection storage.""" + self._redis_client = redis.Redis( + host=os.getenv("REDIS_HOST", "redis"), + port=int(os.getenv("REDIS_PORT", "6379")), + password=os.getenv("REDIS_PASSWORD"), + db=0, + decode_responses=True, + ) + + try: + self._redis_client.ping() + logger.info("Redis Connection Storage initialized successfully") + except Exception as e: + logger.error("Failed to connect to Redis for connections", error=str(e)) + raise RuntimeError(f"Redis connection failed: {e}") + + def add_connection( + self, + connection_id: str, + connection_data: Dict[str, Any], + ttl_seconds: Optional[int] = None, + ) -> None: + """ + Add connection to Redis. + + Args: + connection_id: Connection ID. + connection_data: Connection data dictionary. + ttl_seconds: TTL in seconds (None = no automatic expiration). + """ + try: + redis_key = f"connection:active:{connection_id}" + if ttl_seconds is not None: + self._redis_client.setex( + redis_key, ttl_seconds, json.dumps(connection_data) + ) + logger.debug( + "Connection added to Redis with TTL", + connection_id=connection_id, + ttl_seconds=ttl_seconds, + ) + else: + self._redis_client.set(redis_key, json.dumps(connection_data)) + logger.debug( + "Connection added to Redis without TTL", + connection_id=connection_id, + ) + except Exception as e: + logger.error( + "Failed to add connection to Redis", + connection_id=connection_id, + error=str(e), + ) + raise + + def get_connection(self, connection_id: str) -> Optional[Dict[str, Any]]: + """ + Get connection from Redis. + + Args: + connection_id: Connection ID. + + Returns: + Connection data dictionary or None if not found. + """ + try: + redis_key = f"connection:active:{connection_id}" + conn_json = self._redis_client.get(redis_key) + + if not conn_json: + return None + + return json.loads(conn_json) + except Exception as e: + logger.error( + "Failed to get connection from Redis", + connection_id=connection_id, + error=str(e), + ) + return None + + def update_connection( + self, connection_id: str, update_data: Dict[str, Any] + ) -> None: + """ + Update connection data. + + Args: + connection_id: Connection ID. + update_data: Data to update (will be merged with existing data). 
+ """ + try: + redis_key = f"connection:active:{connection_id}" + + conn_json = self._redis_client.get(redis_key) + if not conn_json: + logger.warning( + "Cannot update non-existent connection", + connection_id=connection_id, + ) + return + + conn_data = json.loads(conn_json) + conn_data.update(update_data) + + ttl = self._redis_client.ttl(redis_key) + if ttl > 0: + self._redis_client.setex(redis_key, ttl, json.dumps(conn_data)) + logger.debug("Connection updated in Redis", connection_id=connection_id) + except Exception as e: + logger.error( + "Failed to update connection in Redis", + connection_id=connection_id, + error=str(e), + ) + + def delete_connection(self, connection_id: str) -> bool: + """ + Delete connection from Redis. + + Args: + connection_id: Connection ID. + + Returns: + True if connection was deleted, False otherwise. + """ + try: + redis_key = f"connection:active:{connection_id}" + result = self._redis_client.delete(redis_key) + + if result > 0: + logger.debug("Connection deleted from Redis", connection_id=connection_id) + return True + return False + except Exception as e: + logger.error( + "Failed to delete connection from Redis", + connection_id=connection_id, + error=str(e), + ) + return False + + def get_all_connections(self) -> Dict[str, Dict[str, Any]]: + """ + Get all active connections. + + Returns: + Dictionary mapping connection_id to connection_data. + """ + try: + pattern = "connection:active:*" + keys = list(self._redis_client.scan_iter(match=pattern, count=100)) + + connections = {} + for key in keys: + try: + conn_id = key.replace("connection:active:", "") + conn_json = self._redis_client.get(key) + if conn_json: + connections[conn_id] = json.loads(conn_json) + except Exception: + continue + + return connections + except Exception as e: + logger.error("Failed to get all connections from Redis", error=str(e)) + return {} + + def get_user_connections(self, username: str) -> List[Dict[str, Any]]: + """ + Get all user connections. + + Args: + username: Username. + + Returns: + List of user connections. + """ + try: + all_connections = self.get_all_connections() + user_connections = [ + conn_data + for conn_data in all_connections.values() + if conn_data.get("owner_username") == username + ] + + return user_connections + except Exception as e: + logger.error( + "Failed to get user connections from Redis", + username=username, + error=str(e), + ) + return [] + + def cleanup_expired_connections(self) -> int: + """ + Cleanup expired connections. + + Returns: + Number of removed connections. + """ + try: + pattern = "connection:active:*" + keys = list(self._redis_client.scan_iter(match=pattern, count=100)) + + cleaned_count = 0 + for key in keys: + ttl = self._redis_client.ttl(key) + if ttl == -2: + cleaned_count += 1 + elif ttl == -1: + self._redis_client.delete(key) + cleaned_count += 1 + + if cleaned_count > 0: + logger.info( + "Connections cleanup completed", cleaned_count=cleaned_count + ) + + return cleaned_count + except Exception as e: + logger.error("Failed to cleanup expired connections", error=str(e)) + return 0 + + def get_stats(self) -> Dict[str, Any]: + """ + Get connection statistics. + + Returns: + Connection statistics dictionary. 
+ """ + try: + pattern = "connection:active:*" + keys = list(self._redis_client.scan_iter(match=pattern, count=100)) + + return {"total_connections": len(keys), "storage": "Redis"} + except Exception as e: + logger.error("Failed to get connection stats", error=str(e)) + return {"error": str(e), "storage": "Redis"} + + +redis_connection_storage = RedisConnectionStorage() + diff --git a/guacamole_test_11_26/api/core/replay_protection.py b/guacamole_test_11_26/api/core/replay_protection.py new file mode 100755 index 00000000..6de8d14e --- /dev/null +++ b/guacamole_test_11_26/api/core/replay_protection.py @@ -0,0 +1,172 @@ +""" +Module for nonce management and replay attack prevention. +""" + +import hashlib +import logging +import time + +import redis + +logger = logging.getLogger(__name__) + +# Constants +NONCE_TTL_SECONDS = 300 # 5 minutes TTL for nonce +TIMESTAMP_TOLERANCE_SECONDS = 30 # 30 seconds tolerance for timestamp + +class NonceManager: + """Nonce manager for replay attack prevention.""" + + def __init__(self, redis_client: redis.Redis) -> None: + """Initialize nonce manager. + + Args: + redis_client: Redis client instance. + """ + self.redis = redis_client + self.nonce_ttl = NONCE_TTL_SECONDS + self.timestamp_tolerance = TIMESTAMP_TOLERANCE_SECONDS + + def validate_nonce( + self, client_nonce: bytes, timestamp: int, session_id: str + ) -> bool: + """ + Validate nonce uniqueness and timestamp validity. + + Args: + client_nonce: Nonce from client. + timestamp: Timestamp from client. + session_id: Session ID. + + Returns: + True if nonce is valid, False otherwise. + """ + try: + if not self._validate_timestamp(timestamp): + logger.warning( + "Invalid timestamp", + extra={"timestamp": timestamp, "session_id": session_id}, + ) + return False + + nonce_key = self._create_nonce_key(client_nonce, session_id) + nonce_hash = hashlib.sha256(client_nonce).hexdigest()[:16] + + if self.redis.exists(nonce_key): + logger.warning( + "Nonce already used", + extra={"session_id": session_id, "nonce_hash": nonce_hash}, + ) + return False + + self.redis.setex(nonce_key, self.nonce_ttl, timestamp) + + logger.info( + "Nonce validated successfully", + extra={"session_id": session_id, "nonce_hash": nonce_hash}, + ) + + return True + + except Exception as e: + logger.error( + "Nonce validation error", + extra={"error": str(e), "session_id": session_id}, + ) + return False + + def _validate_timestamp(self, timestamp: int) -> bool: + """Validate timestamp. + + Args: + timestamp: Timestamp in milliseconds. + + Returns: + True if timestamp is within tolerance, False otherwise. + """ + current_time = int(time.time() * 1000) + time_diff = abs(current_time - timestamp) + return time_diff <= (self.timestamp_tolerance * 1000) + + def _create_nonce_key(self, client_nonce: bytes, session_id: str) -> str: + """Create unique key for nonce in Redis. + + Args: + client_nonce: Nonce from client. + session_id: Session ID. + + Returns: + Redis key string. + """ + nonce_hash = hashlib.sha256(client_nonce).hexdigest() + return f"nonce:{session_id}:{nonce_hash}" + + def cleanup_expired_nonces(self) -> int: + """ + Cleanup expired nonces. + + Redis automatically removes keys by TTL, but this method provides + additional cleanup for keys without TTL. + + Returns: + Number of expired nonces removed. 
+ """ + try: + pattern = "nonce:*" + keys = self.redis.keys(pattern) + + expired_count = 0 + for key in keys: + ttl = self.redis.ttl(key) + if ttl == -1: + self.redis.delete(key) + expired_count += 1 + + logger.info( + "Nonce cleanup completed", + extra={"expired_count": expired_count, "total_keys": len(keys)}, + ) + + return expired_count + + except Exception as e: + logger.error("Nonce cleanup error", extra={"error": str(e)}) + return 0 + +class ReplayProtection: + """Replay attack protection.""" + + def __init__(self, redis_client: redis.Redis) -> None: + """Initialize replay protection. + + Args: + redis_client: Redis client instance. + """ + self.nonce_manager = NonceManager(redis_client) + + def validate_request( + self, client_nonce: bytes, timestamp: int, session_id: str + ) -> bool: + """ + Validate request for replay attacks. + + Args: + client_nonce: Nonce from client. + timestamp: Timestamp from client. + session_id: Session ID. + + Returns: + True if request is valid, False otherwise. + """ + return self.nonce_manager.validate_nonce( + client_nonce, timestamp, session_id + ) + + def cleanup(self) -> int: + """ + Cleanup expired nonces. + + Returns: + Number of expired nonces removed. + """ + return self.nonce_manager.cleanup_expired_nonces() diff --git a/guacamole_test_11_26/api/core/saved_machines_db.py b/guacamole_test_11_26/api/core/saved_machines_db.py new file mode 100755 index 00000000..121322f7 --- /dev/null +++ b/guacamole_test_11_26/api/core/saved_machines_db.py @@ -0,0 +1,401 @@ +""" +Database operations for saved user machines. +""" + +import os +from typing import Any, Dict, List, Optional + +import psycopg2 +import structlog +from psycopg2.extras import RealDictCursor +from psycopg2.extensions import connection as Connection + +logger = structlog.get_logger(__name__) + + +class SavedMachinesDB: + """PostgreSQL operations for saved machines.""" + + def __init__(self) -> None: + """Initialize database configuration.""" + self.db_config = { + "host": os.getenv("POSTGRES_HOST", "postgres"), + "port": int(os.getenv("POSTGRES_PORT", "5432")), + "database": os.getenv("POSTGRES_DB", "guacamole_db"), + "user": os.getenv("POSTGRES_USER", "guacamole_user"), + "password": os.getenv("POSTGRES_PASSWORD"), + } + + def _get_connection(self) -> Connection: + """Get database connection.""" + try: + return psycopg2.connect(**self.db_config) + except Exception as e: + logger.error("Failed to connect to database", error=str(e)) + raise + + def create_machine( + self, + user_id: str, + name: str, + hostname: str, + port: int, + protocol: str, + os: Optional[str] = None, + description: Optional[str] = None, + tags: Optional[List[str]] = None, + is_favorite: bool = False, + ) -> Dict[str, Any]: + """ + Create new saved machine. + + Args: + user_id: User ID. + name: Machine name. + hostname: Machine hostname. + port: Connection port. + protocol: Connection protocol. + os: Operating system (optional). + description: Description (optional). + tags: Tags list (optional). + is_favorite: Whether machine is favorite. + + Returns: + Dictionary with created machine data including ID. 
+ """ + with self._get_connection() as conn: + try: + with conn.cursor(cursor_factory=RealDictCursor) as cur: + query = """ + INSERT INTO api.user_saved_machines + (user_id, name, hostname, port, protocol, os, + description, tags, is_favorite) + VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s) + RETURNING id, user_id, name, hostname, port, protocol, os, + description, tags, is_favorite, created_at, updated_at, + last_connected_at + """ + + cur.execute( + query, + ( + user_id, + name, + hostname, + port, + protocol, + os, + description, + tags or [], + is_favorite, + ), + ) + + result = dict(cur.fetchone()) + conn.commit() + + logger.info( + "Saved machine created", + machine_id=result["id"], + user_id=user_id, + name=name, + ) + + return result + + except Exception as e: + conn.rollback() + logger.error( + "Failed to create saved machine", error=str(e), user_id=user_id + ) + raise + + def get_user_machines( + self, user_id: str, include_stats: bool = False + ) -> List[Dict[str, Any]]: + """ + Get all user machines. + + Args: + user_id: User ID. + include_stats: Include connection statistics. + + Returns: + List of machine dictionaries. + """ + with self._get_connection() as conn: + with conn.cursor(cursor_factory=RealDictCursor) as cur: + if include_stats: + query = """ + SELECT + m.*, + json_build_object( + 'total_connections', COALESCE(COUNT(h.id), 0), + 'last_connection', MAX(h.connected_at), + 'successful_connections', + COALESCE(SUM(CASE WHEN h.success = TRUE THEN 1 ELSE 0 END), 0), + 'failed_connections', + COALESCE(SUM(CASE WHEN h.success = FALSE THEN 1 ELSE 0 END), 0) + ) as connection_stats + FROM api.user_saved_machines m + LEFT JOIN api.connection_history h ON m.id = h.machine_id + WHERE m.user_id = %s + GROUP BY m.id + ORDER BY m.is_favorite DESC, m.updated_at DESC + """ + else: + query = """ + SELECT * FROM api.user_saved_machines + WHERE user_id = %s + ORDER BY is_favorite DESC, updated_at DESC + """ + + cur.execute(query, (user_id,)) + results = [dict(row) for row in cur.fetchall()] + + logger.debug( + "Retrieved user machines", user_id=user_id, count=len(results) + ) + + return results + + def get_machine_by_id( + self, machine_id: str, user_id: str + ) -> Optional[Dict[str, Any]]: + """ + Get machine by ID with owner verification. + + Args: + machine_id: Machine UUID. + user_id: User ID for permission check. + + Returns: + Machine dictionary or None if not found. + """ + with self._get_connection() as conn: + with conn.cursor(cursor_factory=RealDictCursor) as cur: + query = """ + SELECT * FROM api.user_saved_machines + WHERE id = %s AND user_id = %s + """ + + cur.execute(query, (machine_id, user_id)) + result = cur.fetchone() + + return dict(result) if result else None + + def update_machine( + self, machine_id: str, user_id: str, **updates: Any + ) -> Optional[Dict[str, Any]]: + """ + Update machine. + + Args: + machine_id: Machine UUID. + user_id: User ID for permission check. + **updates: Fields to update. + + Returns: + Updated machine dictionary or None if not found. 
+ """ + allowed_fields = { + "name", + "hostname", + "port", + "protocol", + "os", + "description", + "tags", + "is_favorite", + } + + updates_filtered = { + k: v for k, v in updates.items() if k in allowed_fields and v is not None + } + + if not updates_filtered: + return self.get_machine_by_id(machine_id, user_id) + + set_clause = ", ".join([f"{k} = %s" for k in updates_filtered.keys()]) + values = list(updates_filtered.values()) + [machine_id, user_id] + + query = f""" + UPDATE api.user_saved_machines + SET {set_clause} + WHERE id = %s AND user_id = %s + RETURNING id, user_id, name, hostname, port, protocol, os, + description, tags, is_favorite, created_at, updated_at, + last_connected_at + """ + + with self._get_connection() as conn: + try: + with conn.cursor(cursor_factory=RealDictCursor) as cur: + cur.execute(query, values) + result = cur.fetchone() + conn.commit() + + if result: + logger.info( + "Saved machine updated", + machine_id=machine_id, + user_id=user_id, + updated_fields=list(updates_filtered.keys()), + ) + return dict(result) + + return None + + except Exception as e: + conn.rollback() + logger.error( + "Failed to update machine", + error=str(e), + machine_id=machine_id, + user_id=user_id, + ) + raise + + def delete_machine(self, machine_id: str, user_id: str) -> bool: + """ + Delete machine. + + Args: + machine_id: Machine UUID. + user_id: User ID for permission check. + + Returns: + True if deleted, False if not found. + """ + with self._get_connection() as conn: + try: + with conn.cursor() as cur: + query = """ + DELETE FROM api.user_saved_machines + WHERE id = %s AND user_id = %s + """ + + cur.execute(query, (machine_id, user_id)) + deleted_count = cur.rowcount + conn.commit() + + if deleted_count > 0: + logger.info( + "Saved machine deleted", + machine_id=machine_id, + user_id=user_id, + ) + return True + + logger.warning( + "Machine not found for deletion", + machine_id=machine_id, + user_id=user_id, + ) + return False + + except Exception as e: + conn.rollback() + logger.error( + "Failed to delete machine", + error=str(e), + machine_id=machine_id, + user_id=user_id, + ) + raise + + def update_last_connected(self, machine_id: str, user_id: str) -> None: + """ + Update last connection time. + + Args: + machine_id: Machine UUID. + user_id: User ID. + """ + with self._get_connection() as conn: + try: + with conn.cursor() as cur: + query = """ + UPDATE api.user_saved_machines + SET last_connected_at = NOW() + WHERE id = %s AND user_id = %s + """ + + cur.execute(query, (machine_id, user_id)) + conn.commit() + + logger.debug( + "Updated last_connected_at", + machine_id=machine_id, + user_id=user_id, + ) + + except Exception as e: + conn.rollback() + logger.error("Failed to update last_connected", error=str(e)) + + def add_connection_history( + self, + user_id: str, + machine_id: str, + success: bool = True, + error_message: Optional[str] = None, + duration_seconds: Optional[int] = None, + client_ip: Optional[str] = None, + ) -> str: + """ + Add connection history record. + + Args: + user_id: User ID. + machine_id: Machine ID. + success: Whether connection was successful. + error_message: Error message if failed (optional). + duration_seconds: Connection duration in seconds (optional). + client_ip: Client IP address (optional). + + Returns: + UUID of created record. 
+ """ + with self._get_connection() as conn: + try: + with conn.cursor(cursor_factory=RealDictCursor) as cur: + query = """ + INSERT INTO api.connection_history + (user_id, machine_id, success, error_message, duration_seconds, client_ip) + VALUES (%s, %s, %s, %s, %s, %s) + RETURNING id + """ + + cur.execute( + query, + ( + user_id, + machine_id, + success, + error_message, + duration_seconds, + client_ip, + ), + ) + + result = cur.fetchone() + conn.commit() + + logger.info( + "Connection history record created", + machine_id=machine_id, + user_id=user_id, + success=success, + ) + + return str(result["id"]) + + except Exception as e: + conn.rollback() + logger.error("Failed to add connection history", error=str(e)) + raise + + +saved_machines_db = SavedMachinesDB() + diff --git a/guacamole_test_11_26/api/core/session_storage.py b/guacamole_test_11_26/api/core/session_storage.py new file mode 100755 index 00000000..4374eef9 --- /dev/null +++ b/guacamole_test_11_26/api/core/session_storage.py @@ -0,0 +1,339 @@ +""" +Redis-based session storage for Guacamole tokens. +""" + +import json +import os +import uuid +from datetime import datetime, timedelta, timezone +from typing import Any, Dict, Optional + +import redis +import structlog + +logger = structlog.get_logger(__name__) + + +class SessionStorage: + """Redis-based session storage for secure Guacamole token storage.""" + + def __init__(self) -> None: + """Initialize Redis client and verify connection.""" + self.redis_client = redis.Redis( + host=os.getenv("REDIS_HOST", "localhost"), + port=int(os.getenv("REDIS_PORT", "6379")), + password=os.getenv("REDIS_PASSWORD"), + db=int(os.getenv("REDIS_DB", "0")), + decode_responses=True, + socket_connect_timeout=5, + socket_timeout=5, + retry_on_timeout=True, + ) + + try: + self.redis_client.ping() + logger.info("Redis connection established successfully") + except redis.ConnectionError as e: + logger.error("Failed to connect to Redis", error=str(e)) + raise + + def create_session( + self, + user_info: Dict[str, Any], + guac_token: str, + expires_in_minutes: int = 60, + ) -> str: + """ + Create new session. + + Args: + user_info: User information dictionary. + guac_token: Guacamole authentication token. + expires_in_minutes: Session lifetime in minutes. + + Returns: + Unique session ID. + """ + session_id = str(uuid.uuid4()) + now = datetime.now(timezone.utc) + + session_data = { + "session_id": session_id, + "user_info": user_info, + "guac_token": guac_token, + "created_at": now.isoformat(), + "expires_at": (now + timedelta(minutes=expires_in_minutes)).isoformat(), + "last_accessed": now.isoformat(), + } + + try: + ttl_seconds = expires_in_minutes * 60 + self.redis_client.setex( + f"session:{session_id}", + ttl_seconds, + json.dumps(session_data), + ) + + self.redis_client.setex( + f"user_session:{user_info['username']}", + ttl_seconds, + session_id, + ) + + logger.info( + "Session created successfully", + session_id=session_id, + username=user_info["username"], + expires_in_minutes=expires_in_minutes, + redis_key=f"session:{session_id}", + has_guac_token=bool(guac_token), + guac_token_length=len(guac_token) if guac_token else 0, + ) + + return session_id + + except redis.RedisError as e: + logger.error("Failed to create session", error=str(e)) + raise + + def get_session(self, session_id: str) -> Optional[Dict[str, Any]]: + """ + Get session data. + + Args: + session_id: Session ID. + + Returns: + Session data or None if not found/expired. 
+ """ + try: + session_data = self.redis_client.get(f"session:{session_id}") + + if not session_data: + logger.debug("Session not found", session_id=session_id) + return None + + session = json.loads(session_data) + session["last_accessed"] = datetime.now(timezone.utc).isoformat() + + ttl = self.redis_client.ttl(f"session:{session_id}") + if ttl > 0: + self.redis_client.setex( + f"session:{session_id}", + ttl, + json.dumps(session), + ) + + logger.debug( + "Session retrieved successfully", + session_id=session_id, + username=session["user_info"]["username"], + ) + + return session + + except redis.RedisError as e: + logger.error("Failed to get session", session_id=session_id, error=str(e)) + return None + except json.JSONDecodeError as e: + logger.error( + "Failed to decode session data", session_id=session_id, error=str(e) + ) + return None + + def get_session_by_username(self, username: str) -> Optional[Dict[str, Any]]: + """ + Get session by username. + + Args: + username: Username. + + Returns: + Session data or None. + """ + try: + session_id = self.redis_client.get(f"user_session:{username}") + + if not session_id: + logger.debug("No active session for user", username=username) + return None + + return self.get_session(session_id) + + except redis.RedisError as e: + logger.error( + "Failed to get session by username", username=username, error=str(e) + ) + return None + + def update_session(self, session_id: str, updates: Dict[str, Any]) -> bool: + """ + Update session data. + + Args: + session_id: Session ID. + updates: Updates to apply. + + Returns: + True if update successful. + """ + try: + session_data = self.redis_client.get(f"session:{session_id}") + + if not session_data: + logger.warning("Session not found for update", session_id=session_id) + return False + + session = json.loads(session_data) + session.update(updates) + session["last_accessed"] = datetime.now(timezone.utc).isoformat() + + ttl = self.redis_client.ttl(f"session:{session_id}") + if ttl > 0: + self.redis_client.setex( + f"session:{session_id}", + ttl, + json.dumps(session), + ) + + logger.debug( + "Session updated successfully", + session_id=session_id, + updates=list(updates.keys()), + ) + return True + else: + logger.warning("Session expired during update", session_id=session_id) + return False + + except redis.RedisError as e: + logger.error("Failed to update session", session_id=session_id, error=str(e)) + return False + except json.JSONDecodeError as e: + logger.error( + "Failed to decode session data for update", + session_id=session_id, + error=str(e), + ) + return False + + def delete_session(self, session_id: str) -> bool: + """ + Delete session. + + Args: + session_id: Session ID. + + Returns: + True if deletion successful. 
+ """ + try: + session_data = self.redis_client.get(f"session:{session_id}") + + if session_data: + session = json.loads(session_data) + username = session["user_info"]["username"] + + self.redis_client.delete(f"session:{session_id}") + self.redis_client.delete(f"user_session:{username}") + + logger.info( + "Session deleted successfully", + session_id=session_id, + username=username, + ) + return True + else: + logger.debug("Session not found for deletion", session_id=session_id) + return False + + except redis.RedisError as e: + logger.error("Failed to delete session", session_id=session_id, error=str(e)) + return False + except json.JSONDecodeError as e: + logger.error( + "Failed to decode session data for deletion", + session_id=session_id, + error=str(e), + ) + return False + + def delete_user_sessions(self, username: str) -> int: + """ + Delete all user sessions. + + Args: + username: Username. + + Returns: + Number of deleted sessions. + """ + try: + pattern = f"user_session:{username}" + session_keys = self.redis_client.keys(pattern) + + deleted_count = 0 + for key in session_keys: + session_id = self.redis_client.get(key) + if session_id and self.delete_session(session_id): + deleted_count += 1 + + logger.info( + "User sessions deleted", username=username, deleted_count=deleted_count + ) + + return deleted_count + + except redis.RedisError as e: + logger.error( + "Failed to delete user sessions", username=username, error=str(e) + ) + return 0 + + def cleanup_expired_sessions(self) -> int: + """ + Cleanup expired sessions. + + Redis automatically removes keys by TTL, so this method is mainly + for compatibility and potential logic extension. + + Returns: + Number of cleaned sessions (always 0, as Redis handles this automatically). + """ + logger.debug( + "Expired sessions cleanup completed (Redis TTL handles this automatically)" + ) + return 0 + + def get_session_stats(self) -> Dict[str, Any]: + """ + Get session statistics. + + Returns: + Session statistics dictionary. 
+ """ + try: + session_keys = self.redis_client.keys("session:*") + user_keys = self.redis_client.keys("user_session:*") + + memory_usage = ( + self.redis_client.memory_usage("session:*") if session_keys else 0 + ) + + return { + "active_sessions": len(session_keys), + "active_users": len(user_keys), + "redis_memory_usage": memory_usage, + } + + except redis.RedisError as e: + logger.error("Failed to get session stats", error=str(e)) + return { + "active_sessions": 0, + "active_users": 0, + "redis_memory_usage": 0, + "error": str(e), + } + + +session_storage = SessionStorage() diff --git a/guacamole_test_11_26/api/core/signature_verifier.py b/guacamole_test_11_26/api/core/signature_verifier.py new file mode 100755 index 00000000..9cef5b14 --- /dev/null +++ b/guacamole_test_11_26/api/core/signature_verifier.py @@ -0,0 +1,154 @@ +"""Module for verifying server key signatures with constant-time comparison.""" + +import logging +from typing import Dict, Optional + +from cryptography.exceptions import InvalidSignature +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.primitives.asymmetric import ed25519 + +logger = logging.getLogger(__name__) + +# Ed25519 constants +ED25519_SIGNATURE_LENGTH = 64 +ED25519_PUBLIC_KEY_LENGTH = 32 +DEFAULT_KEY_ID = "default" + + +class SignatureVerifier: + """Signature verifier with constant-time comparison.""" + + def __init__(self) -> None: + """Initialize the signature verifier.""" + self.trusted_public_keys = self._load_trusted_keys() + + def _load_trusted_keys(self) -> Dict[str, Optional[ed25519.Ed25519PublicKey]]: + """ + Load trusted public keys. + + Returns: + Dictionary mapping key IDs to public keys. + """ + return {DEFAULT_KEY_ID: None} + + def verify_server_key_signature( + self, + public_key_pem: bytes, + signature: bytes, + kid: Optional[str] = None, + ) -> bool: + """ + Verify server public key signature with constant-time comparison. + + Args: + public_key_pem: PEM-encoded public key. + signature: Signature bytes. + kid: Key ID for key selection (optional). + + Returns: + True if signature is valid, False otherwise. 
+ """ + try: + if len(signature) != ED25519_SIGNATURE_LENGTH: + logger.warning( + "Invalid signature length", + extra={ + "expected": ED25519_SIGNATURE_LENGTH, + "actual": len(signature), + "kid": kid, + }, + ) + return False + + try: + public_key = serialization.load_pem_public_key(public_key_pem) + except Exception as e: + logger.warning( + "Failed to load PEM public key", + extra={"error": str(e), "kid": kid}, + ) + return False + + if not isinstance(public_key, ed25519.Ed25519PublicKey): + logger.warning( + "Public key is not Ed25519", + extra={"kid": kid}, + ) + return False + + raw_public_key = public_key.public_bytes_raw() + if len(raw_public_key) != ED25519_PUBLIC_KEY_LENGTH: + logger.warning( + "Invalid public key length", + extra={ + "expected": ED25519_PUBLIC_KEY_LENGTH, + "actual": len(raw_public_key), + "kid": kid, + }, + ) + return False + + trusted_key = self._get_trusted_key(kid) + if not trusted_key: + logger.error("No trusted key found", extra={"kid": kid}) + return False + + try: + trusted_key.verify(signature, public_key_pem) + logger.info("Signature verification successful", extra={"kid": kid}) + return True + except InvalidSignature: + logger.warning("Signature verification failed", extra={"kid": kid}) + return False + + except Exception as e: + logger.error( + "Signature verification error", + extra={"error": str(e), "kid": kid}, + ) + return False + + def _get_trusted_key( + self, kid: Optional[str] = None + ) -> Optional[ed25519.Ed25519PublicKey]: + """ + Get trusted public key by kid. + + Args: + kid: Key ID (optional). + + Returns: + Trusted public key or None if not found. + """ + key_id = kid if kid else DEFAULT_KEY_ID + return self.trusted_public_keys.get(key_id) + + def add_trusted_key(self, kid: str, public_key_pem: bytes) -> bool: + """ + Add trusted public key. + + Args: + kid: Key ID. + public_key_pem: PEM-encoded public key. + + Returns: + True if key was added successfully. 
+ """ + try: + public_key = serialization.load_pem_public_key(public_key_pem) + if not isinstance(public_key, ed25519.Ed25519PublicKey): + logger.error("Public key is not Ed25519", extra={"kid": kid}) + return False + + self.trusted_public_keys[kid] = public_key + logger.info("Trusted key added", extra={"kid": kid}) + return True + except Exception as e: + logger.error( + "Failed to add trusted key", + extra={"error": str(e), "kid": kid}, + ) + return False + + +signature_verifier = SignatureVerifier() diff --git a/guacamole_test_11_26/api/core/ssrf_protection.py b/guacamole_test_11_26/api/core/ssrf_protection.py new file mode 100755 index 00000000..2dcb7d30 --- /dev/null +++ b/guacamole_test_11_26/api/core/ssrf_protection.py @@ -0,0 +1,327 @@ +"""Enhanced SSRF attack protection with DNS pinning and rebinding prevention.""" + +# Standard library imports +import ipaddress +import socket +import time +from typing import Any, Dict, List, Optional, Set, Tuple + +# Third-party imports +import structlog + +logger = structlog.get_logger(__name__) + + +class SSRFProtection: + """Enhanced SSRF attack protection with DNS pinning.""" + + def __init__(self) -> None: + """Initialize SSRF protection with blocked IPs and networks.""" + self._dns_cache: Dict[str, Tuple[str, float, int]] = {} + self._dns_cache_ttl = 300 + + self._blocked_ips: Set[str] = { + "127.0.0.1", + "::1", + "0.0.0.0", + "169.254.169.254", + "10.0.0.1", + "10.255.255.255", + "172.16.0.1", + "172.31.255.255", + "192.168.0.1", + "192.168.255.255", + } + + self._blocked_networks = [ + "127.0.0.0/8", + "169.254.0.0/16", + "224.0.0.0/4", + "240.0.0.0/4", + "172.17.0.0/16", + "172.18.0.0/16", + "172.19.0.0/16", + "172.20.0.0/16", + "172.21.0.0/16", + "172.22.0.0/16", + "172.23.0.0/16", + "172.24.0.0/16", + "172.25.0.0/16", + "172.26.0.0/16", + "172.27.0.0/16", + "172.28.0.0/16", + "172.29.0.0/16", + "172.30.0.0/16", + "172.31.0.0/16", + ] + + self._allowed_networks: Dict[str, List[str]] = { + "USER": ["10.0.0.0/8", "172.16.0.0/16", "192.168.1.0/24"], + "ADMIN": [ + "10.0.0.0/8", + "172.16.0.0/16", + "192.168.0.0/16", + "203.0.113.0/24", + ], + "SUPER_ADMIN": ["0.0.0.0/0"], + } + + def validate_host( + self, hostname: str, user_role: str + ) -> Tuple[bool, str]: + """Validate host with enhanced SSRF protection. 
+ + Args: + hostname: Hostname or IP address + user_role: User role + + Returns: + Tuple of (allowed: bool, reason: str) + """ + try: + if not hostname or len(hostname) > 253: + return False, f"Invalid hostname length: {hostname}" + + suspicious_chars = [ + "..", + "//", + "\\", + "<", + ">", + '"', + "'", + "`", + "\x00", + ] + if any(char in hostname for char in suspicious_chars): + return False, f"Suspicious characters in hostname: {hostname}" + + if hostname.lower() in ("localhost", "127.0.0.1", "::1"): + return False, f"Host {hostname} is blocked (localhost)" + + resolved_ip = self._resolve_hostname_with_pinning(hostname) + if not resolved_ip: + return False, f"Cannot resolve hostname: {hostname}" + + if resolved_ip in self._blocked_ips: + return False, f"IP {resolved_ip} is in blocked list" + + ip_addr = ipaddress.ip_address(resolved_ip) + for blocked_network in self._blocked_networks: + if ip_addr in ipaddress.ip_network(blocked_network): + return ( + False, + f"IP {resolved_ip} is in blocked network {blocked_network}", + ) + + allowed_networks = self._allowed_networks.get(user_role, []) + if not allowed_networks: + return False, f"Role {user_role} has no allowed networks" + + if user_role == "SUPER_ADMIN": + return True, f"IP {resolved_ip} allowed for SUPER_ADMIN" + + for allowed_network in allowed_networks: + if ip_addr in ipaddress.ip_network(allowed_network): + return ( + True, + f"IP {resolved_ip} allowed in network {allowed_network}", + ) + + return ( + False, + f"IP {resolved_ip} not in any allowed network for role {user_role}", + ) + + except Exception as e: + logger.error("SSRF validation error", hostname=hostname, error=str(e)) + return False, f"Error validating host: {str(e)}" + + def _resolve_hostname_with_pinning(self, hostname: str) -> Optional[str]: + """DNS resolution with pinning to prevent rebinding attacks. + + Args: + hostname: Hostname to resolve + + Returns: + IP address or None if resolution failed + """ + try: + cache_key = hostname.lower() + if cache_key in self._dns_cache: + cached_ip, timestamp, ttl = self._dns_cache[cache_key] + + if time.time() - timestamp < ttl: + logger.debug( + "Using cached DNS resolution", + hostname=hostname, + ip=cached_ip, + age_seconds=int(time.time() - timestamp), + ) + return cached_ip + del self._dns_cache[cache_key] + + original_timeout = socket.getdefaulttimeout() + socket.setdefaulttimeout(5) + + try: + ip1 = socket.gethostbyname(hostname) + + time.sleep(0.1) + + ip2 = socket.gethostbyname(hostname) + + if ip1 != ip2: + logger.warning( + "DNS rebinding detected", + hostname=hostname, + ip1=ip1, + ip2=ip2, + ) + return None + + if ip1 in ("127.0.0.1", "::1"): + logger.warning( + "DNS resolution returned localhost", hostname=hostname, ip=ip1 + ) + return None + + self._dns_cache[cache_key] = ( + ip1, + time.time(), + self._dns_cache_ttl, + ) + + logger.info( + "DNS resolution successful", hostname=hostname, ip=ip1, cached=True + ) + + return ip1 + + finally: + socket.setdefaulttimeout(original_timeout) + + except socket.gaierror as e: + logger.warning("DNS resolution failed", hostname=hostname, error=str(e)) + return None + except socket.timeout: + logger.warning("DNS resolution timeout", hostname=hostname) + return None + except Exception as e: + logger.error( + "Unexpected DNS resolution error", hostname=hostname, error=str(e) + ) + return None + + def validate_port(self, port: int) -> Tuple[bool, str]: + """Validate port number. 
+ + Args: + port: Port number + + Returns: + Tuple of (valid: bool, reason: str) + """ + if not isinstance(port, int) or port < 1 or port > 65535: + return False, f"Invalid port: {port}" + + blocked_ports = { + 22, + 23, + 25, + 53, + 80, + 110, + 143, + 443, + 993, + 995, + 135, + 139, + 445, + 1433, + 1521, + 3306, + 5432, + 6379, + 3389, + 5900, + 5901, + 5902, + 5903, + 5904, + 5905, + 8080, + 8443, + 9090, + 9091, + } + + if port in blocked_ports: + return False, f"Port {port} is blocked (system port)" + + return True, f"Port {port} is valid" + + def cleanup_expired_cache(self) -> None: + """Clean up expired DNS cache entries.""" + current_time = time.time() + expired_keys = [ + key + for key, (_, timestamp, ttl) in self._dns_cache.items() + if current_time - timestamp > ttl + ] + + for key in expired_keys: + del self._dns_cache[key] + + if expired_keys: + logger.info( + "Cleaned up expired DNS cache entries", count=len(expired_keys) + ) + + def get_cache_stats(self) -> Dict[str, Any]: + """Get DNS cache statistics. + + Returns: + Dictionary with cache statistics + """ + current_time = time.time() + active_entries = 0 + expired_entries = 0 + + for _, timestamp, ttl in self._dns_cache.values(): + if current_time - timestamp < ttl: + active_entries += 1 + else: + expired_entries += 1 + + return { + "total_entries": len(self._dns_cache), + "active_entries": active_entries, + "expired_entries": expired_entries, + "cache_ttl_seconds": self._dns_cache_ttl, + "blocked_ips_count": len(self._blocked_ips), + "blocked_networks_count": len(self._blocked_networks), + } + + def add_blocked_ip(self, ip: str) -> None: + """Add IP to blocked list. + + Args: + ip: IP address to block + """ + self._blocked_ips.add(ip) + logger.info("Added IP to blocked list", ip=ip) + + def remove_blocked_ip(self, ip: str) -> None: + """Remove IP from blocked list. 
+ + Args: + ip: IP address to unblock + """ + self._blocked_ips.discard(ip) + logger.info("Removed IP from blocked list", ip=ip) + + +# Global instance for use in API +ssrf_protection = SSRFProtection() diff --git a/guacamole_test_11_26/api/core/token_blacklist.py b/guacamole_test_11_26/api/core/token_blacklist.py new file mode 100755 index 00000000..b37f5f87 --- /dev/null +++ b/guacamole_test_11_26/api/core/token_blacklist.py @@ -0,0 +1,263 @@ +"""Redis-based token blacklist for JWT token revocation.""" + +# Standard library imports +import hashlib +import json +import os +from datetime import datetime, timezone +from typing import Any, Dict, Optional + +# Third-party imports +import redis +import structlog + +# Local imports +from .session_storage import session_storage + +logger = structlog.get_logger(__name__) + +# Redis configuration constants +REDIS_SOCKET_TIMEOUT = 5 +REDIS_DEFAULT_HOST = "localhost" +REDIS_DEFAULT_PORT = "6379" +REDIS_DEFAULT_DB = "0" + +# Blacklist constants +BLACKLIST_KEY_PREFIX = "blacklist:" +TOKEN_HASH_PREVIEW_LENGTH = 16 +DEFAULT_REVOCATION_REASON = "logout" +DEFAULT_FORCE_LOGOUT_REASON = "force_logout" + + +class TokenBlacklist: + """Redis-based blacklist for JWT token revocation.""" + + def __init__(self) -> None: + """Initialize token blacklist with Redis connection.""" + self.redis_client = redis.Redis( + host=os.getenv("REDIS_HOST", REDIS_DEFAULT_HOST), + port=int(os.getenv("REDIS_PORT", REDIS_DEFAULT_PORT)), + password=os.getenv("REDIS_PASSWORD"), + db=int(os.getenv("REDIS_DB", REDIS_DEFAULT_DB)), + decode_responses=True, + socket_connect_timeout=REDIS_SOCKET_TIMEOUT, + socket_timeout=REDIS_SOCKET_TIMEOUT, + retry_on_timeout=True, + ) + + try: + self.redis_client.ping() + logger.info("Token blacklist Redis connection established") + except redis.ConnectionError as e: + logger.error( + "Failed to connect to Redis for token blacklist", error=str(e) + ) + raise + + def _get_token_hash(self, token: str) -> str: + """Get token hash for use as Redis key. + + Args: + token: JWT token + + Returns: + SHA-256 hash of token + """ + return hashlib.sha256(token.encode("utf-8")).hexdigest() + + def revoke_token( + self, + token: str, + reason: str = DEFAULT_REVOCATION_REASON, + revoked_by: Optional[str] = None, + ) -> bool: + """Revoke token (add to blacklist). 
+ + Args: + token: JWT token to revoke + reason: Revocation reason + revoked_by: Username who revoked the token + + Returns: + True if token successfully revoked + """ + try: + from .utils import get_token_expiry_info + + expiry_info = get_token_expiry_info(token) + + if not expiry_info: + logger.warning( + "Cannot revoke token: invalid or expired", reason=reason + ) + return False + + token_hash = self._get_token_hash(token) + + now = datetime.now(timezone.utc) + blacklist_data = { + "token_hash": token_hash, + "reason": reason, + "revoked_at": now.isoformat(), + "revoked_by": revoked_by, + "expires_at": expiry_info["expires_at"], + "username": expiry_info.get("username"), + "token_type": expiry_info.get("token_type", "access"), + } + + expires_at = datetime.fromisoformat(expiry_info["expires_at"]) + if expires_at.tzinfo is None: + expires_at = expires_at.replace(tzinfo=timezone.utc) + ttl_seconds = int((expires_at - now).total_seconds()) + + if ttl_seconds <= 0: + logger.debug( + "Token already expired, no need to blacklist", + username=expiry_info.get("username"), + ) + return True + + self.redis_client.setex( + f"{BLACKLIST_KEY_PREFIX}{token_hash}", + ttl_seconds, + json.dumps(blacklist_data), + ) + + logger.info( + "Token revoked successfully", + token_hash=token_hash[:TOKEN_HASH_PREVIEW_LENGTH] + "...", + username=expiry_info.get("username"), + reason=reason, + revoked_by=revoked_by, + ttl_seconds=ttl_seconds, + ) + + return True + + except Exception as e: + logger.error("Failed to revoke token", error=str(e), reason=reason) + return False + + def is_token_revoked(self, token: str) -> bool: + """Check if token is revoked. + + Args: + token: JWT token to check + + Returns: + True if token is revoked + """ + try: + token_hash = self._get_token_hash(token) + blacklist_data = self.redis_client.get(f"{BLACKLIST_KEY_PREFIX}{token_hash}") + + if blacklist_data: + data = json.loads(blacklist_data) + logger.debug( + "Token is revoked", + token_hash=token_hash[:TOKEN_HASH_PREVIEW_LENGTH] + "...", + reason=data.get("reason"), + revoked_at=data.get("revoked_at"), + ) + return True + + return False + + except Exception as e: + logger.error( + "Failed to check token revocation status", error=str(e) + ) + return False + + def revoke_user_tokens( + self, + username: str, + reason: str = DEFAULT_FORCE_LOGOUT_REASON, + revoked_by: Optional[str] = None, + ) -> int: + """Revoke all user tokens. + + Args: + username: Username + reason: Revocation reason + revoked_by: Who revoked the tokens + + Returns: + Number of revoked tokens + """ + try: + session = session_storage.get_session_by_username(username) + + if not session: + logger.debug( + "No active session found for user", username=username + ) + return 0 + + session_storage.delete_user_sessions(username) + + logger.info( + "User tokens revoked", + username=username, + reason=reason, + revoked_by=revoked_by, + ) + + return 1 + + except Exception as e: + logger.error( + "Failed to revoke user tokens", username=username, error=str(e) + ) + return 0 + + def get_blacklist_stats(self) -> Dict[str, Any]: + """Get blacklist statistics. 
+
+        Returns:
+            Statistics about revoked tokens
+        """
+        try:
+            blacklist_keys = self.redis_client.keys(f"{BLACKLIST_KEY_PREFIX}*")
+
+            reasons_count: Dict[str, int] = {}
+            for key in blacklist_keys:
+                data = self.redis_client.get(key)
+                if data:
+                    blacklist_data = json.loads(data)
+                    reason = blacklist_data.get("reason", "unknown")
+                    reasons_count[reason] = reasons_count.get(reason, 0) + 1
+
+            return {
+                "revoked_tokens": len(blacklist_keys),
+                "reasons": reasons_count,
+                # MEMORY USAGE operates on a single key and does not expand
+                # glob patterns, so sum the per-key usage instead.
+                "redis_memory_usage": sum(
+                    self.redis_client.memory_usage(key) or 0
+                    for key in blacklist_keys
+                ),
+            }
+
+        except Exception as e:
+            logger.error("Failed to get blacklist stats", error=str(e))
+            return {
+                "revoked_tokens": 0,
+                "reasons": {},
+                "redis_memory_usage": 0,
+                "error": str(e),
+            }
+
+    def cleanup_expired_blacklist(self) -> int:
+        """Clean up expired blacklist entries.
+
+        Returns:
+            Number of cleaned entries (always 0, Redis handles this automatically)
+        """
+        logger.debug(
+            "Expired blacklist cleanup completed (Redis TTL handles this automatically)"
+        )
+        return 0
+
+
+# Global instance for use in API
+token_blacklist = TokenBlacklist()
 diff --git a/guacamole_test_11_26/api/core/utils.py b/guacamole_test_11_26/api/core/utils.py
 new file mode 100755
 index 00000000..4ed7bb65
 --- /dev/null
 +++ b/guacamole_test_11_26/api/core/utils.py
 @@ -0,0 +1,362 @@
+"""Utilities for JWT token and session storage operations."""
+
+# Standard library imports
+import os
+from datetime import datetime, timedelta, timezone
+from typing import Any, Dict, Optional
+
+# Third-party imports
+import jwt
+import structlog
+
+# Local imports
+from .session_storage import session_storage
+from .token_blacklist import token_blacklist
+
+logger = structlog.get_logger(__name__)
+
+# JWT configuration from environment variables (the JWT_SECRET_KEY fallback
+# below is a development default and must be overridden in production)
+JWT_SECRET_KEY = os.getenv(
+    "JWT_SECRET_KEY",
+    "your_super_secret_jwt_key_minimum_32_characters_long",
+)
+JWT_ALGORITHM = os.getenv("JWT_ALGORITHM", "HS256")
+JWT_ACCESS_TOKEN_EXPIRE_MINUTES = int(
+    os.getenv("JWT_ACCESS_TOKEN_EXPIRE_MINUTES", "60")
+)
+JWT_REFRESH_TOKEN_EXPIRE_DAYS = int(
+    os.getenv("JWT_REFRESH_TOKEN_EXPIRE_DAYS", "7")
+)
+
+
+def create_jwt_token(
+    user_info: Dict[str, Any], session_id: str, token_type: str = "access"
+) -> str:
+    """Create JWT token with session_id instead of Guacamole token. 
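+
+    Example (illustrative values; the role string and session ID are
+    hypothetical)::
+
+        token = create_jwt_token(
+            {"username": "alice", "role": "ADMIN", "permissions": []},
+            session_id="a1b2c3d4",
+        )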
+ + Args: + user_info: User information dictionary + session_id: Session ID in Redis + token_type: Token type ("access" or "refresh") + + Returns: + JWT token as string + + Raises: + Exception: If token creation fails + """ + try: + if token_type == "refresh": + expire_delta = timedelta(days=JWT_REFRESH_TOKEN_EXPIRE_DAYS) + else: + expire_delta = timedelta( + minutes=JWT_ACCESS_TOKEN_EXPIRE_MINUTES + ) + + now = datetime.now(timezone.utc) + payload = { + "username": user_info["username"], + "role": user_info["role"], + "permissions": user_info.get("permissions", []), + "session_id": session_id, + "token_type": token_type, + "exp": now + expire_delta, + "iat": now, + "iss": "remote-access-api", + } + + optional_fields = [ + "full_name", + "email", + "organization", + "organizational_role", + ] + for field in optional_fields: + if field in user_info: + payload[field] = user_info[field] + + token = jwt.encode(payload, JWT_SECRET_KEY, algorithm=JWT_ALGORITHM) + + logger.info( + "JWT token created successfully", + username=user_info["username"], + token_type=token_type, + session_id=session_id, + expires_in_minutes=expire_delta.total_seconds() / 60, + payload_keys=list(payload.keys()), + token_prefix=token[:30] + "...", + ) + + return token + + except Exception as e: + logger.error( + "Failed to create JWT token", + username=user_info.get("username", "unknown"), + error=str(e), + ) + raise + + +def verify_jwt_token(token: str) -> Optional[Dict[str, Any]]: + """Verify and decode JWT token with blacklist check. + + Args: + token: JWT token to verify + + Returns: + Decoded payload or None if token is invalid + """ + try: + logger.debug("Starting JWT verification", token_prefix=token[:30] + "...") + + if token_blacklist.is_token_revoked(token): + logger.info("JWT token is revoked", token_prefix=token[:20] + "...") + return None + + logger.debug("Token not in blacklist, attempting decode") + + payload = jwt.decode(token, JWT_SECRET_KEY, algorithms=[JWT_ALGORITHM]) + + logger.info( + "JWT decode successful", + username=payload.get("username"), + payload_keys=list(payload.keys()), + has_session_id="session_id" in payload, + session_id=payload.get("session_id", "NOT_FOUND"), + ) + + required_fields = ["username", "role", "session_id", "exp", "iat"] + for field in required_fields: + if field not in payload: + logger.warning( + "JWT token missing required field", + field=field, + username=payload.get("username", "unknown"), + available_fields=list(payload.keys()), + ) + return None + + logger.debug("All required fields present") + + exp_timestamp = payload["exp"] + current_timestamp = datetime.now(timezone.utc).timestamp() + if current_timestamp > exp_timestamp: + logger.info( + "JWT token expired", + username=payload["username"], + expired_at=datetime.fromtimestamp( + exp_timestamp, tz=timezone.utc + ).isoformat(), + current_time=datetime.now(timezone.utc).isoformat(), + ) + return None + + logger.debug( + "Token not expired, checking Redis session", + session_id=payload["session_id"], + ) + + session_data = session_storage.get_session(payload["session_id"]) + if not session_data: + logger.warning( + "Session not found for JWT token", + username=payload["username"], + session_id=payload["session_id"], + possible_reasons=[ + "session expired in Redis", + "session never created", + "Redis connection issue", + ], + ) + return None + + logger.debug( + "Session found in Redis", + username=payload["username"], + session_id=payload["session_id"], + session_keys=list(session_data.keys()), + ) + + if 
"guac_token" not in session_data: + logger.error( + "Session exists but missing guac_token", + username=payload["username"], + session_id=payload["session_id"], + session_keys=list(session_data.keys()), + ) + return None + + payload["guac_token"] = session_data["guac_token"] + + logger.info( + "JWT token verified successfully", + username=payload["username"], + role=payload["role"], + token_type=payload.get("token_type", "access"), + session_id=payload["session_id"], + guac_token_length=len(session_data["guac_token"]), + ) + + return payload + + except jwt.ExpiredSignatureError: + logger.info( + "JWT token expired (ExpiredSignatureError)", + token_prefix=token[:20] + "...", + ) + return None + except jwt.InvalidTokenError as e: + logger.warning( + "Invalid JWT token (InvalidTokenError)", + error=str(e), + error_type=type(e).__name__, + token_prefix=token[:20] + "...", + ) + return None + except Exception as e: + logger.error( + "Unexpected error verifying JWT token", + error=str(e), + error_type=type(e).__name__, + ) + return None + + +def create_refresh_token( + user_info: Dict[str, Any], session_id: str +) -> str: + """Create refresh token. + + Args: + user_info: User information dictionary + session_id: Session ID in Redis + + Returns: + Refresh token + """ + return create_jwt_token(user_info, session_id, token_type="refresh") + + +def extract_token_from_header( + authorization_header: Optional[str], +) -> Optional[str]: + """Extract token from Authorization header. + + Args: + authorization_header: Authorization header value + + Returns: + JWT token or None + """ + if not authorization_header: + return None + + if not authorization_header.startswith("Bearer "): + return None + + return authorization_header.split(" ", 1)[1] + + +def get_token_expiry_info(token: str) -> Optional[Dict[str, Any]]: + """Get token expiration information. + + Args: + token: JWT token + + Returns: + Expiration information or None + """ + try: + payload = jwt.decode(token, options={"verify_signature": False}) + + exp_timestamp = payload.get("exp") + iat_timestamp = payload.get("iat") + + if not exp_timestamp: + return None + + exp_datetime = datetime.fromtimestamp(exp_timestamp, tz=timezone.utc) + iat_datetime = ( + datetime.fromtimestamp(iat_timestamp, tz=timezone.utc) + if iat_timestamp + else None + ) + current_time = datetime.now(timezone.utc) + + return { + "expires_at": exp_datetime.isoformat(), + "issued_at": iat_datetime.isoformat() if iat_datetime else None, + "expires_in_seconds": max( + 0, int((exp_datetime - current_time).total_seconds()) + ), + "is_expired": current_time > exp_datetime, + "username": payload.get("username"), + "token_type": payload.get("token_type", "access"), + "session_id": payload.get("session_id"), + } + + except Exception as e: + logger.error("Failed to get token expiry info", error=str(e)) + return None + + +def is_token_expired(token: str) -> bool: + """Check if token is expired. + + Args: + token: JWT token + + Returns: + True if token is expired, False if valid + """ + expiry_info = get_token_expiry_info(token) + return expiry_info["is_expired"] if expiry_info else True + + +def revoke_token( + token: str, + reason: str = "logout", + revoked_by: Optional[str] = None, +) -> bool: + """Revoke token (add to blacklist). 
+ + Args: + token: JWT token to revoke + reason: Revocation reason + revoked_by: Who revoked the token + + Returns: + True if token successfully revoked + """ + try: + return token_blacklist.revoke_token(token, reason, revoked_by) + except Exception as e: + logger.error("Failed to revoke token", error=str(e)) + return False + + +def revoke_user_tokens( + username: str, + reason: str = "force_logout", + revoked_by: Optional[str] = None, +) -> int: + """Revoke all user tokens. + + Args: + username: Username + reason: Revocation reason + revoked_by: Who revoked the tokens + + Returns: + Number of revoked tokens + """ + try: + return token_blacklist.revoke_user_tokens( + username, reason, revoked_by + ) + except Exception as e: + logger.error( + "Failed to revoke user tokens", username=username, error=str(e) + ) + return 0 \ No newline at end of file diff --git a/guacamole_test_11_26/api/core/websocket_manager.py b/guacamole_test_11_26/api/core/websocket_manager.py new file mode 100755 index 00000000..bd3363cb --- /dev/null +++ b/guacamole_test_11_26/api/core/websocket_manager.py @@ -0,0 +1,326 @@ +"""WebSocket Manager for real-time client notifications.""" + +# Standard library imports +import asyncio +from datetime import datetime, timezone +from typing import Any, Dict, List, Optional, Set + +# Third-party imports +from fastapi import WebSocket +import structlog + +logger = structlog.get_logger(__name__) + + +class WebSocketManager: + """WebSocket connection manager for sending notifications to clients. + + Supported events: + - connection_expired: Connection expired + - connection_deleted: Connection deleted manually + - connection_will_expire: Connection will expire soon (5 min warning) + - jwt_will_expire: JWT token will expire soon (5 min warning) + - jwt_expired: JWT token expired + - connection_extended: Connection TTL extended + """ + + def __init__(self) -> None: + """Initialize WebSocket manager.""" + self.active_connections: Dict[str, Set[WebSocket]] = {} + self._lock = asyncio.Lock() + + async def connect(self, websocket: WebSocket, username: str) -> None: + """Connect a new client. + + Args: + websocket: WebSocket connection (already accepted) + username: Username + """ + async with self._lock: + if username not in self.active_connections: + self.active_connections[username] = set() + self.active_connections[username].add(websocket) + + logger.info( + "WebSocket client connected", + username=username, + total_connections=len( + self.active_connections.get(username, set()) + ), + ) + + async def disconnect(self, websocket: WebSocket, username: str) -> None: + """Disconnect a client. + + Args: + websocket: WebSocket connection + username: Username + """ + async with self._lock: + if username in self.active_connections: + self.active_connections[username].discard(websocket) + + if not self.active_connections[username]: + del self.active_connections[username] + + logger.info( + "WebSocket client disconnected", + username=username, + remaining_connections=len( + self.active_connections.get(username, set()) + ), + ) + + async def send_to_user( + self, username: str, message: Dict[str, Any] + ) -> None: + """Send message to all WebSocket connections of a user. 
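+
+        Example (illustrative message payload)::
+
+            await websocket_manager.send_to_user(
+                "alice", {"type": "ping", "data": {}}
+            )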
+
+        Args:
+            username: Username
+            message: Dictionary with data to send
+        """
+        if username not in self.active_connections:
+            logger.debug(
+                "No active WebSocket connections for user", username=username
+            )
+            return
+
+        connections = self.active_connections[username].copy()
+
+        disconnected = []
+        for websocket in connections:
+            try:
+                await websocket.send_json(message)
+                logger.debug(
+                    "Message sent via WebSocket",
+                    username=username,
+                    event_type=message.get("type"),
+                )
+            except Exception as e:
+                logger.error(
+                    "Failed to send WebSocket message",
+                    username=username,
+                    error=str(e),
+                )
+                disconnected.append(websocket)
+
+        if disconnected:
+            async with self._lock:
+                # Re-check membership: a concurrent disconnect() may have
+                # removed the user's entry while send_json() was awaited.
+                if username in self.active_connections:
+                    for ws in disconnected:
+                        self.active_connections[username].discard(ws)
+
+                    if not self.active_connections[username]:
+                        del self.active_connections[username]
+
+    async def send_connection_expired(
+        self,
+        username: str,
+        connection_id: str,
+        hostname: str,
+        protocol: str,
+    ) -> None:
+        """Notify about connection expiration.
+
+        Args:
+            username: Username
+            connection_id: Connection ID
+            hostname: Machine hostname
+            protocol: Connection protocol
+        """
+        message = {
+            "type": "connection_expired",
+            "timestamp": datetime.now(timezone.utc).isoformat(),
+            "data": {
+                "connection_id": connection_id,
+                "hostname": hostname,
+                "protocol": protocol,
+                "reason": "TTL expired",
+            },
+        }
+        await self.send_to_user(username, message)
+
+        logger.info(
+            "Connection expired notification sent",
+            username=username,
+            connection_id=connection_id,
+            hostname=hostname,
+        )
+
+    async def send_connection_deleted(
+        self,
+        username: str,
+        connection_id: str,
+        hostname: str,
+        protocol: str,
+        reason: str = "manual",
+    ) -> None:
+        """Notify about connection deletion.
+
+        Args:
+            username: Username
+            connection_id: Connection ID
+            hostname: Machine hostname
+            protocol: Connection protocol
+            reason: Deletion reason (manual, expired, error)
+        """
+        message = {
+            "type": "connection_deleted",
+            "timestamp": datetime.now(timezone.utc).isoformat(),
+            "data": {
+                "connection_id": connection_id,
+                "hostname": hostname,
+                "protocol": protocol,
+                "reason": reason,
+            },
+        }
+        await self.send_to_user(username, message)
+
+        logger.info(
+            "Connection deleted notification sent",
+            username=username,
+            connection_id=connection_id,
+            reason=reason,
+        )
+
+    async def send_connection_will_expire(
+        self,
+        username: str,
+        connection_id: str,
+        hostname: str,
+        protocol: str,
+        minutes_remaining: int,
+    ) -> None:
+        """Warn about upcoming connection expiration.
+
+        Args:
+            username: Username
+            connection_id: Connection ID
+            hostname: Machine hostname
+            protocol: Connection protocol
+            minutes_remaining: Minutes until expiration
+        """
+        message = {
+            "type": "connection_will_expire",
+            "timestamp": datetime.now(timezone.utc).isoformat(),
+            "data": {
+                "connection_id": connection_id,
+                "hostname": hostname,
+                "protocol": protocol,
+                "minutes_remaining": minutes_remaining,
+            },
+        }
+        await self.send_to_user(username, message)
+
+        logger.info(
+            "Connection expiration warning sent",
+            username=username,
+            connection_id=connection_id,
+            minutes_remaining=minutes_remaining,
+        )
+
+    async def send_jwt_will_expire(
+        self, username: str, minutes_remaining: int
+    ) -> None:
+        """Warn about upcoming JWT token expiration. 
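+
+        Example (illustrative)::
+
+            await websocket_manager.send_jwt_will_expire("alice", 5)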
+ + Args: + username: Username + minutes_remaining: Minutes until expiration + """ + message = { + "type": "jwt_will_expire", + "timestamp": datetime.now(timezone.utc).isoformat(), + "data": { + "minutes_remaining": minutes_remaining, + "action_required": "Please refresh your token or re-login", + }, + } + await self.send_to_user(username, message) + + logger.info( + "JWT expiration warning sent", + username=username, + minutes_remaining=minutes_remaining, + ) + + async def send_jwt_expired(self, username: str) -> None: + """Notify about JWT token expiration. + + Args: + username: Username + """ + message = { + "type": "jwt_expired", + "timestamp": datetime.now(timezone.utc).isoformat(), + "data": {"action_required": "Please re-login"}, + } + await self.send_to_user(username, message) + + logger.info("JWT expired notification sent", username=username) + + async def send_connection_extended( + self, + username: str, + connection_id: str, + hostname: str, + new_expires_at: datetime, + additional_minutes: int, + ) -> None: + """Notify about connection extension. + + Args: + username: Username + connection_id: Connection ID + hostname: Machine hostname + new_expires_at: New expiration time + additional_minutes: Minutes added + """ + message = { + "type": "connection_extended", + "timestamp": datetime.now(timezone.utc).isoformat(), + "data": { + "connection_id": connection_id, + "hostname": hostname, + "new_expires_at": new_expires_at.isoformat(), + "additional_minutes": additional_minutes, + }, + } + await self.send_to_user(username, message) + + logger.info( + "Connection extension notification sent", + username=username, + connection_id=connection_id, + additional_minutes=additional_minutes, + ) + + def get_active_users(self) -> List[str]: + """Get list of users with active WebSocket connections. + + Returns: + List of usernames + """ + return list(self.active_connections.keys()) + + def get_connection_count(self, username: Optional[str] = None) -> int: + """Get count of active WebSocket connections. + + Args: + username: Username (if None, returns total count) + + Returns: + Number of connections + """ + if username: + return len(self.active_connections.get(username, [])) + + return sum( + len(connections) + for connections in self.active_connections.values() + ) + + +# Singleton instance +websocket_manager = WebSocketManager() + diff --git a/guacamole_test_11_26/api/get_signing_key.py b/guacamole_test_11_26/api/get_signing_key.py new file mode 100755 index 00000000..56926ecd --- /dev/null +++ b/guacamole_test_11_26/api/get_signing_key.py @@ -0,0 +1,129 @@ +#!/usr/bin/env python3 +"""Utility to retrieve Ed25519 signing public key for client configuration. + +This script outputs the public key in base64 format for adding to +SignatureVerificationService.ts on the client side. + +Usage: + python get_signing_key.py +""" + +# Standard library imports +import base64 +import os +import sys +from typing import Tuple + +# Third-party imports +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives import serialization + + +def get_signing_public_key() -> Tuple[str, str]: + """Read signing public key from file. + + Returns: + Tuple of (PEM format string, base64 encoded string). + + Raises: + SystemExit: If key file not found or failed to load. 
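+
+    Example (illustrative)::
+
+        pem, b64 = get_signing_public_key()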
+    """
+    key_file = os.getenv(
+        "ED25519_SIGNING_KEY_PATH", "/app/secrets/ed25519_signing_key.pem"
+    )
+
+    if not os.path.exists(key_file):
+        print(
+            f"ERROR: Signing key file not found: {key_file}",
+            file=sys.stderr,
+        )
+        print("", file=sys.stderr)
+        print("SOLUTION:", file=sys.stderr)
+        print(
+            "1. Start the API server first to generate the key:",
+            file=sys.stderr,
+        )
+        print(
+            "   docker-compose up remote_access_api",
+            file=sys.stderr,
+        )
+        print(
+            "2. Or run this script inside the container:",
+            file=sys.stderr,
+        )
+        print(
+            "   docker-compose exec remote_access_api python get_signing_key.py",
+            file=sys.stderr,
+        )
+        sys.exit(1)
+
+    try:
+        with open(key_file, "rb") as f:
+            private_key_pem = f.read()
+
+        private_key = serialization.load_pem_private_key(
+            private_key_pem, password=None, backend=default_backend()
+        )
+
+        public_key = private_key.public_key()
+
+        public_key_pem = public_key.public_bytes(
+            encoding=serialization.Encoding.PEM,
+            format=serialization.PublicFormat.SubjectPublicKeyInfo,
+        )
+
+        public_key_b64 = base64.b64encode(public_key_pem).decode("utf-8")
+
+        return public_key_pem.decode("utf-8"), public_key_b64
+
+    except Exception as e:
+        print(
+            f"ERROR: Failed to load signing key: {e}",
+            file=sys.stderr,
+        )
+        sys.exit(1)
+
+
+def main() -> None:
+    """Main function to display signing public key."""
+    print("=" * 80)
+    print("Ed25519 Signing Public Key for Client Configuration")
+    print("=" * 80)
+    print("")
+
+    pem, base64_encoded = get_signing_public_key()
+
+    print("PEM Format:")
+    print(pem)
+
+    print("Base64 Encoded (for client configuration):")
+    print(base64_encoded)
+    print("")
+
+    print("=" * 80)
+    print("How to use:")
+    print("=" * 80)
+    print("")
+    print("1. Copy the Base64 encoded key above")
+    print("")
+    print(
+        "2. Update MachineControlCenter/src/renderer/services/SignatureVerificationService.ts:"
+    )
+    print("")
+    print("  const TRUSTED_SIGNING_KEYS: Record<string, string> = {")
+    print(f"    production: '{base64_encoded}',")
+    print(f"    development: '{base64_encoded}',")
+    print(f"    local: '{base64_encoded}'")
+    print("  };")
+    print("")
+    print("3. 
Rebuild the client application:") + print(" cd MachineControlCenter") + print(" npm run build") + print("") + print("=" * 80) + print("") + + +if __name__ == "__main__": + main() + diff --git a/guacamole_test_11_26/api/main.py b/guacamole_test_11_26/api/main.py new file mode 100755 index 00000000..f119bddc --- /dev/null +++ b/guacamole_test_11_26/api/main.py @@ -0,0 +1,2903 @@ +# Standard library imports +import asyncio +import base64 +import json +import logging +import os +import platform +import socket +import subprocess +import sys +import time +import uuid +from collections import defaultdict, deque +from datetime import datetime, timedelta, timezone +from typing import Any, Dict, List, Optional + +# Third-party imports +import jwt +import psutil +import requests +import structlog +from cryptography.hazmat.primitives import serialization +from dotenv import load_dotenv +from fastapi import ( + BackgroundTasks, + Depends, + FastAPI, + HTTPException, + Request, + Response, + WebSocket, + WebSocketDisconnect, +) +from fastapi.middleware.cors import CORSMiddleware +from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials +from pydantic import BaseModel + +# Local imports +from core import ( + ConnectionRequest, + ConnectionResponse, + GuacamoleAuthenticator, + LoginRequest, + LoginResponse, + PermissionChecker, + UserInfo, + UserRole, + create_jwt_token, + verify_jwt_token, +) +from core.audit_logger import immutable_audit_logger +from core.brute_force_protection import brute_force_protection +from core.csrf_protection import csrf_protection +from core.log_sanitizer import log_sanitizer, sanitize_log_processor +from core.middleware import get_current_user, get_current_user_token, jwt_auth_middleware +from core.models import ( + BulkHealthCheckRequest, + BulkHealthCheckResponse, + BulkHealthCheckResult, + BulkSSHCommandRequest, + BulkSSHCommandResponse, + BulkSSHCommandResult, + ConnectionHistoryCreate, + ConnectionHistoryResponse, + SavedMachineCreate, + SavedMachineList, + SavedMachineResponse, + SavedMachineUpdate, + SSHCredentials, +) +from core.rate_limiter import redis_rate_limiter +from core.redis_storage import redis_connection_storage +from core.saved_machines_db import saved_machines_db +from core.session_storage import session_storage +from core.ssrf_protection import ssrf_protection +from core.token_blacklist import token_blacklist +from core.websocket_manager import websocket_manager +from security_config import SecurityConfig +from services.system_service import SystemService +from routers import bulk_router + +load_dotenv() + +enable_docs = os.getenv("ENABLE_DOCS", "true").lower() == "true" + +tags_metadata = [ + { + "name": "System", + "description": "System health checks and service status" + }, + { + "name": "Authentication", + "description": "User authentication and authorization (login, logout, profile, permissions)" + }, + { + "name": "Connections", + "description": "Remote desktop connection management (create, list, delete, extend TTL)" + }, + { + "name": "Machines", + "description": "Machine management and saved machines CRUD operations" + }, + { + "name": "Bulk Operations", + "description": "Bulk operations on multiple machines (health checks, SSH commands)" + } +] + +app = FastAPI( + title="Remote Access API", + description="Remote desktop management API via Apache Guacamole. 
Supports RDP, VNC, SSH protocols with JWT authentication.", + version="1.0.0", + docs_url="/api/docs" if enable_docs else None, + redoc_url="/api/redoc" if enable_docs else None, + openapi_url="/api/openapi.json" if enable_docs else None, + openapi_tags=tags_metadata +) + +app.include_router(bulk_router) + +security = HTTPBearer() + +# Structured logging configuration +LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO").upper() +LOG_FORMAT = os.getenv("LOG_FORMAT", "json") + +def add_caller_info( + logger: Any, name: str, event_dict: Dict[str, Any] +) -> Dict[str, Any]: + """Add caller information to log entry.""" + frame = sys._getframe() + try: + while frame and frame.f_back: + filename = frame.f_code.co_filename + if "structlog" not in filename and "logging" not in filename: + event_dict["caller"] = { + "file": filename.split("/")[-1], + "function": frame.f_code.co_name, + "line": frame.f_lineno, + } + break + frame = frame.f_back + finally: + del frame + return event_dict + + +def add_service_context( + logger: Any, name: str, event_dict: Dict[str, Any] +) -> Dict[str, Any]: + """Add service context to log entry.""" + event_dict["service"] = "remote-access-api" + event_dict["version"] = "1.0.0" + return event_dict + +json_processors = [ + structlog.processors.TimeStamper(fmt="iso"), + add_service_context, + add_caller_info, + sanitize_log_processor, + structlog.processors.add_log_level, + structlog.processors.JSONRenderer() +] + +text_processors = [ + structlog.processors.TimeStamper(fmt="iso"), + add_service_context, + sanitize_log_processor, + structlog.processors.add_log_level, + structlog.dev.ConsoleRenderer(colors=True) +] + +structlog.configure( + processors=json_processors if LOG_FORMAT == "json" else text_processors, + wrapper_class=structlog.stdlib.BoundLogger, + logger_factory=structlog.stdlib.LoggerFactory(), + cache_logger_on_first_use=True, +) + +logging.basicConfig( + level=getattr(logging, LOG_LEVEL, logging.INFO), + format='%(message)s' +) + +logger = structlog.get_logger() +security_logger = structlog.get_logger("security") +audit_logger = structlog.get_logger("audit") +performance_logger = structlog.get_logger("performance") +error_logger = structlog.get_logger("error") + + +def record_request_metric( + endpoint: str, + method: str, + status_code: int, + response_time_ms: float, + client_ip: str, +) -> None: + """Record request metrics.""" + metrics_storage["requests"]["total"] += 1 + metrics_storage["requests"]["by_endpoint"][endpoint] += 1 + metrics_storage["requests"]["by_method"][method] += 1 + metrics_storage["requests"]["by_status"][status_code] += 1 + metrics_storage["requests"]["by_ip"][client_ip] += 1 + + times = metrics_storage["performance"]["response_times"][endpoint] + times.append(response_time_ms) + if len(times) > 100: + times.pop(0) + +def record_connection_metric( + protocol: str, + client_ip: str, + creation_time_ms: float, + success: bool = True, +) -> None: + """Record connection creation metrics.""" + if success: + metrics_storage["connections"]["total_created"] += 1 + metrics_storage["connections"]["by_protocol"][protocol] += 1 + metrics_storage["connections"]["by_ip"][client_ip] += 1 + metrics_storage["connections"]["active_count"] = len( + redis_connection_storage.get_all_connections() + ) + + times = metrics_storage["performance"]["connection_creation_times"] + times.append(creation_time_ms) + if len(times) > 100: + times.pop(0) + else: + metrics_storage["errors"]["connection_failures"] += 1 + +def record_host_check_metric(check_time_ms: float, 
success: bool = True) -> None: + """Record host check metrics.""" + if success: + times = metrics_storage["performance"]["host_check_times"] + times.append(check_time_ms) + if len(times) > 100: + times.pop(0) + else: + metrics_storage["connections"]["failed_host_checks"] += 1 + metrics_storage["errors"]["host_unreachable"] += 1 + + +def record_error_metric(error_type: str) -> None: + """Record error metrics.""" + if error_type in metrics_storage["errors"]: + metrics_storage["errors"][error_type] += 1 + +def calculate_percentiles(values: List[float]) -> Dict[str, float]: + """Calculate percentiles for value list""" + if not values: + return {"p50": 0, "p90": 0, "p95": 0, "p99": 0} + + sorted_values = sorted(values) + n = len(sorted_values) + + return { + "p50": sorted_values[int(n * 0.5)], + "p90": sorted_values[int(n * 0.9)], + "p95": sorted_values[int(n * 0.95)], + "p99": sorted_values[int(n * 0.99)] + } + +def get_metrics_summary() -> Dict[str, Any]: + """Get metrics summary.""" + uptime_seconds = int((datetime.now() - service_start_time).total_seconds()) + + endpoint_stats = { + endpoint: { + "request_count": len(times), + "avg_response_time_ms": round(sum(times) / len(times), 2), + "percentiles": calculate_percentiles(times), + } + for endpoint, times in metrics_storage["performance"]["response_times"].items() + if times + } + + connection_times = metrics_storage["performance"]["connection_creation_times"] + connection_stats = ( + { + "avg_creation_time_ms": round(sum(connection_times) / len(connection_times), 2), + "percentiles": calculate_percentiles(connection_times), + } + if connection_times + else {} + ) + + host_check_times = metrics_storage["performance"]["host_check_times"] + host_check_stats = ( + { + "avg_check_time_ms": round(sum(host_check_times) / len(host_check_times), 2), + "percentiles": calculate_percentiles(host_check_times), + } + if host_check_times + else {} + ) + + top_ips = dict( + sorted( + metrics_storage["requests"]["by_ip"].items(), + key=lambda x: x[1], + reverse=True, + )[:10] + ) + + top_protocols = dict( + sorted( + metrics_storage["connections"]["by_protocol"].items(), + key=lambda x: x[1], + reverse=True, + ) + ) + + return { + "uptime_seconds": uptime_seconds, + "requests": { + "total": metrics_storage["requests"]["total"], + "requests_per_second": round( + metrics_storage["requests"]["total"] / max(uptime_seconds, 1), 2 + ), + "by_status": dict(metrics_storage["requests"]["by_status"]), + "by_method": dict(metrics_storage["requests"]["by_method"]), + "top_ips": top_ips, + }, + "connections": { + "total_created": metrics_storage["connections"]["total_created"], + "currently_active": len(redis_connection_storage.get_all_connections()), + "by_protocol": top_protocols, + "failed_host_checks": metrics_storage["connections"]["failed_host_checks"], + }, + "performance": { + "endpoints": endpoint_stats, + "connection_creation": connection_stats, + "host_checks": host_check_stats, + }, + "errors": dict(metrics_storage["errors"]), + } + +def log_security_event( + event_type: str, + client_ip: str, + user_agent: Optional[str] = None, + details: Optional[Dict[str, Any]] = None, + severity: str = "info", + username: Optional[str] = None, +) -> None: + """Log security events to immutable audit log.""" + immutable_audit_logger.log_security_event( + event_type=event_type, + client_ip=client_ip, + user_agent=user_agent, + details=details, + severity=severity, + username=username, + ) + + event = { + "event_type": event_type, + "client_ip": client_ip, + "user_agent": 
user_agent or "unknown", + "severity": severity, + "timestamp": datetime.now().isoformat(), + } + + if details: + event.update(details) + + severity_loggers = { + "critical": security_logger.critical, + "high": security_logger.error, + "medium": security_logger.warning, + } + logger_func = severity_loggers.get(severity, security_logger.info) + logger_func("Security event", **event) + +def log_audit_event( + action: str, + resource: str, + client_ip: str, + user_agent: Optional[str] = None, + result: str = "success", + details: Optional[Dict[str, Any]] = None, + username: Optional[str] = None, +) -> None: + """Log audit events to immutable audit log.""" + immutable_audit_logger.log_audit_event( + action=action, + resource=resource, + client_ip=client_ip, + user_agent=user_agent, + result=result, + details=details, + username=username, + ) + + event = { + "action": action, + "resource": resource, + "client_ip": client_ip, + "user_agent": user_agent or "unknown", + "result": result, + "timestamp": datetime.now().isoformat(), + } + + if details: + event.update(details) + + if result == "failure": + audit_logger.warning("Audit event", **event) + else: + audit_logger.info("Audit event", **event) + +def log_performance_event( + operation: str, + duration_ms: float, + details: Optional[Dict[str, Any]] = None, +) -> None: + """Log performance events.""" + event = { + "operation": operation, + "duration_ms": round(duration_ms, 2), + "timestamp": datetime.now().isoformat(), + } + + if details: + event.update(details) + + if duration_ms > 5000: + performance_logger.warning("Slow operation", **event) + elif duration_ms > 1000: + performance_logger.info("Performance event", **event) + else: + performance_logger.debug("Performance event", **event) + +def log_connection_lifecycle( + connection_id: str, + action: str, + client_ip: str, + hostname: str, + protocol: str, + details: Optional[Dict[str, Any]] = None, +) -> None: + """Log connection lifecycle events.""" + event = { + "connection_id": connection_id, + "action": action, + "client_ip": client_ip, + "hostname": hostname, + "protocol": protocol, + "timestamp": datetime.now().isoformat(), + } + + if details: + event.update(details) + + if action == "failed": + audit_logger.error("Connection lifecycle", **event) + else: + audit_logger.info("Connection lifecycle", **event) + +def log_error_with_context( + error: Exception, + operation: str, + context: Optional[Dict[str, Any]] = None, +) -> None: + """Log errors with context.""" + event = { + "operation": operation, + "error_type": type(error).__name__, + "error_message": str(error), + "timestamp": datetime.now().isoformat(), + } + + if context: + event.update(context) + + error_logger.error("Application error", **event) + + + +# CORS middleware - configured via .env +allowed_origins_str = os.getenv("ALLOWED_ORIGINS") + +if not allowed_origins_str: + logger.error("ALLOWED_ORIGINS environment variable is not set!") + raise RuntimeError( + "ALLOWED_ORIGINS must be set in .env file or docker-compose.yml. 
" + "Example: ALLOWED_ORIGINS=https://mc.exbytestudios.com,http://localhost:5173" + ) + +allowed_origins = [origin.strip() for origin in allowed_origins_str.split(",") if origin.strip()] + +if not allowed_origins: + logger.error("ALLOWED_ORIGINS is empty after parsing!") + raise RuntimeError("ALLOWED_ORIGINS must contain at least one valid origin") + +logger.info("CORS configured", allowed_origins=allowed_origins) + +app.add_middleware( + CORSMiddleware, + allow_origins=allowed_origins, + allow_credentials=True, + allow_methods=["GET", "POST", "PUT", "DELETE", "OPTIONS", "PATCH"], + allow_headers=["*"], +) + +# Electron desktop app CORS middleware +@app.middleware("http") +async def electron_cors_middleware( + request: Request, call_next: Any +) -> Response: + """Handle CORS for Electron desktop app (missing/custom Origin headers).""" + origin = request.headers.get("origin") + response = await call_next(request) + + cors_headers = { + "Access-Control-Allow-Credentials": "true", + "Access-Control-Allow-Methods": "GET, POST, PUT, DELETE, OPTIONS, PATCH", + "Access-Control-Allow-Headers": "*", + } + + if not origin or origin == "null": + logger.debug( + "Request without Origin header (Electron or API client)", + path=request.url.path, + method=request.method, + user_agent=request.headers.get("user-agent", "unknown")[:50], + ) + response.headers["Access-Control-Allow-Origin"] = "*" + response.headers.update(cors_headers) + return response + + if origin.startswith(("file://", "app://")): + logger.debug( + "Request from Electron with custom protocol", + origin=origin, + path=request.url.path, + method=request.method, + ) + response.headers["Access-Control-Allow-Origin"] = origin + response.headers.update(cors_headers) + return response + + return response + +@app.middleware("http") +async def auth_middleware(request: Request, call_next: Any) -> Response: + """JWT authentication middleware.""" + return await jwt_auth_middleware(request, call_next) + +@app.middleware("http") +async def logging_and_metrics_middleware( + request: Request, call_next: Any +) -> Response: + """Request logging and metrics collection middleware.""" + start_time = time.time() + client_ip = request.client.host if request.client else "unknown" + user_agent = request.headers.get("user-agent", "unknown") + request_id = str(uuid.uuid4())[:8] + logger.debug( + "Request started", + request_id=request_id, + method=request.method, + path=request.url.path, + query=str(request.query_params) if request.query_params else None, + client_ip=client_ip, + user_agent=user_agent, + ) + + try: + response = await call_next(request) + response_time_ms = (time.time() - start_time) * 1000 + + record_request_metric( + endpoint=request.url.path, + method=request.method, + status_code=response.status_code, + response_time_ms=response_time_ms, + client_ip=client_ip, + ) + + log_level = logging.WARNING if response.status_code >= 400 else logging.INFO + logger.log( + log_level, + "Request completed", + request_id=request_id, + method=request.method, + path=request.url.path, + status_code=response.status_code, + response_time_ms=round(response_time_ms, 2), + client_ip=client_ip, + ) + + if response_time_ms > 1000: + log_performance_event( + operation=f"{request.method} {request.url.path}", + duration_ms=response_time_ms, + details={ + "request_id": request_id, + "client_ip": client_ip, + "status_code": response.status_code, + }, + ) + + response.headers["X-Request-ID"] = request_id + return response + + except Exception as e: + response_time_ms = 
(time.time() - start_time) * 1000 + + log_error_with_context( + error=e, + operation=f"{request.method} {request.url.path}", + context={ + "request_id": request_id, + "client_ip": client_ip, + "user_agent": user_agent, + "response_time_ms": round(response_time_ms, 2), + }, + ) + + raise + +@app.middleware("http") +async def csrf_middleware(request: Request, call_next: Any) -> Response: + """CSRF protection middleware.""" + if not csrf_protection.should_protect_endpoint( + request.method, request.url.path + ): + return await call_next(request) + + try: + user_info = None + user_id = None + + auth_header = request.headers.get("Authorization") + if auth_header and auth_header.startswith("Bearer "): + token = auth_header.split(" ")[1] + try: + jwt_payload = verify_jwt_token(token) + if jwt_payload: + user_info = jwt_payload + user_id = jwt_payload.get("username") + except Exception as e: + logger.debug( + "JWT token validation failed in CSRF middleware", + error=str(e), + ) + + if not user_info or not user_id: + return await call_next(request) + + if auth_header and auth_header.startswith("Bearer "): + logger.debug( + "JWT authentication detected, skipping CSRF validation", + user_id=user_id, + method=request.method, + path=request.url.path, + ) + return await call_next(request) + + csrf_token = request.headers.get("X-CSRF-Token") + + if not csrf_token: + logger.warning( + "CSRF token missing for non-JWT auth", + user_id=user_id, + method=request.method, + path=request.url.path, + ) + + return Response( + content=json.dumps({ + "error": "CSRF token required", + "message": "X-CSRF-Token header is required for cookie-based authentication", + }), + status_code=403, + media_type="application/json", + ) + + if not csrf_protection.validate_csrf_token(csrf_token, user_id): + logger.warning( + "CSRF token validation failed", + user_id=user_id, + method=request.method, + path=request.url.path, + token_preview=csrf_token[:16] + "...", + ) + + return Response( + content=json.dumps({ + "error": "Invalid CSRF token", + "message": "CSRF token validation failed", + }), + status_code=403, + media_type="application/json", + ) + + response = await call_next(request) + new_csrf_token = csrf_protection.generate_csrf_token(user_id) + response.headers["X-CSRF-Token"] = new_csrf_token + + return response + + except Exception as e: + logger.error("CSRF middleware error", error=str(e)) + return Response( + content=json.dumps({ + "error": "CSRF protection error", + "message": "Internal server error", + }), + status_code=500, + media_type="application/json", + ) + +@app.middleware("http") +async def rate_limit_middleware(request: Request, call_next: Any) -> Response: + """Rate limiting middleware with Redis.""" + excluded_paths = { + "/health", + "/health/detailed", + "/health/ready", + "/health/live", + "/", + "/docs", + "/openapi.json", + "/rate-limit/status", + "/metrics", + "/stats", + } + if request.url.path in excluded_paths: + return await call_next(request) + + client_ip = request.client.host if request.client else "unknown" + + allowed, headers = redis_rate_limiter.check_rate_limit( + client_ip=client_ip, + requests_limit=RATE_LIMIT_REQUESTS, + window_seconds=RATE_LIMIT_WINDOW, + ) + + if not allowed: + user_agent = request.headers.get("user-agent", "unknown") + + log_security_event( + event_type="rate_limit_exceeded", + client_ip=client_ip, + user_agent=user_agent, + details={ + "path": request.url.path, + "method": request.method, + "limit": RATE_LIMIT_REQUESTS, + "window_seconds": RATE_LIMIT_WINDOW, + }, + 
severity="medium", + ) + + record_error_metric("rate_limit_blocks") + + response = Response( + content=json.dumps({ + "error": "Rate limit exceeded", + "message": f"Too many requests. Limit: {RATE_LIMIT_REQUESTS} per {RATE_LIMIT_WINDOW} seconds", + "retry_after": headers.get( + "X-RateLimit-Reset", int(time.time() + RATE_LIMIT_WINDOW) + ), + }), + status_code=429, + media_type="application/json", + ) + else: + response = await call_next(request) + + for header_name, header_value in headers.items(): + response.headers[header_name] = str(header_value) + + return response + +# Guacamole configuration +GUACAMOLE_URL = os.getenv("GUACAMOLE_URL", "http://localhost:8080") +GUACAMOLE_PUBLIC_URL = os.getenv("GUACAMOLE_PUBLIC_URL", GUACAMOLE_URL) + +guacamole_authenticator = GuacamoleAuthenticator() + +# Rate limiting configuration +RATE_LIMIT_REQUESTS = int(os.getenv("RATE_LIMIT_REQUESTS", "10")) +RATE_LIMIT_WINDOW = int(os.getenv("RATE_LIMIT_WINDOW", "60")) +RATE_LIMIT_ENABLED = os.getenv("RATE_LIMIT_ENABLED", "true").lower() == "true" + + +service_start_time = datetime.now() + +# Metrics storage +metrics_storage = { + "requests": { + "total": 0, + "by_endpoint": defaultdict(int), + "by_method": defaultdict(int), + "by_status": defaultdict(int), + "by_ip": defaultdict(int), + }, + "connections": { + "total_created": 0, + "by_protocol": defaultdict(int), + "by_ip": defaultdict(int), + "active_count": 0, + "failed_host_checks": 0, + }, + "performance": { + "response_times": defaultdict(list), + "connection_creation_times": [], + "host_check_times": [], + }, + "errors": { + "rate_limit_blocks": 0, + "connection_failures": 0, + "host_unreachable": 0, + "authentication_failures": 0, + }, +} + + +def decrypt_password_from_request( + encrypted_password: str, + request: Request, + context: Optional[Dict[str, Any]] = None, +) -> str: + """Return password as-is (protected by HTTPS). + + Args: + encrypted_password: Password string from client + request: FastAPI request object + context: Optional context dict for logging + + Returns: + Password as provided by client + """ + return encrypted_password + + +def generate_connection_url(connection_id: str, guacamole_token: str) -> str: + """Generate Guacamole connection URL. + + Args: + connection_id: Guacamole connection ID + guacamole_token: Guacamole auth token (NOT JWT) + + Returns: + Full URL for Guacamole client connection + """ + encoded_connection_id = base64.b64encode( + f"{connection_id}\0c\0postgresql".encode() + ).decode() + + connection_url = ( + f"{GUACAMOLE_PUBLIC_URL}/guacamole/?token={guacamole_token}" + f"#/client/{encoded_connection_id}" + ) + + logger.debug( + "Connection URL generated", + connection_id=connection_id, + encoded_connection_id=encoded_connection_id, + url_length=len(connection_url), + ) + + return connection_url + +class GuacamoleClient: + """Client for interacting with Guacamole API.""" + + def __init__(self) -> None: + """Initialize Guacamole client.""" + self.base_url = GUACAMOLE_URL + self.session = requests.Session() + self.authenticator = guacamole_authenticator + + def get_system_token(self) -> str: + """Get system token for service operations.""" + return self.authenticator.get_system_token() + + def create_connection_with_user_token( + self, connection_request: ConnectionRequest, guacamole_token: str + ) -> Dict[str, Any]: + """Create new Guacamole connection with Guacamole token. 
+ + Note: guacamole_token is GUACAMOLE auth token, NOT JWT + """ + if not connection_request.port: + port_map = {"rdp": 3389, "vnc": 5900, "ssh": 22} + connection_request.port = port_map.get( + connection_request.protocol, 3389 + ) + + original_hostname = connection_request.hostname + resolved_ip = original_hostname + + try: + resolved_info = socket.getaddrinfo( + original_hostname, None, socket.AF_INET + ) + if resolved_info: + resolved_ip = resolved_info[0][4][0] + logger.info( + "Hostname resolved for Guacamole connection", + original_hostname=original_hostname, + resolved_ip=resolved_ip, + protocol=connection_request.protocol, + port=connection_request.port, + ) + except (socket.gaierror, socket.herror, OSError) as e: + logger.warning( + "Failed to resolve hostname, using as-is", + hostname=original_hostname, + error=str(e), + message="Guacamole will receive the original hostname", + ) + resolved_ip = original_hostname + + connection_config = { + "name": f"Auto-{original_hostname}-{int(time.time())}", + "protocol": connection_request.protocol, + "parameters": { + "hostname": resolved_ip, + "port": str(connection_request.port), + }, + "attributes": {}, + } + + if connection_request.protocol == "rdp": + connection_config["parameters"].update({ + "security": "any", + "ignore-cert": "true", + "enable-wallpaper": "false", + }) + if connection_request.username: + connection_config["parameters"][ + "username" + ] = connection_request.username + if connection_request.password: + connection_config["parameters"][ + "password" + ] = connection_request.password + + elif connection_request.protocol == "vnc": + if connection_request.password: + connection_config["parameters"][ + "password" + ] = connection_request.password + + elif connection_request.protocol == "ssh": + if connection_request.username: + connection_config["parameters"][ + "username" + ] = connection_request.username + if connection_request.password: + connection_config["parameters"][ + "password" + ] = connection_request.password + + if connection_request.enable_sftp is not None: + connection_config["parameters"]["enable-sftp"] = ( + "true" if connection_request.enable_sftp else "false" + ) + + if ( + connection_request.enable_sftp + and connection_request.sftp_root_directory + ): + connection_config["parameters"][ + "sftp-root-directory" + ] = connection_request.sftp_root_directory + + if ( + connection_request.enable_sftp + and connection_request.sftp_server_alive_interval + and connection_request.sftp_server_alive_interval > 0 + ): + connection_config["parameters"][ + "server-alive-interval" + ] = str(connection_request.sftp_server_alive_interval) + else: + connection_config["parameters"]["enable-sftp"] = "true" + + created_connection = self.authenticator.create_connection_with_token( + connection_config, guacamole_token + ) + + if not created_connection: + raise HTTPException( + status_code=500, detail="Failed to create connection in Guacamole" + ) + + connection_id = created_connection.get("identifier") + connection_url = generate_connection_url(connection_id, guacamole_token) + + logger.info( + "Connection created", + connection_id=connection_id, + token_type="guacamole", + ) + + return { + "connection_id": connection_id, + "connection_url": connection_url, + "status": "created", + "auth_token": guacamole_token, + } + + def delete_connection_with_user_token( + self, connection_id: str, auth_token: str + ) -> bool: + """Delete Guacamole connection using user token.""" + return self.authenticator.delete_connection_with_token( + 
connection_id, auth_token + ) + + def get_user_connections(self, auth_token: str) -> List[Dict[str, Any]]: + """Get user connections list.""" + return self.authenticator.get_user_connections(auth_token) + + def get_all_connections_with_system_token(self) -> List[Dict[str, Any]]: + """Get all connections using system token.""" + system_token = self.get_system_token() + return self.authenticator.get_user_connections(system_token) + + def delete_connection_with_system_token(self, connection_id: str) -> bool: + """Delete connection using system token.""" + system_token = self.get_system_token() + return self.authenticator.delete_connection_with_token( + connection_id, system_token + ) + +guacamole_client = GuacamoleClient() + +async def wait_for_guacamole( + timeout_seconds: int = 30, check_interval: float = 1.0 +) -> bool: + """Wait for Guacamole to become available. + + Args: + timeout_seconds: Maximum wait time in seconds + check_interval: Check interval in seconds + + Returns: + True if Guacamole is available, False on timeout + """ + start_time = time.time() + attempt = 0 + + logger.info( + "Waiting for Guacamole to become available...", + timeout_seconds=timeout_seconds, + guacamole_url=GUACAMOLE_URL, + ) + + while (time.time() - start_time) < timeout_seconds: + attempt += 1 + try: + response = await asyncio.to_thread( + requests.get, f"{GUACAMOLE_URL}/guacamole/", timeout=2 + ) + + if response.status_code in (200, 401, 403, 404): + elapsed = time.time() - start_time + logger.info( + "Guacamole is available", + attempt=attempt, + elapsed_seconds=round(elapsed, 2), + status_code=response.status_code, + ) + return True + + except requests.exceptions.RequestException as e: + logger.debug( + "Guacamole not ready yet", + attempt=attempt, + elapsed_seconds=round(time.time() - start_time, 2), + error=str(e)[:100], + ) + + await asyncio.sleep(check_interval) + + logger.warning( + "Guacamole did not become available within timeout", + timeout_seconds=timeout_seconds, + total_attempts=attempt, + ) + return False + +async def cleanup_orphaned_guacamole_connections(): + """Clean up orphaned Guacamole connections if Redis is empty + + Needed after FLUSHDB or first startup after crash when Guacamole + may have orphaned connections without Redis records. 
+ + Returns: + Number of deleted connections + """ + try: + all_connections = redis_connection_storage.get_all_connections() + + if len(all_connections) > 0: + logger.info("Redis has active connections, skipping orphaned cleanup", + redis_connections_count=len(all_connections)) + return 0 + + logger.warning("Redis is empty, checking for orphaned Guacamole connections", + message="This usually happens after FLUSHDB or service restart") + + guac_connections = guacamole_client.get_all_connections_with_system_token() + + if not guac_connections or len(guac_connections) == 0: + logger.info("No Guacamole connections found, nothing to clean up") + return 0 + + logger.warning("Found orphaned Guacamole connections", + guacamole_connections_count=len(guac_connections), + message="Deleting all orphaned connections") + + deleted_count = 0 + for conn in guac_connections: + conn_id = conn.get('identifier') + if conn_id: + try: + if guacamole_client.delete_connection_with_system_token(conn_id): + deleted_count += 1 + logger.debug("Deleted orphaned connection", + connection_id=conn_id, + connection_name=conn.get('name', 'unknown')) + except Exception as e: + logger.error("Failed to delete orphaned connection", + connection_id=conn_id, + error=str(e)) + + if deleted_count > 0: + logger.info("Orphaned connections cleanup completed", + deleted_count=deleted_count, + total_found=len(guac_connections)) + + return deleted_count + + except Exception as e: + logger.error("Error during orphaned connections cleanup", error=str(e)) + return 0 + +async def cleanup_expired_connections_once(log_action: str = "expired"): + """Execute one iteration of expired connections cleanup + + Args: + log_action: Action for logging (expired, startup_cleanup, etc.) + + Returns: + Number of deleted connections + """ + try: + current_time = datetime.now(timezone.utc) + expired_connections = [] + + all_connections = redis_connection_storage.get_all_connections() + for conn_id, conn_data in all_connections.items(): + expires_at = datetime.fromisoformat(conn_data['expires_at']) + if expires_at.tzinfo is None: + expires_at = expires_at.replace(tzinfo=timezone.utc) + + if expires_at <= current_time: + expired_connections.append(conn_id) + + deleted_count = 0 + for conn_id in expired_connections: + conn_data = redis_connection_storage.get_connection(conn_id) + if conn_data: + if guacamole_client.delete_connection_with_system_token(conn_id): + deleted_count += 1 + log_connection_lifecycle( + connection_id=conn_id, + action=log_action, + client_ip="system", + hostname=conn_data.get('hostname', 'unknown'), + protocol=conn_data.get('protocol', 'unknown'), + details={ + "ttl_minutes": conn_data.get('ttl_minutes'), + "created_at": conn_data.get('created_at'), + "expires_at": conn_data.get('expires_at') + } + ) + + owner_username = conn_data.get('owner_username') + if owner_username: + try: + await websocket_manager.send_connection_expired( + username=owner_username, + connection_id=conn_id, + hostname=conn_data.get('hostname', 'unknown'), + protocol=conn_data.get('protocol', 'unknown') + ) + except Exception as ws_error: + logger.warning("Failed to send WebSocket notification", + connection_id=conn_id, + error=str(ws_error)) + + redis_connection_storage.delete_connection(conn_id) + + if deleted_count > 0: + logger.info("Cleanup completed", + action=log_action, + expired_count=len(expired_connections), + deleted_count=deleted_count) + + return deleted_count + + except Exception as e: + logger.error("Error during cleanup", error=str(e)) + return 0 
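+
+# Illustrative example (not part of the module): the shape of a Redis record
+# that cleanup_expired_connections_once() evaluates. Field names mirror
+# schedule_connection_deletion() below; the concrete values are hypothetical.
+#
+#     {
+#         "connection_id": "42",
+#         "created_at": "2025-01-01T12:00:00+00:00",
+#         "expires_at": "2025-01-01T13:00:00+00:00",
+#         "ttl_minutes": 60,
+#         "hostname": "db-01.internal",
+#         "protocol": "ssh",
+#         "owner_username": "alice",
+#     }
+#
+# A record is considered expired once expires_at <= now (UTC); naive
+# timestamps are interpreted as UTC before the comparison.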
+
+async def cleanup_expired_connections():
+    """Background task to remove expired connections."""
+    while True:
+        try:
+            await cleanup_expired_connections_once("expired")
+        except Exception as e:
+            logger.error("Error during cleanup task", error=str(e))
+
+        await asyncio.sleep(60)
+
+async def check_expiring_connections():
+    """Background task to check connections expiring soon (warns 5 min before)."""
+    warned_connections = set()
+
+    while True:
+        try:
+            current_time = datetime.now(timezone.utc)
+            warning_threshold = current_time + timedelta(minutes=5)
+
+            all_connections = redis_connection_storage.get_all_connections()
+
+            for conn_id, conn_data in all_connections.items():
+                expires_at = datetime.fromisoformat(conn_data['expires_at'])
+                if expires_at.tzinfo is None:
+                    expires_at = expires_at.replace(tzinfo=timezone.utc)
+
+                if current_time < expires_at <= warning_threshold and conn_id not in warned_connections:
+                    owner_username = conn_data.get('owner_username')
+                    if owner_username:
+                        minutes_remaining = max(1, int((expires_at - current_time).total_seconds() / 60))
+
+                        try:
+                            await websocket_manager.send_connection_will_expire(
+                                username=owner_username,
+                                connection_id=conn_id,
+                                hostname=conn_data.get('hostname', 'unknown'),
+                                protocol=conn_data.get('protocol', 'unknown'),
+                                minutes_remaining=minutes_remaining
+                            )
+                            warned_connections.add(conn_id)
+
+                            logger.info("Connection expiration warning sent",
+                                        connection_id=conn_id,
+                                        username=owner_username,
+                                        minutes_remaining=minutes_remaining)
+                        except Exception as ws_error:
+                            logger.warning("Failed to send expiration warning",
+                                           connection_id=conn_id,
+                                           error=str(ws_error))
+
+                elif expires_at <= current_time and conn_id in warned_connections:
+                    warned_connections.discard(conn_id)
+
+            current_conn_ids = set(all_connections.keys())
+            warned_connections &= current_conn_ids
+
+        except Exception as e:
+            logger.error("Error during expiring connections check", error=str(e))
+
+        await asyncio.sleep(30)
+
+
+async def cleanup_ssrf_cache():
+    """Background task to clean up SSRF cache."""
+    while True:
+        try:
+            ssrf_protection.cleanup_expired_cache()
+        except Exception as e:
+            logger.error("Error during SSRF cache cleanup", error=str(e))
+
+        await asyncio.sleep(180)
+
+async def cleanup_csrf_tokens():
+    """Background task to clean up expired CSRF tokens."""
+    while True:
+        try:
+            csrf_protection.cleanup_expired_tokens()
+        except Exception as e:
+            logger.error("Error during CSRF token cleanup", error=str(e))
+
+        await asyncio.sleep(600)
+
+def schedule_connection_deletion(
+    connection_id: str,
+    ttl_minutes: int,
+    auth_token: str,
+    guacamole_username: str,
+    hostname: str = "unknown",
+    protocol: str = "unknown",
+    owner_username: str = "unknown",
+    owner_role: str = "unknown"
+):
+    """Store connection metadata in Redis and schedule its deletion.
+
+    The record itself is stored without a Redis TTL (ttl_seconds=None);
+    expiry is enforced by the cleanup_expired_connections background task,
+    which compares each record's expires_at against the current time.
+    """
+    expires_at = datetime.now(timezone.utc) + timedelta(minutes=ttl_minutes)
+    created_at = datetime.now(timezone.utc)
+
+    connection_data = {
+        'connection_id': connection_id,
+        'created_at': created_at.isoformat(),
+        'expires_at': expires_at.isoformat(),
+        'ttl_minutes': ttl_minutes,
+        'auth_token': auth_token,
+        'guacamole_username': guacamole_username,
+        'hostname': hostname,
+        'protocol': protocol,
+        'owner_username': owner_username,
+        'owner_role': owner_role
+    }
+
+    redis_connection_storage.add_connection(
+        connection_id,
+        connection_data,
+        ttl_seconds=None
+    )
+
+@app.on_event("startup")
+async def startup_event():
+    """Application startup initialization."""
+    startup_info = {
+        "guacamole_url": 
GUACAMOLE_URL, + "guacamole_public_url": GUACAMOLE_PUBLIC_URL, + "rate_limiting_enabled": RATE_LIMIT_ENABLED, + "rate_limit_config": f"{RATE_LIMIT_REQUESTS} requests per {RATE_LIMIT_WINDOW} seconds" if RATE_LIMIT_ENABLED else None, + "log_level": LOG_LEVEL, + "log_format": LOG_FORMAT, + "python_version": sys.version.split()[0], + "platform": platform.system() + } + + print("Starting Remote Access API...") + print(f"Guacamole URL (internal): {GUACAMOLE_URL}") + print(f"Guacamole Public URL (client): {GUACAMOLE_PUBLIC_URL}") + print(f"Rate Limiting: {'Enabled' if RATE_LIMIT_ENABLED else 'Disabled'}") + if RATE_LIMIT_ENABLED: + print(f"Rate Limit: {RATE_LIMIT_REQUESTS} requests per {RATE_LIMIT_WINDOW} seconds") + print(f"Log Level: {LOG_LEVEL}, Format: {LOG_FORMAT}") + + logger.info("Application startup", **startup_info) + + log_audit_event( + action="application_started", + resource="system", + client_ip="system", + details=startup_info + ) + + # Cleanup expired connections on startup + guacamole_ready = await wait_for_guacamole(timeout_seconds=30, check_interval=1.0) + + if not guacamole_ready: + logger.warning("Guacamole not available, skipping startup cleanup", + message="Cleanup will be performed by background task when Guacamole becomes available") + else: + logger.info("Checking for orphaned Guacamole connections...") + orphaned_count = await cleanup_orphaned_guacamole_connections() + + if orphaned_count > 0: + logger.warning( + "Orphaned cleanup completed", + deleted_connections=orphaned_count, + message="Removed orphaned Guacamole connections (no Redis records)" + ) + + logger.info("Starting cleanup of expired connections from previous runs...") + deleted_count = await cleanup_expired_connections_once("startup_cleanup") + + if deleted_count > 0: + logger.info( + "Startup cleanup completed", + deleted_connections=deleted_count, + message="Removed expired connections from previous application runs" + ) + else: + logger.info( + "Startup cleanup completed", + deleted_connections=0, + message="No expired connections found" + ) + + asyncio.create_task(cleanup_expired_connections()) + asyncio.create_task(check_expiring_connections()) + asyncio.create_task(cleanup_ssrf_cache()) + asyncio.create_task(cleanup_csrf_tokens()) + logger.info( + "Background tasks started", + ttl_cleanup=True, + expiring_connections_check=True, + rate_limit_cleanup=RATE_LIMIT_ENABLED, + ssrf_cache_cleanup=True, + csrf_token_cleanup=True + ) + +@app.get("/", tags=["System"]) +async def root(): + return {"message": "Remote Access API is running"} + +@app.get("/api/health", tags=["System"]) +async def health_check(): + """Health check with component status""" + start_time = time.time() + + try: + response = await asyncio.to_thread( + requests.get, + f"{GUACAMOLE_URL}/guacamole", + timeout=5 + ) + guacamole_web = { + "status": "ok" if response.status_code == 200 else "error", + "response_time_ms": round(response.elapsed.total_seconds() * 1000, 2), + "status_code": response.status_code + } + except Exception as e: + guacamole_web = {"status": "error", "error": str(e)} + + database = SystemService.check_database_connection(guacamole_client, GUACAMOLE_URL) + guacd = SystemService.check_guacd_daemon() + system = SystemService.check_system_resources() + system_info = SystemService(service_start_time).get_system_info() + + components = [guacamole_web, database, guacd, system] + overall_status = "ok" + + for component in components: + if component.get("status") == "error": + overall_status = "error" + break + elif 
component.get("status") == "critical": + overall_status = "critical" + elif component.get("status") == "warning" and overall_status == "ok": + overall_status = "warning" + + check_duration = round((time.time() - start_time) * 1000, 2) + + return { + "overall_status": overall_status, + "timestamp": datetime.now().isoformat(), + "check_duration_ms": check_duration, + "system_info": system_info, + "components": { + "guacamole_web": guacamole_web, + "database": database, + "guacd_daemon": guacd, + "system_resources": system + }, + "statistics": { + "active_connections": len(redis_connection_storage.get_all_connections()), + "rate_limiting": { + "enabled": RATE_LIMIT_ENABLED, + "limit": RATE_LIMIT_REQUESTS, + "window_seconds": RATE_LIMIT_WINDOW, + "active_clients": redis_rate_limiter.get_stats().get("active_rate_limits", 0) if RATE_LIMIT_ENABLED else 0 + } + } + } + +@app.websocket("/ws/notifications") +async def websocket_notifications(websocket: WebSocket): + """ + WebSocket endpoint for real-time notifications + + Events: + - connection_expired: Connection expired + - connection_deleted: Connection deleted + - connection_will_expire: Connection will expire soon (5 min warning) + - jwt_will_expire: JWT will expire soon (5 min warning) + - jwt_expired: JWT expired + - connection_extended: Connection extended + + Connection protocol: + 1. Client sends JWT token on connection + 2. Server validates token + 3. Server sends confirmation + 4. Server starts sending notifications + """ + username = None + + try: + await websocket.accept() + logger.info("WebSocket connection accepted, waiting for auth") + + auth_message = await asyncio.wait_for( + websocket.receive_json(), + timeout=5.0 + ) + + if auth_message.get("type") != "auth" or not auth_message.get("token"): + await websocket.close(code=4001, reason="Authentication required") + logger.warning("WebSocket connection rejected: no auth") + return + + token = auth_message["token"] + payload = verify_jwt_token(token) + + if not payload: + await websocket.close(code=4001, reason="Invalid token") + logger.warning("WebSocket connection rejected: invalid token") + return + + username = payload.get("username") + if not username: + await websocket.close(code=4001, reason="Invalid token payload") + return + + await websocket_manager.connect(websocket, username) + + await websocket.send_json({ + "type": "connected", + "timestamp": datetime.now(timezone.utc).isoformat(), + "data": { + "username": username, + "message": "Successfully connected to notifications stream" + } + }) + + logger.info("WebSocket client authenticated and connected", + username=username) + + while True: + try: + message = await asyncio.wait_for( + websocket.receive_json(), + timeout=30.0 + ) + + if message.get("type") == "ping": + await websocket.send_json({ + "type": "pong", + "timestamp": datetime.now(timezone.utc).isoformat() + }) + + except asyncio.TimeoutError: + # Timeout - send ping from server + try: + await websocket.send_json({ + "type": "ping", + "timestamp": datetime.now(timezone.utc).isoformat() + }) + except (WebSocketDisconnect, ConnectionError, RuntimeError): + break + except WebSocketDisconnect: + break + except Exception as e: + logger.error("Error in WebSocket loop", + username=username, + error=str(e)) + break + + except asyncio.TimeoutError: + try: + await websocket.close(code=4408, reason="Authentication timeout") + except (WebSocketDisconnect, ConnectionError, RuntimeError): + pass # WebSocket may not be accepted yet + logger.warning("WebSocket connection timeout 
during auth") + + except WebSocketDisconnect: + logger.info("WebSocket client disconnected", + username=username) + + except Exception as e: + logger.error("WebSocket error", + username=username, + error=str(e)) + try: + await websocket.close(code=1011, reason="Internal error") + except (WebSocketDisconnect, ConnectionError, RuntimeError): + pass + + finally: + if username: + await websocket_manager.disconnect(websocket, username) + +@app.post( + "/api/auth/login", + tags=["Authentication"], + response_model=LoginResponse, + summary="Authenticate user", + description="Login with username and password to receive JWT token" +) +async def login(login_request: LoginRequest, request: Request): + client_ip = request.client.host if request.client else "unknown" + user_agent = request.headers.get("user-agent", "unknown") + + try: + # Check brute-force protection + allowed, reason, protection_details = brute_force_protection.check_login_allowed( + client_ip, login_request.username + ) + + if not allowed: + # Log security event + immutable_audit_logger.log_security_event( + event_type="login_blocked", + client_ip=client_ip, + user_agent=user_agent, + details={ + "username": login_request.username, + "reason": reason, + "protection_details": protection_details + }, + severity="high", + username=login_request.username + ) + + raise HTTPException( + status_code=429, + detail=f"Login blocked: {reason}" + ) + + user_info = guacamole_authenticator.authenticate_user( + login_request.username, + login_request.password + ) + + if not user_info: + brute_force_protection.record_failed_login( + client_ip, login_request.username, "invalid_credentials" + ) + + immutable_audit_logger.log_security_event( + event_type="login_failed", + client_ip=client_ip, + user_agent=user_agent, + details={"username": login_request.username}, + severity="medium", + username=login_request.username + ) + + record_error_metric("authentication_failures") + + raise HTTPException( + status_code=401, + detail="Invalid username or password" + ) + + brute_force_protection.record_successful_login(client_ip, login_request.username) + jwt_token = guacamole_authenticator.create_jwt_for_user(user_info) + + immutable_audit_logger.log_audit_event( + action="user_login", + resource=f"user/{login_request.username}", + client_ip=client_ip, + user_agent=user_agent, + result="success", + details={ + "role": user_info["role"], + "permissions_count": len(user_info.get("permissions", [])) + }, + username=login_request.username + ) + + expires_in = int(os.getenv("JWT_ACCESS_TOKEN_EXPIRE_MINUTES", "60")) * 60 + + return LoginResponse( + access_token=jwt_token, + token_type="bearer", + expires_in=expires_in, + user_info={ + "username": user_info["username"], + "role": user_info["role"], + "permissions": user_info.get("permissions", []), + "full_name": user_info.get("full_name"), + "email": user_info.get("email"), + "organization": user_info.get("organization"), + "organizational_role": user_info.get("organizational_role") + } + ) + + except HTTPException: + raise + except Exception as e: + logger.error("Unexpected error during login", + username=login_request.username, + client_ip=client_ip, + error=str(e)) + + record_error_metric("authentication_failures") + + raise HTTPException( + status_code=500, + detail="Internal server error during authentication" + ) + +@app.get( + "/api/auth/profile", + tags=["Authentication"], + summary="Get user profile", + description="Retrieve current user profile information" +) +async def get_user_profile(request: Request, 
credentials: HTTPAuthorizationCredentials = Depends(security)): + """Get current user information""" + user_info = get_current_user(request) + + return { + "username": user_info["username"], + "role": user_info["role"], + "permissions": user_info.get("permissions", []), + "full_name": user_info.get("full_name"), + "email": user_info.get("email"), + "organization": user_info.get("organization"), + "organizational_role": user_info.get("organizational_role") + } + +@app.get( + "/api/auth/permissions", + tags=["Authentication"], + summary="Get user permissions", + description="List all permissions for current user role" +) +async def get_user_permissions(request: Request, credentials: HTTPAuthorizationCredentials = Depends(security)): + user_info = get_current_user(request) + user_role = UserRole(user_info["role"]) + + permissions = PermissionChecker.get_user_permissions_list(user_role) + + return { + "username": user_info["username"], + "role": user_info["role"], + "permissions": permissions, + "system_permissions": user_info.get("permissions", []) + } + +@app.post( + "/api/auth/logout", + tags=["Authentication"], + summary="Logout user", + description="Revoke current JWT token and end session" +) +async def logout(request: Request, credentials: HTTPAuthorizationCredentials = Depends(security)): + user_info = get_current_user(request) + client_ip = request.client.host if request.client else "unknown" + user_agent = request.headers.get("user-agent", "unknown") + + auth_header = request.headers.get("Authorization") + if auth_header and auth_header.startswith("Bearer "): + token = auth_header.split(" ", 1)[1] + token_blacklist.revoke_token(token, "logout", user_info["username"]) + + immutable_audit_logger.log_audit_event( + action="user_logout", + resource=f"user/{user_info['username']}", + client_ip=client_ip, + user_agent=user_agent, + result="success", + username=user_info["username"] + ) + + return { + "message": "Successfully logged out", + "note": "JWT token has been revoked and added to blacklist" + } + +@app.get( + "/api/auth/limits", + tags=["Authentication"], + summary="Get user limits", + description="Retrieve role-based limits and allowed networks" +) +async def get_user_limits(request: Request, credentials: HTTPAuthorizationCredentials = Depends(security)): + """Get limits and restrictions for current user""" + user_info = get_current_user(request) + + if not user_info: + raise HTTPException(status_code=401, detail="Not authenticated") + + user_role = UserRole(user_info["role"]) + role_limits = SecurityConfig.get_role_limits(user_role) + + return { + "username": user_info["username"], + "role": user_role.value, + "limits": role_limits, + "security_info": { + "blocked_hosts": list(SecurityConfig.BLOCKED_HOSTS), + "blocked_networks": SecurityConfig.BLOCKED_NETWORKS + } + } + +@app.post( + "/api/auth/revoke", + tags=["Authentication"], + summary="Revoke token", + description="Revoke JWT token and invalidate session" +) +async def revoke_token(request: Request, credentials: HTTPAuthorizationCredentials = Depends(security)): + """Revoke current JWT token""" + client_ip = request.client.host if request.client else "unknown" + user_agent = request.headers.get("user-agent", "unknown") + + user_info = get_current_user(request) + auth_header = request.headers.get("Authorization") + if not auth_header or not auth_header.startswith("Bearer "): + raise HTTPException(status_code=400, detail="No valid token provided") + + token = auth_header.split(" ", 1)[1] + + success = 
token_blacklist.revoke_token(token, "logout", user_info["username"]) + + if success: + immutable_audit_logger.log_audit_event( + action="token_revoked", + resource=f"token/{token[:20]}...", + client_ip=client_ip, + user_agent=user_agent, + result="success", + username=user_info["username"] + ) + + return {"message": "Token revoked successfully"} + else: + raise HTTPException(status_code=500, detail="Failed to revoke token") + +class MachineAvailabilityRequest(BaseModel): + """Machine availability check request""" + hostname: str + port: Optional[int] = None + +class MachineAvailabilityResponse(BaseModel): + """Machine availability check response""" + available: bool + hostname: str + port: int + response_time_ms: Optional[float] = None + checked_at: str + +@app.post( + "/api/machines/check-availability", + tags=["Machines"], + response_model=MachineAvailabilityResponse, + summary="Check machine availability", + description="Test if machine is reachable via TCP connection" +) +async def check_machine_availability( + availability_request: MachineAvailabilityRequest, + request: Request, + credentials: HTTPAuthorizationCredentials = Depends(security) +): + """Check machine availability (quick ping) + + Args: + hostname: DNS hostname of machine + port: Port to check (optional, default 3389 for RDP) + + Returns: + Availability check result with response time + """ + client_ip = request.client.host if request.client else "unknown" + user_info = get_current_user(request) + username = user_info.get("username", "unknown") + + hostname = availability_request.hostname + port = availability_request.port if availability_request.port else 3389 + + logger.debug("Machine availability check requested", + hostname=hostname, + port=port, + username=username, + client_ip=client_ip) + + start_time = time.time() + + try: + with socket.create_connection((hostname, port), timeout=2): + response_time_ms = (time.time() - start_time) * 1000 + available = True + + logger.info("Machine is available", + hostname=hostname, + port=port, + response_time_ms=round(response_time_ms, 2), + username=username) + except (socket.timeout, socket.error, ConnectionRefusedError, OSError) as e: + response_time_ms = (time.time() - start_time) * 1000 + available = False + + logger.info("Machine is not available", + hostname=hostname, + port=port, + response_time_ms=round(response_time_ms, 2), + error=str(e), + username=username) + + return MachineAvailabilityResponse( + available=available, + hostname=hostname, + port=port, + response_time_ms=round(response_time_ms, 2) if response_time_ms else None, + checked_at=datetime.now().isoformat() + ) + +@app.post( + "/api/connections", + tags=["Connections"], + response_model=ConnectionResponse, + summary="Create connection", + description="Create new remote desktop connection (RDP/VNC/SSH)" +) +async def create_remote_connection( + connection_request: ConnectionRequest, + request: Request, + credentials: HTTPAuthorizationCredentials = Depends(security) +): + start_time = time.time() + client_ip = request.client.host if request.client else "unknown" + user_agent = request.headers.get("user-agent", "unknown") + + user_info = get_current_user(request) + guacamole_token = get_current_user_token(request) + + if not user_info or not guacamole_token: + logger.error("Missing user info or token from middleware", + has_user_info=bool(user_info), + has_token=bool(guacamole_token)) + raise HTTPException(status_code=401, detail="Authentication required") + + username = user_info["username"] + user_role = 
UserRole(user_info["role"]) + + logger.info("Creating connection for authenticated user", + username=username, + role=user_role.value, + guac_token_length=len(guacamole_token)) + + if connection_request.password: + connection_request.password = decrypt_password_from_request( + connection_request.password, + request, + context={ + "username": username, + "hostname": connection_request.hostname, + "protocol": connection_request.protocol + } + ) + + role_limits = SecurityConfig.get_role_limits(user_role) + + if not role_limits["can_create_connections"]: + raise HTTPException( + status_code=403, + detail=f"Role {user_role.value} cannot create connections" + ) + + ttl_valid, ttl_reason = SecurityConfig.validate_ttl(connection_request.ttl_minutes) + if not ttl_valid: + raise HTTPException(status_code=400, detail=f"Invalid TTL: {ttl_reason}") + + if connection_request.ttl_minutes > role_limits["max_ttl_minutes"]: + raise HTTPException( + status_code=403, + detail=f"TTL {connection_request.ttl_minutes} exceeds role limit {role_limits['max_ttl_minutes']} minutes" + ) + + host_allowed, host_reason = SecurityConfig.is_host_allowed(connection_request.hostname, user_role) + if not host_allowed: + logger.warning("Host access denied", + username=username, + role=user_role.value, + hostname=connection_request.hostname, + reason=host_reason, + client_ip=client_ip) + + log_security_event( + event_type="forbidden_host_access_attempt", + client_ip=client_ip, + user_agent=user_agent, + details={ + "username": username, + "role": user_role.value, + "hostname": connection_request.hostname, + "reason": host_reason + }, + severity="high" + ) + + raise HTTPException( + status_code=403, + detail=f"Access to host denied: {host_reason}" + ) + + log_audit_event( + action="connection_creation_started", + resource=f"{connection_request.protocol}://{connection_request.hostname}", + client_ip=client_ip, + user_agent=user_agent, + details={ + "protocol": connection_request.protocol, + "hostname": connection_request.hostname, + "port": connection_request.port, + "ttl_minutes": connection_request.ttl_minutes, + "username": username, + "role": user_role.value + } + ) + + if not connection_request.port: + port_map = {"rdp": 3389, "vnc": 5900, "ssh": 22} + check_port = port_map.get(connection_request.protocol, 3389) + else: + check_port = connection_request.port + + logger.debug("Starting host connectivity check", + target_host=connection_request.hostname, + port=check_port, + client_ip=client_ip) + + # Check host availability before creating connection + try: + with socket.create_connection((connection_request.hostname, check_port), timeout=3): + pass # Connection successful + except (socket.timeout, socket.error, ConnectionRefusedError, OSError) as e: + log_security_event( + event_type="unreachable_host_attempt", + client_ip=client_ip, + user_agent=user_agent, + details={ + "hostname": connection_request.hostname, + "port": check_port, + "protocol": connection_request.protocol, + "error": str(e) + }, + severity="low" + ) + + raise HTTPException( + status_code=400, + detail=f"Host {connection_request.hostname}:{check_port} is not accessible" + ) + + logger.debug("Host connectivity check passed", + target_host=connection_request.hostname, + port=check_port, + client_ip=client_ip) + + try: + + result = guacamole_client.create_connection_with_user_token(connection_request, guacamole_token) + connection_id = result.get("connection_id") + + expires_at = datetime.now(timezone.utc) + timedelta(minutes=connection_request.ttl_minutes) + 
schedule_connection_deletion( + connection_id=connection_id, + ttl_minutes=connection_request.ttl_minutes, + auth_token=result['auth_token'], + guacamole_username=user_info["username"], + hostname=connection_request.hostname, + protocol=connection_request.protocol, + owner_username=user_info["username"], + owner_role=user_info["role"] + ) + + result['expires_at'] = expires_at.isoformat() + result['ttl_minutes'] = connection_request.ttl_minutes + + public_result = { + "connection_id": result["connection_id"], + "connection_url": result["connection_url"], + "status": result["status"], + "expires_at": result["expires_at"], + "ttl_minutes": result["ttl_minutes"] + } + + duration_ms = (time.time() - start_time) * 1000 + + record_connection_metric( + protocol=connection_request.protocol, + client_ip=client_ip, + creation_time_ms=duration_ms, + success=True + ) + + log_connection_lifecycle( + connection_id=connection_id, + action="created", + client_ip=client_ip, + hostname=connection_request.hostname, + protocol=connection_request.protocol, + details={ + "ttl_minutes": connection_request.ttl_minutes, + "expires_at": expires_at.isoformat(), + "creation_duration_ms": round(duration_ms, 2), + "user_agent": user_agent, + "username": user_info["username"], + "role": user_info["role"] + } + ) + + log_audit_event( + action="connection_created", + resource=f"{connection_request.protocol}://{connection_request.hostname}", + client_ip=client_ip, + user_agent=user_agent, + result="success", + details={ + "connection_id": connection_id, + "duration_ms": round(duration_ms, 2), + "username": user_info["username"], + "role": user_info["role"] + } + ) + + return ConnectionResponse(**public_result) + except Exception as e: + duration_ms = (time.time() - start_time) * 1000 + + record_connection_metric( + protocol=connection_request.protocol, + client_ip=client_ip, + creation_time_ms=duration_ms, + success=False + ) + + log_connection_lifecycle( + connection_id="failed", + action="failed", + client_ip=client_ip, + hostname=connection_request.hostname, + protocol=connection_request.protocol, + details={ + "error": str(e), + "duration_ms": round(duration_ms, 2), + "user_agent": user_agent, + "username": user_info["username"], + "role": user_info["role"] + } + ) + + log_audit_event( + action="connection_creation_failed", + resource=f"{connection_request.protocol}://{connection_request.hostname}", + client_ip=client_ip, + user_agent=user_agent, + result="failure", + details={ + "error": str(e), + "duration_ms": round(duration_ms, 2), + "username": user_info["username"], + "role": user_info["role"] + } + ) + raise HTTPException(status_code=500, detail=str(e)) + +@app.get( + "/api/connections", + tags=["Connections"], + summary="List connections", + description="Retrieve active connections based on user role" +) +async def list_connections(request: Request, credentials: HTTPAuthorizationCredentials = Depends(security)): + """ + Returns active connections with connection_url for session restoration. + Used to restore connections after user re-login. + + Note: URLs are generated with current user token, not old token from Redis. + This allows restoring connections after logout/login without 403 errors. 
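+
+    Illustrative response shape (example values only):
+        {
+            "total_connections": 1,
+            "active_connections": 1,
+            "connections": [{"connection_id": "...", "status": "active",
+                             "remaining_minutes": 25, "connection_url": "https://..."}]
+        }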
+ """ + user_info = get_current_user(request) + user_role = UserRole(user_info["role"]) + username = user_info["username"] + + current_guac_token = get_current_user_token(request) + + if not current_guac_token: + logger.error("No Guacamole token available for user", + username=username) + raise HTTPException( + status_code=401, + detail="Authentication token not available" + ) + client_ip = request.client.host if request.client else "unknown" + rate_limit_key = f"get_connections:{username}:{client_ip}" + + allowed, rate_limit_headers = redis_rate_limiter.check_rate_limit( + rate_limit_key, + requests_limit=60, + window_seconds=60 + ) + + if not allowed: + logger.warning("Rate limit exceeded for get connections", + username=username, + client_ip=client_ip, + rate_limit_headers=rate_limit_headers) + raise HTTPException( + status_code=429, + detail="Too many requests. Please try again later." + ) + current_time = datetime.now(timezone.utc) + connections_with_ttl = [] + all_connections = redis_connection_storage.get_all_connections() + + for conn_id, conn_data in all_connections.items(): + if not PermissionChecker.can_view_all_connections(user_role): + if conn_data.get('owner_username') != username: + continue + + expires_at = datetime.fromisoformat(conn_data['expires_at']) + if expires_at.tzinfo is None: + expires_at = expires_at.replace(tzinfo=timezone.utc) + + remaining_minutes = max(0, int((expires_at - current_time).total_seconds() / 60)) + + connection_url = None + + if remaining_minutes > 0: + try: + connection_url = generate_connection_url(conn_id, current_guac_token) + logger.debug("Connection URL generated with current user token", + connection_id=conn_id, + username=username, + remaining_minutes=remaining_minutes) + except Exception as e: + logger.error("Failed to generate connection URL", + connection_id=conn_id, + error=str(e)) + + connections_with_ttl.append({ + "connection_id": conn_id, + "hostname": conn_data.get('hostname', 'unknown'), + "protocol": conn_data.get('protocol', 'unknown'), + "owner_username": conn_data.get('owner_username', 'unknown'), + "owner_role": conn_data.get('owner_role', 'unknown'), + "created_at": conn_data['created_at'], + "expires_at": conn_data['expires_at'], + "ttl_minutes": conn_data['ttl_minutes'], + "remaining_minutes": remaining_minutes, + "status": "active" if remaining_minutes > 0 else "expired", + "connection_url": connection_url + }) + + logger.info( + "User retrieved connections list with refreshed tokens", + username=username, + total_connections=len(connections_with_ttl), + active_connections=len([c for c in connections_with_ttl if c['status'] == 'active']), + using_current_token=True + ) + + return { + "total_connections": len(connections_with_ttl), + "active_connections": len([c for c in connections_with_ttl if c['status'] == 'active']), + "connections": connections_with_ttl + } + +import asyncio +import inspect +from typing import Any + +async def _maybe_call(func: Any, *args, **kwargs): + if inspect.iscoroutinefunction(func): + return await func(*args, **kwargs) + return await asyncio.to_thread(func, *args, **kwargs) + + +@app.delete("/api/connections/{connection_id}", tags=["Connections"]) +async def delete_connection( + connection_id: str, + request: Request, + credentials: HTTPAuthorizationCredentials = Depends(security), +): + """Force delete connection before TTL expiration""" + client_ip = request.client.host if request.client else "unknown" + user_agent = request.headers.get("user-agent", "unknown") + + user_info = await 
_maybe_call(get_current_user, request) + user_role = UserRole(user_info["role"]) + current_user_token = await _maybe_call(get_current_user_token, request) + + if not current_user_token: + logger.error( + "No Guacamole token available for user", + username=user_info.get("username"), + connection_id=connection_id, + ) + raise HTTPException(status_code=401, detail="Authentication token not available") + + conn_data = await _maybe_call(redis_connection_storage.get_connection, connection_id) + + if not conn_data: + log_security_event( + event_type="delete_nonexistent_connection", + client_ip=client_ip, + user_agent=user_agent, + details={"connection_id": connection_id, "username": user_info.get("username")}, + severity="low", + ) + raise HTTPException(status_code=404, detail="Connection not found") + + # Permission check + allowed, reason = PermissionChecker.check_connection_ownership( + user_role, user_info.get("username"), conn_data.get("owner_username", "") + ) + + if not allowed: + log_security_event( + event_type="unauthorized_connection_deletion", + client_ip=client_ip, + user_agent=user_agent, + details={ + "connection_id": connection_id, + "username": user_info.get("username"), + "owner": conn_data.get("owner_username", ""), + "reason": reason, + }, + severity="medium", + ) + raise HTTPException(status_code=403, detail=reason) + + try: + + deletion_success = await _maybe_call( + guacamole_client.delete_connection_with_user_token, connection_id, current_user_token + ) + + if not deletion_success: + logger.warning( + "Failed to delete connection from Guacamole", + connection_id=connection_id, + username=user_info.get("username"), + ) + log_audit_event( + action="connection_deletion_failed", + resource=f"connection/{connection_id}", + client_ip=client_ip, + user_agent=user_agent, + result="failure", + details={"error": "Failed to delete from Guacamole"}, + ) + raise HTTPException(status_code=500, detail="Failed to delete connection from Guacamole") + + await _maybe_call(redis_connection_storage.delete_connection, connection_id) + + log_connection_lifecycle( + connection_id=connection_id, + action="deleted", + client_ip=client_ip, + hostname=conn_data.get("hostname", "unknown"), + protocol=conn_data.get("protocol", "unknown"), + details={ + "user_agent": user_agent, + "remaining_ttl_minutes": conn_data.get("ttl_minutes", 0), + "deleted_manually": True, + "deleted_by": user_info.get("username"), + "deleter_role": user_info.get("role"), + }, + ) + + log_audit_event( + action="connection_deleted", + resource=f"connection/{connection_id}", + client_ip=client_ip, + user_agent=user_agent, + result="success", + details={ + "deleted_by": user_info.get("username"), + "deleter_role": user_info.get("role"), + "owner": conn_data.get("owner_username", ""), + }, + ) + + return {"status": "deleted", "connection_id": connection_id} + + except HTTPException: + raise + + except Exception as e: + logger.error( + "Exception during connection deletion", + connection_id=connection_id, + username=user_info.get("username"), + error=str(e), + error_type=type(e).__name__, + ) + + log_audit_event( + action="connection_deletion_error", + resource=f"connection/{connection_id}", + client_ip=client_ip, + user_agent=user_agent, + result="error", + details={"error": str(e), "error_type": type(e).__name__}, + ) + + raise HTTPException( + status_code=500, detail=f"Internal error during connection deletion: {str(e)}" + ) + + + +@app.post("/api/connections/{connection_id}/extend", tags=["Connections"]) +async def 
extend_connection_ttl( + connection_id: str, + request: Request, + additional_minutes: int = 30, + credentials: HTTPAuthorizationCredentials = Depends(security) +): + """Extend active connection TTL + + Args: + connection_id: Connection ID + additional_minutes: Minutes to add to TTL (default 30) + + Returns: + Updated connection information + """ + client_ip = request.client.host if request.client else "unknown" + user_info = get_current_user(request) + user_role = UserRole(user_info["role"]) + + if additional_minutes < 1 or additional_minutes > 120: + raise HTTPException( + status_code=400, + detail="Additional minutes must be between 1 and 120" + ) + + conn_data = redis_connection_storage.get_connection(connection_id) + + if not conn_data: + raise HTTPException(status_code=404, detail="Connection not found") + + allowed, reason = PermissionChecker.check_connection_ownership( + user_role, user_info["username"], conn_data.get('owner_username', '') + ) + + if not allowed: + log_security_event( + event_type="unauthorized_connection_extension", + client_ip=client_ip, + user_agent=request.headers.get("user-agent", "unknown"), + details={ + "connection_id": connection_id, + "username": user_info["username"], + "owner": conn_data.get('owner_username', ''), + "reason": reason + }, + severity="medium" + ) + raise HTTPException(status_code=403, detail=reason) + + current_expires_at = datetime.fromisoformat(conn_data['expires_at']) + if current_expires_at.tzinfo is None: + current_expires_at = current_expires_at.replace(tzinfo=timezone.utc) + + current_time = datetime.now(timezone.utc) + if current_expires_at <= current_time: + raise HTTPException( + status_code=400, + detail="Connection has already expired and cannot be extended" + ) + + new_expires_at = current_expires_at + timedelta(minutes=additional_minutes) + new_ttl_minutes = int(conn_data.get('ttl_minutes', 60)) + additional_minutes + + conn_data['expires_at'] = new_expires_at.isoformat() + conn_data['ttl_minutes'] = new_ttl_minutes + + redis_connection_storage.add_connection( + connection_id, + conn_data, + ttl_seconds=None + ) + + log_connection_lifecycle( + connection_id=connection_id, + action="extended", + client_ip=client_ip, + hostname=conn_data.get('hostname', 'unknown'), + protocol=conn_data.get('protocol', 'unknown'), + details={ + "extended_by": user_info["username"], + "additional_minutes": additional_minutes, + "new_ttl_minutes": new_ttl_minutes, + "new_expires_at": new_expires_at.isoformat() + } + ) + + owner_username = conn_data.get('owner_username') + if owner_username: + try: + await websocket_manager.send_connection_extended( + username=owner_username, + connection_id=connection_id, + hostname=conn_data.get('hostname', 'unknown'), + new_expires_at=new_expires_at, + additional_minutes=additional_minutes + ) + except Exception as ws_error: + logger.warning("Failed to send WebSocket notification", + connection_id=connection_id, + error=str(ws_error)) + + logger.info("Connection TTL extended", + connection_id=connection_id, + username=user_info["username"], + additional_minutes=additional_minutes, + new_expires_at=new_expires_at.isoformat()) + + return { + "status": "extended", + "connection_id": connection_id, + "additional_minutes": additional_minutes, + "new_ttl_minutes": new_ttl_minutes, + "new_expires_at": new_expires_at.isoformat(), + "remaining_minutes": int((new_expires_at - current_time).total_seconds() / 60) + } + +@app.get( + "/api/machines/saved", + tags=["Machines"], + response_model=SavedMachineList, + 
summary="List saved machines",
+    description="Retrieve all saved machines for current user"
+)
+async def get_saved_machines(
+    request: Request,
+    include_stats: bool = False,
+    credentials: HTTPAuthorizationCredentials = Depends(security)
+):
+    """Get the current user's saved machines list
+
+    Args:
+        include_stats: Include connection statistics (optional)
+    """
+    try:
+        user_info = get_current_user(request)
+        user_id = user_info["username"]
+
+        machines = saved_machines_db.get_user_machines(user_id, include_stats=include_stats)
+
+        machines_response = []
+        for machine in machines:
+            machine_dict = {
+                "id": str(machine['id']),
+                "user_id": machine['user_id'],
+                "name": machine['name'],
+                "hostname": machine['hostname'],
+                "port": machine['port'],
+                "protocol": machine['protocol'],
+                "os": machine.get('os'),
+                "username": machine.get('username'),
+                "description": machine.get('description'),
+                "tags": machine.get('tags') or [],
+                "is_favorite": machine.get('is_favorite', False),
+                "created_at": machine['created_at'].isoformat(),
+                "updated_at": machine['updated_at'].isoformat(),
+                "last_connected_at": machine['last_connected_at'].isoformat() if machine.get('last_connected_at') else None,
+            }
+
+            if include_stats and 'connection_stats' in machine:
+                machine_dict['connection_stats'] = machine['connection_stats']
+
+            machines_response.append(SavedMachineResponse(**machine_dict))
+
+        logger.info(
+            "Retrieved saved machines",
+            user_id=user_id,
+            count=len(machines_response)
+        )
+
+        return SavedMachineList(
+            total=len(machines_response),
+            machines=machines_response
+        )
+
+    except Exception as e:
+        logger.error("Failed to get saved machines", error=str(e))
+        raise HTTPException(status_code=500, detail=f"Failed to retrieve saved machines: {str(e)}")
+
+
+@app.post(
+    "/api/machines/saved",
+    tags=["Machines"],
+    response_model=SavedMachineResponse,
+    summary="Save machine",
+    description="Create new saved machine entry (credentials are not stored)"
+)
+async def create_saved_machine(
+    machine: SavedMachineCreate,
+    request: Request,
+    credentials: HTTPAuthorizationCredentials = Depends(security)
+):
+    """
+    Save a new machine in user profile.
+
+    Security: credentials are never persisted with the machine record; the
+    password travels over HTTPS only and is requested again at connection time.
+    """
+    try:
+        user_info = get_current_user(request)
+        user_id = user_info["username"]
+        user_token = get_current_user_token(request)
+
+        valid_protocols = ['rdp', 'ssh', 'vnc', 'telnet']
+        if machine.protocol.lower() not in valid_protocols:
+            raise HTTPException(
+                status_code=400,
+                detail=f"Invalid protocol. Must be one of: {', '.join(valid_protocols)}"
+            )
+
+        # Create machine in DB (passwords are NOT stored)
+        created_machine = saved_machines_db.create_machine(
+            user_id=user_id,
+            name=machine.name,
+            hostname=machine.hostname,
+            port=machine.port,
+            protocol=machine.protocol.lower(),
+            os=machine.os,
+            description=machine.description,
+            tags=machine.tags or [],
+            is_favorite=machine.is_favorite
+        )
+
+        logger.info(
+            "Saved machine created",
+            machine_id=created_machine['id'],
+            user_id=user_id,
+            name=machine.name
+        )
+
+        return SavedMachineResponse(
+            id=str(created_machine['id']),
+            user_id=created_machine['user_id'],
+            name=created_machine['name'],
+            hostname=created_machine['hostname'],
+            port=created_machine['port'],
+            protocol=created_machine['protocol'],
+            os=created_machine.get('os'),
+            username=created_machine.get('username'),
+            description=created_machine.get('description'),
+            tags=created_machine.get('tags') or [],
+            is_favorite=created_machine.get('is_favorite', False),
+            created_at=created_machine['created_at'].isoformat(),
+            updated_at=created_machine['updated_at'].isoformat(),
+            last_connected_at=None
+        )
+
+    except HTTPException:
+        raise
+    except Exception as e:
+        logger.error("Failed to create saved machine",
+                    error=str(e),
+                    user_id=user_info.get("username") if 'user_info' in locals() else "unknown")
+        raise HTTPException(status_code=500, detail=f"Failed to create saved machine: {str(e)}")
+
+
+@app.get(
+    "/api/machines/saved/{machine_id}",
+    tags=["Machines"],
+    response_model=SavedMachineResponse,
+    summary="Get saved machine",
+    description="Retrieve specific saved machine details"
+)
+async def get_saved_machine(
+    machine_id: str,
+    request: Request,
+    credentials: HTTPAuthorizationCredentials = Depends(security)
+):
+    """Get saved machine information
+
+    Only the owner can access the machine.
+    """
+    try:
+        user_info = get_current_user(request)
+        user_id = user_info["username"]
+
+        machine = saved_machines_db.get_machine_by_id(machine_id, user_id)
+
+        if not machine:
+            raise HTTPException(status_code=404, detail="Machine not found")
+
+        return SavedMachineResponse(
+            id=str(machine['id']),
+            user_id=machine['user_id'],
+            name=machine['name'],
+            hostname=machine['hostname'],
+            port=machine['port'],
+            protocol=machine['protocol'],
+            os=machine.get('os'),
+            username=machine.get('username'),
+            description=machine.get('description'),
+            tags=machine.get('tags') or [],
+            is_favorite=machine.get('is_favorite', False),
+            created_at=machine['created_at'].isoformat(),
+            updated_at=machine['updated_at'].isoformat(),
+            last_connected_at=machine['last_connected_at'].isoformat() if machine.get('last_connected_at') else None
+        )
+
+    except HTTPException:
+        raise
+    except Exception as e:
+        logger.error("Failed to get saved machine",
+                    error=str(e),
+                    machine_id=machine_id)
+        raise HTTPException(status_code=500, detail=f"Failed to retrieve machine: {str(e)}")
+
+
+@app.put(
+    "/api/machines/saved/{machine_id}",
+    tags=["Machines"],
+    response_model=SavedMachineResponse,
+    summary="Update saved machine",
+    description="Modify saved machine configuration (credentials are not stored)"
+)
+async def update_saved_machine(
+    machine_id: str,
+    machine: SavedMachineUpdate,
+    request: Request,
+    credentials: HTTPAuthorizationCredentials = Depends(security)
+):
+    """
+    Update saved machine.
+
+    Security: credentials are never persisted with the machine record; they
+    are requested again at connection time.
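+
+    Only fields present in the request body are updated. A minimal
+    illustrative payload (field names from SavedMachineUpdate):
+        {"name": "db-server-01", "is_favorite": true}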
+ """ + try: + user_info = get_current_user(request) + user_id = user_info["username"] + user_token = get_current_user_token(request) + + existing = saved_machines_db.get_machine_by_id(machine_id, user_id) + if not existing: + raise HTTPException(status_code=404, detail="Machine not found") + + update_data = {} + + if machine.name is not None: + update_data['name'] = machine.name + if machine.hostname is not None: + update_data['hostname'] = machine.hostname + if machine.port is not None: + update_data['port'] = machine.port + if machine.protocol is not None: + valid_protocols = ['rdp', 'ssh', 'vnc', 'telnet'] + if machine.protocol.lower() not in valid_protocols: + raise HTTPException( + status_code=400, + detail=f"Invalid protocol. Must be one of: {', '.join(valid_protocols)}" + ) + update_data['protocol'] = machine.protocol.lower() + if machine.os is not None: + update_data['os'] = machine.os + # Note: credentials are NOT stored, they are requested at connection time + if machine.description is not None: + update_data['description'] = machine.description + if machine.tags is not None: + update_data['tags'] = machine.tags + if machine.is_favorite is not None: + update_data['is_favorite'] = machine.is_favorite + + # Update in DB + updated_machine = saved_machines_db.update_machine(machine_id, user_id, **update_data) + + if not updated_machine: + raise HTTPException(status_code=404, detail="Machine not found after update") + + logger.info( + "Saved machine updated", + machine_id=machine_id, + user_id=user_id, + updated_fields=list(update_data.keys()) + ) + + return SavedMachineResponse( + id=str(updated_machine['id']), + user_id=updated_machine['user_id'], + name=updated_machine['name'], + hostname=updated_machine['hostname'], + port=updated_machine['port'], + protocol=updated_machine['protocol'], + os=updated_machine.get('os'), + username=updated_machine.get('username'), + description=updated_machine.get('description'), + tags=updated_machine.get('tags') or [], + is_favorite=updated_machine.get('is_favorite', False), + created_at=updated_machine['created_at'].isoformat(), + updated_at=updated_machine['updated_at'].isoformat(), + last_connected_at=updated_machine['last_connected_at'].isoformat() if updated_machine.get('last_connected_at') else None + ) + + except HTTPException: + raise + except Exception as e: + logger.error("Failed to update saved machine", + error=str(e), + machine_id=machine_id) + raise HTTPException(status_code=500, detail=f"Failed to update machine: {str(e)}") + + +@app.delete( + "/api/machines/saved/{machine_id}", + tags=["Machines"], + summary="Delete saved machine", + description="Remove saved machine from user profile" +) +async def delete_saved_machine( + machine_id: str, + request: Request, + credentials: HTTPAuthorizationCredentials = Depends(security) +): + """Delete saved machine. Only owner can delete the machine. 
Connection history will also be deleted (CASCADE).""" + try: + user_info = get_current_user(request) + user_id = user_info["username"] + + deleted = saved_machines_db.delete_machine(machine_id, user_id) + + if not deleted: + raise HTTPException(status_code=404, detail="Machine not found") + + logger.info( + "Saved machine deleted", + machine_id=machine_id, + user_id=user_id + ) + + return {"success": True, "message": "Machine deleted successfully"} + + except HTTPException: + raise + except Exception as e: + logger.error("Failed to delete saved machine", + error=str(e), + machine_id=machine_id) + raise HTTPException(status_code=500, detail=f"Failed to delete machine: {str(e)}") + + +@app.post( + "/api/machines/saved/{machine_id}/connect", + tags=["Machines"], + summary="Connect to saved machine", + description="Record connection attempt and update timestamp" +) +async def connect_to_saved_machine( + machine_id: str, + request: Request, + credentials: HTTPAuthorizationCredentials = Depends(security) +): + "Mark connection to saved machine. Updates last_connected_at and adds history entry." + try: + user_info = get_current_user(request) + user_id = user_info["username"] + client_ip = request.client.host if request.client else "unknown" + + machine = saved_machines_db.get_machine_by_id(machine_id, user_id) + if not machine: + raise HTTPException(status_code=404, detail="Machine not found") + + # Update last_connected_at + saved_machines_db.update_last_connected(machine_id, user_id) + + # Add history entry + history_id = saved_machines_db.add_connection_history( + user_id=user_id, + machine_id=machine_id, + success=True, + client_ip=client_ip + ) + + logger.info( + "Connection to saved machine recorded", + machine_id=machine_id, + user_id=user_id, + history_id=history_id + ) + + return { + "success": True, + "message": "Connection recorded", + "history_id": history_id + } + + except HTTPException: + raise + except Exception as e: + logger.error("Failed to record connection", + error=str(e), + machine_id=machine_id) + raise HTTPException(status_code=500, detail=f"Failed to record connection: {str(e)}") + + + + + + + +if __name__ == "__main__": + import uvicorn + uvicorn.run(app, host="0.0.0.0", port=8000) \ No newline at end of file diff --git a/guacamole_test_11_26/api/requirements.txt b/guacamole_test_11_26/api/requirements.txt new file mode 100755 index 00000000..49486816 --- /dev/null +++ b/guacamole_test_11_26/api/requirements.txt @@ -0,0 +1,13 @@ +fastapi==0.115.12 +uvicorn[standard]==0.32.1 +requests==2.32.3 +pydantic==2.5.0 +python-multipart==0.0.6 +structlog==23.2.0 +psutil==5.9.6 +python-dotenv==1.0.0 +PyJWT==2.8.0 +cryptography==43.0.3 +redis==5.0.1 +psycopg2-binary==2.9.9 +paramiko==3.4.0 \ No newline at end of file diff --git a/guacamole_test_11_26/api/routers.py b/guacamole_test_11_26/api/routers.py new file mode 100755 index 00000000..e57d4674 --- /dev/null +++ b/guacamole_test_11_26/api/routers.py @@ -0,0 +1,477 @@ +"""Bulk operations router for mass machine operations.""" + +import asyncio +import socket +import time +from datetime import datetime, timezone +from types import SimpleNamespace +from typing import Dict, List +from uuid import UUID + +import structlog +from fastapi import APIRouter, Depends, HTTPException, Request +from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer + +from core.middleware import get_current_user +from core.models import ( + BulkHealthCheckRequest, + BulkHealthCheckResponse, + BulkHealthCheckResult, + BulkSSHCommandRequest, + 
BulkSSHCommandResponse, + BulkSSHCommandResult, + UserRole, +) +from core.permissions import PermissionChecker +from core.saved_machines_db import saved_machines_db +from core.audit_logger import immutable_audit_logger + +logger = structlog.get_logger(__name__) +security = HTTPBearer() + +bulk_router = APIRouter(prefix="/api/bulk", tags=["Bulk Operations"]) + + +ROLE_HEALTH_CHECK_LIMITS = { + UserRole.GUEST: 10, + UserRole.USER: 50, + UserRole.ADMIN: 200, + UserRole.SUPER_ADMIN: 200, +} + +ROLE_SSH_COMMAND_LIMITS = { + UserRole.GUEST: 0, + UserRole.USER: 20, + UserRole.ADMIN: 100, + UserRole.SUPER_ADMIN: 100, +} + + +async def check_host_availability( + hostname: str, port: int = 22, timeout: int = 5 +) -> tuple[bool, float | None, str | None]: + """Check if host is available via TCP connection.""" + start_time = time.time() + try: + reader, writer = await asyncio.wait_for( + asyncio.open_connection(hostname, port), timeout=timeout + ) + writer.close() + await writer.wait_closed() + response_time = (time.time() - start_time) * 1000 + return True, response_time, None + except asyncio.TimeoutError: + return False, None, "Connection timeout" + except socket.gaierror: + return False, None, "DNS resolution failed" + except ConnectionRefusedError: + return False, None, "Connection refused" + except Exception as e: + return False, None, f"Connection error: {str(e)}" + + +@bulk_router.post( + "/health-check", + response_model=BulkHealthCheckResponse, + summary="Bulk health check", + description="Check availability of multiple machines in parallel" +) +async def bulk_health_check( + request_data: BulkHealthCheckRequest, + request: Request, + credentials: HTTPAuthorizationCredentials = Depends(security), +): + """Bulk machine availability check with role-based limits.""" + user_info = get_current_user(request) + if not user_info: + raise HTTPException(status_code=401, detail="Authentication required") + + username = user_info["username"] + user_role = UserRole(user_info["role"]) + client_ip = request.client.host if request.client else "unknown" + + max_machines = ROLE_HEALTH_CHECK_LIMITS.get(user_role, 10) + machine_count = len(request_data.machine_ids) + + if machine_count > max_machines: + logger.warning( + "Bulk health check limit exceeded", + username=username, + role=user_role.value, + requested=machine_count, + limit=max_machines, + ) + raise HTTPException( + status_code=403, + detail=f"Role {user_role.value} can check max {max_machines} machines at once", + ) + + logger.info( + "Bulk health check started", + username=username, + machine_count=machine_count, + timeout=request_data.timeout, + ) + + started_at = datetime.now(timezone.utc) + start_time = time.time() + + machines = [] + for machine_id in request_data.machine_ids: + # Try to get from saved machines first (UUID format) + try: + UUID(machine_id) + machine_dict = saved_machines_db.get_machine_by_id(machine_id, username) + if machine_dict: + # Convert dict to object with attributes for uniform access + machine = SimpleNamespace( + id=machine_dict['id'], + name=machine_dict['name'], + ip=machine_dict.get('hostname', machine_dict.get('ip', 'unknown')), + hostname=machine_dict.get('hostname', 'unknown'), + ) + machines.append(machine) + continue + except (ValueError, AttributeError): + # Not a UUID + pass + + logger.warning( + "Machine not found or invalid UUID", + username=username, + machine_id=machine_id, + ) + + async def check_machine(machine): + checked_at = datetime.now(timezone.utc).isoformat() + try: + available, response_time, 
error = await check_host_availability( + machine.ip, timeout=request_data.timeout + ) + + return BulkHealthCheckResult( + machine_id=str(machine.id), + machine_name=machine.name, + hostname=machine.ip, + status="success" if available else "failed", + available=available, + response_time_ms=int(response_time) if response_time else None, + error=error, + checked_at=checked_at, + ) + except Exception as e: + logger.error( + "Health check error", machine_id=str(machine.id), error=str(e) + ) + return BulkHealthCheckResult( + machine_id=str(machine.id), + machine_name=machine.name, + hostname=machine.ip, + status="failed", + available=False, + error=str(e), + checked_at=checked_at, + ) + + results = await asyncio.gather(*[check_machine(m) for m in machines]) + + completed_at = datetime.now(timezone.utc) + execution_time_ms = int((time.time() - start_time) * 1000) + + success_count = sum(1 for r in results if r.status == "success") + failed_count = len(results) - success_count + available_count = sum(1 for r in results if r.available) + unavailable_count = len(results) - available_count + + immutable_audit_logger.log_security_event( + event_type="bulk_health_check", + client_ip=client_ip, + user_agent=request.headers.get("user-agent", "unknown"), + details={ + "machine_count": len(results), + "available": available_count, + "unavailable": unavailable_count, + "execution_time_ms": execution_time_ms, + }, + severity="info", + username=username, + ) + + logger.info( + "Bulk health check completed", + username=username, + total=len(results), + available=available_count, + execution_time_ms=execution_time_ms, + ) + + return BulkHealthCheckResponse( + total=len(results), + success=success_count, + failed=failed_count, + available=available_count, + unavailable=unavailable_count, + results=results, + execution_time_ms=execution_time_ms, + started_at=started_at.isoformat(), + completed_at=completed_at.isoformat(), + ) + + +@bulk_router.post( + "/ssh-command", + response_model=BulkSSHCommandResponse, + summary="Bulk SSH command", + description="Execute SSH commands on multiple machines in parallel" +) +async def bulk_ssh_command( + request_data: BulkSSHCommandRequest, + request: Request, + credentials: HTTPAuthorizationCredentials = Depends(security), +): + """Bulk SSH command execution with role-based limits.""" + user_info = get_current_user(request) + if not user_info: + raise HTTPException(status_code=401, detail="Authentication required") + + username = user_info["username"] + user_role = UserRole(user_info["role"]) + client_ip = request.client.host if request.client else "unknown" + + if user_role == UserRole.GUEST: + raise HTTPException( + status_code=403, detail="GUEST role cannot execute SSH commands" + ) + + max_machines = ROLE_SSH_COMMAND_LIMITS.get(user_role, 0) + machine_count = len(request_data.machine_ids) + + if machine_count > max_machines: + logger.warning( + "Bulk SSH command limit exceeded", + username=username, + role=user_role.value, + requested=machine_count, + limit=max_machines, + ) + raise HTTPException( + status_code=403, + detail=f"Role {user_role.value} can execute commands on max {max_machines} machines at once", + ) + + logger.info( + "Bulk SSH command started", + username=username, + machine_count=machine_count, + command=request_data.command[:50], + mode=request_data.credentials_mode, + ) + + started_at = datetime.now(timezone.utc) + start_time = time.time() + + machines = [] + for machine_id in request_data.machine_ids: + # Try to get from saved machines first (UUID 
format) + try: + UUID(machine_id) + machine_dict = saved_machines_db.get_machine_by_id(machine_id, username) + if machine_dict: + # Convert dict to object with attributes for uniform access + machine = SimpleNamespace( + id=machine_dict['id'], + name=machine_dict['name'], + ip=machine_dict.get('hostname', machine_dict.get('ip', 'unknown')), + hostname=machine_dict.get('hostname', 'unknown'), + ) + machines.append(machine) + continue + except (ValueError, AttributeError): + # Not a UUID, check if hostname provided + pass + + # Check if hostname provided for non-saved machine (mock machines) + if request_data.machine_hostnames and machine_id in request_data.machine_hostnames: + hostname = request_data.machine_hostnames[machine_id] + # Create mock machine object for non-saved machines + mock_machine = SimpleNamespace( + id=machine_id, + name=f'Mock-{machine_id}', + ip=hostname, + hostname=hostname, + ) + machines.append(mock_machine) + logger.info( + "Using non-saved machine (mock)", + username=username, + machine_id=machine_id, + hostname=hostname, + ) + continue + + logger.warning( + "Machine not found and no hostname provided", + username=username, + machine_id=machine_id, + ) + + semaphore = asyncio.Semaphore(10) + + async def execute_command(machine): + async with semaphore: + executed_at = datetime.now(timezone.utc).isoformat() + cmd_start = time.time() + + try: + ssh_username = None + ssh_password = None + + if request_data.credentials_mode == "global": + if not request_data.global_credentials: + return BulkSSHCommandResult( + machine_id=str(machine.id), + machine_name=machine.name, + hostname=machine.ip, + status="no_credentials", + error="Global credentials not provided", + executed_at=executed_at, + ) + ssh_username = request_data.global_credentials.username + ssh_password = request_data.global_credentials.password + + else: # custom mode + if not request_data.machine_credentials or str( + machine.id + ) not in request_data.machine_credentials: + return BulkSSHCommandResult( + machine_id=str(machine.id), + machine_name=machine.name, + hostname=machine.ip, + status="no_credentials", + error="Custom credentials not provided for this machine", + executed_at=executed_at, + ) + creds = request_data.machine_credentials[str(machine.id)] + ssh_username = creds.username + ssh_password = creds.password + + if not ssh_username or not ssh_password: + return BulkSSHCommandResult( + machine_id=str(machine.id), + machine_name=machine.name, + hostname=machine.ip, + status="no_credentials", + error="Credentials missing", + executed_at=executed_at, + ) + + import paramiko + + ssh = paramiko.SSHClient() + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + + await asyncio.wait_for( + asyncio.get_event_loop().run_in_executor( + None, + lambda: ssh.connect( + machine.ip, + username=ssh_username, + password=ssh_password, + timeout=request_data.timeout, + ), + ), + timeout=request_data.timeout, + ) + + stdin, stdout, stderr = ssh.exec_command(request_data.command) + stdout_text = stdout.read().decode("utf-8", errors="ignore") + stderr_text = stderr.read().decode("utf-8", errors="ignore") + exit_code = stdout.channel.recv_exit_status() + + ssh.close() + + execution_time = int((time.time() - cmd_start) * 1000) + + return BulkSSHCommandResult( + machine_id=str(machine.id), + machine_name=machine.name, + hostname=machine.ip, + status="success" if exit_code == 0 else "failed", + exit_code=exit_code, + stdout=stdout_text[:5000], + stderr=stderr_text[:5000], + execution_time_ms=execution_time, + 
executed_at=executed_at, + ) + + except asyncio.TimeoutError: + return BulkSSHCommandResult( + machine_id=str(machine.id), + machine_name=machine.name, + hostname=machine.ip, + status="timeout", + error="Command execution timeout", + executed_at=executed_at, + ) + except Exception as e: + logger.error( + "SSH command error", + machine_id=str(machine.id), + machine_name=machine.name, + hostname=machine.ip, + error=str(e), + error_type=type(e).__name__ + ) + return BulkSSHCommandResult( + machine_id=str(machine.id), + machine_name=machine.name, + hostname=machine.ip, + status="failed", + error=str(e)[:500], + executed_at=executed_at, + ) + + results = await asyncio.gather(*[execute_command(m) for m in machines]) + + completed_at = datetime.now(timezone.utc) + execution_time_ms = int((time.time() - start_time) * 1000) + + success_count = sum(1 for r in results if r.status == "success") + failed_count = len(results) - success_count + + immutable_audit_logger.log_security_event( + event_type="bulk_ssh_command", + client_ip=client_ip, + user_agent=request.headers.get("user-agent", "unknown"), + details={ + "machine_count": len(results), + "command": request_data.command[:100], + "credentials_mode": request_data.credentials_mode, + "success": success_count, + "failed": failed_count, + "execution_time_ms": execution_time_ms, + }, + severity="high", + username=username, + ) + + logger.info( + "Bulk SSH command completed", + username=username, + total=len(results), + success=success_count, + failed=failed_count, + execution_time_ms=execution_time_ms, + ) + + return BulkSSHCommandResponse( + total=len(results), + success=success_count, + failed=failed_count, + results=results, + execution_time_ms=execution_time_ms, + command=request_data.command, + started_at=started_at.isoformat(), + completed_at=completed_at.isoformat(), + ) + diff --git a/guacamole_test_11_26/api/security_config.py b/guacamole_test_11_26/api/security_config.py new file mode 100755 index 00000000..8dc45c82 --- /dev/null +++ b/guacamole_test_11_26/api/security_config.py @@ -0,0 +1,143 @@ +""" +Security configuration for Remote Access API. +""" + +import os +from typing import Any, Dict, Tuple + +from core.models import UserRole +from core.ssrf_protection import ssrf_protection + + +class SecurityConfig: + """Security configuration for the system.""" + + MAX_TTL_MINUTES = int(os.getenv("MAX_TTL_MINUTES", "480")) + + MAX_CONNECTIONS_PER_USER = int(os.getenv("MAX_CONNECTIONS_PER_USER", "5")) + + BLOCKED_HOSTS = { + "127.0.0.1", + "localhost", + "0.0.0.0", + "::1", + "169.254.169.254", + "metadata.google.internal", + } + + BLOCKED_NETWORKS = [ + "127.0.0.0/8", + "169.254.0.0/16", + "224.0.0.0/4", + "240.0.0.0/4", + "172.17.0.0/16", + "172.18.0.0/16", + "172.19.0.0/16", + "172.20.0.0/16", + "172.21.0.0/16", + "172.22.0.0/16", + "172.23.0.0/16", + "172.24.0.0/16", + "172.25.0.0/16", + "172.26.0.0/16", + "172.27.0.0/16", + "172.28.0.0/16", + "172.29.0.0/16", + "172.30.0.0/16", + "172.31.0.0/16", + ] + + ROLE_ALLOWED_NETWORKS = { + UserRole.GUEST: [], + UserRole.USER: [ + "10.0.0.0/8", + "172.16.0.0/16", + "192.168.1.0/24", + ], + UserRole.ADMIN: [ + "10.0.0.0/8", + "172.16.0.0/16", + "192.168.0.0/16", + "203.0.113.0/24", + ], + UserRole.SUPER_ADMIN: [ + "0.0.0.0/0", + ], + } + + @classmethod + def is_host_allowed( + cls, hostname: str, user_role: UserRole + ) -> Tuple[bool, str]: + """ + Check if host is allowed for the given role with enhanced SSRF protection. + + Args: + hostname: IP address or hostname. + user_role: User role. 
+ + Returns: + Tuple (allowed: bool, reason: str). + """ + return ssrf_protection.validate_host(hostname, user_role.value) + + @classmethod + def validate_ttl(cls, ttl_minutes: int) -> Tuple[bool, str]: + """ + Validate connection TTL. + + Args: + ttl_minutes: Requested time-to-live in minutes. + + Returns: + Tuple (valid: bool, reason: str). + """ + if ttl_minutes <= 0: + return False, "TTL must be positive" + + if ttl_minutes > cls.MAX_TTL_MINUTES: + return False, f"TTL cannot exceed {cls.MAX_TTL_MINUTES} minutes" + + return True, "TTL is valid" + + @classmethod + def get_role_limits(cls, user_role: UserRole) -> Dict[str, Any]: + """ + Get limits for a role. + + Args: + user_role: User role. + + Returns: + Dictionary with limits. + """ + base_limits = { + "max_ttl_minutes": cls.MAX_TTL_MINUTES, + "max_connections": cls.MAX_CONNECTIONS_PER_USER, + "allowed_networks": cls.ROLE_ALLOWED_NETWORKS.get(user_role, []), + "can_create_connections": user_role != UserRole.GUEST, + } + + if user_role == UserRole.GUEST: + base_limits.update( + { + "max_connections": 0, + "max_ttl_minutes": 0, + } + ) + elif user_role == UserRole.USER: + base_limits.update( + { + "max_connections": 3, + "max_ttl_minutes": 240, + } + ) + elif user_role == UserRole.ADMIN: + base_limits.update( + { + "max_connections": 10, + "max_ttl_minutes": 480, + } + ) + + return base_limits \ No newline at end of file diff --git a/guacamole_test_11_26/api/services/__init__.py b/guacamole_test_11_26/api/services/__init__.py new file mode 100755 index 00000000..8f44f745 --- /dev/null +++ b/guacamole_test_11_26/api/services/__init__.py @@ -0,0 +1,5 @@ +"""Services package for system operations""" +from .system_service import SystemService + +__all__ = ['SystemService'] + diff --git a/guacamole_test_11_26/api/services/system_service.py b/guacamole_test_11_26/api/services/system_service.py new file mode 100755 index 00000000..f0cbc93c --- /dev/null +++ b/guacamole_test_11_26/api/services/system_service.py @@ -0,0 +1,225 @@ +""" +System Service Module + +Provides system monitoring and health check functionality for the Remote Access API. +Includes checks for database connectivity, daemon status, and system resources. 
+""" + +import socket +import psutil +from datetime import datetime +from typing import Dict, Any, Optional +import structlog + +logger = structlog.get_logger(__name__) + + +class SystemService: + """Service for system health checks and monitoring""" + + def __init__(self, service_start_time: Optional[datetime] = None): + """ + Initialize SystemService + + Args: + service_start_time: Service startup time for uptime calculation + """ + self.service_start_time = service_start_time or datetime.now() + + @staticmethod + def check_database_connection(guacamole_client: Any, guacamole_url: str) -> Dict[str, Any]: + """ + Check Guacamole database connectivity + + Args: + guacamole_client: Guacamole client instance + guacamole_url: Guacamole base URL + + Returns: + Status dictionary with connection state + """ + try: + # Try to get system token (requires database access) + token = guacamole_client.get_system_token() + + if token: + return { + "status": "ok", + "message": "Database connection healthy" + } + else: + return { + "status": "error", + "message": "Failed to obtain system token" + } + + except Exception as e: + logger.error("Database connection check failed", error=str(e)) + return { + "status": "error", + "error": str(e), + "message": "Database connection failed" + } + + @staticmethod + def check_guacd_daemon() -> Dict[str, Any]: + """ + Check if guacd daemon is running + + Returns: + Status dictionary with daemon state + """ + try: + # Check if guacd is listening on default port 4822 + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.settimeout(2) + result = sock.connect_ex(('localhost', 4822)) + sock.close() + + if result == 0: + return { + "status": "ok", + "message": "guacd daemon is running", + "port": 4822 + } + else: + return { + "status": "error", + "message": "guacd daemon is not accessible", + "port": 4822 + } + + except Exception as e: + logger.error("guacd daemon check failed", error=str(e)) + return { + "status": "error", + "error": str(e), + "message": "Failed to check guacd daemon" + } + + @staticmethod + def check_system_resources() -> Dict[str, Any]: + """ + Check system resources (CPU, RAM, Disk) + + Returns: + Status dictionary with resource usage + """ + try: + # CPU usage + cpu_percent = psutil.cpu_percent(interval=1) + + # Memory usage + memory = psutil.virtual_memory() + memory_percent = memory.percent + + # Disk usage + disk = psutil.disk_usage('/') + disk_percent = disk.percent + + # Determine overall status based on thresholds + status = "ok" + warnings = [] + + if cpu_percent > 90: + status = "critical" + warnings.append(f"CPU usage critical: {cpu_percent}%") + elif cpu_percent > 80: + status = "warning" + warnings.append(f"CPU usage high: {cpu_percent}%") + + if memory_percent > 90: + status = "critical" + warnings.append(f"Memory usage critical: {memory_percent}%") + elif memory_percent > 80: + if status == "ok": + status = "warning" + warnings.append(f"Memory usage high: {memory_percent}%") + + if disk_percent > 90: + status = "critical" + warnings.append(f"Disk usage critical: {disk_percent}%") + elif disk_percent > 80: + if status == "ok": + status = "warning" + warnings.append(f"Disk usage high: {disk_percent}%") + + result = { + "status": status, + "cpu_percent": round(cpu_percent, 2), + "memory_percent": round(memory_percent, 2), + "disk_percent": round(disk_percent, 2), + "memory_available_gb": round(memory.available / (1024**3), 2), + "disk_free_gb": round(disk.free / (1024**3), 2) + } + + if warnings: + result["warnings"] = warnings + + 
+ @staticmethod + def check_system_resources() -> Dict[str, Any]: + """ + Check system resources (CPU, RAM, Disk) + + Returns: + Status dictionary with resource usage + """ + try: + # CPU usage + cpu_percent = psutil.cpu_percent(interval=1) + + # Memory usage + memory = psutil.virtual_memory() + memory_percent = memory.percent + + # Disk usage + disk = psutil.disk_usage('/') + disk_percent = disk.percent + + # Determine overall status based on thresholds + status = "ok" + warnings = [] + + if cpu_percent > 90: + status = "critical" + warnings.append(f"CPU usage critical: {cpu_percent}%") + elif cpu_percent > 80: + status = "warning" + warnings.append(f"CPU usage high: {cpu_percent}%") + + if memory_percent > 90: + status = "critical" + warnings.append(f"Memory usage critical: {memory_percent}%") + elif memory_percent > 80: + if status == "ok": + status = "warning" + warnings.append(f"Memory usage high: {memory_percent}%") + + if disk_percent > 90: + status = "critical" + warnings.append(f"Disk usage critical: {disk_percent}%") + elif disk_percent > 80: + if status == "ok": + status = "warning" + warnings.append(f"Disk usage high: {disk_percent}%") + + result = { + "status": status, + "cpu_percent": round(cpu_percent, 2), + "memory_percent": round(memory_percent, 2), + "disk_percent": round(disk_percent, 2), + "memory_available_gb": round(memory.available / (1024**3), 2), + "disk_free_gb": round(disk.free / (1024**3), 2) + } + + if warnings: + result["warnings"] = warnings + + if status == "ok": + result["message"] = "System resources healthy" + + return result + + except Exception as e: + logger.error("System resources check failed", error=str(e)) + return { + "status": "error", + "error": str(e), + "message": "Failed to check system resources" + } + + def get_system_info(self) -> Dict[str, Any]: + """ + Get system information (uptime, version, etc.) + + Returns: + Dictionary with system information + """ + try: + import platform + + uptime_seconds = int((datetime.now() - self.service_start_time).total_seconds()) + + return { + "uptime_seconds": uptime_seconds, + "uptime_formatted": self._format_uptime(uptime_seconds), + # platform.python_version() reports the interpreter version; the + # previous psutil.PROCFS_PATH lookup returned a procfs path, not + # a version string. + "python_version": platform.python_version(), + "cpu_count": psutil.cpu_count(), + "boot_time": datetime.fromtimestamp(psutil.boot_time()).isoformat() + } + + except Exception as e: + logger.error("Failed to get system info", error=str(e)) + return { + "error": str(e), + "message": "Failed to retrieve system information" + } + + @staticmethod + def _format_uptime(seconds: int) -> str: + """ + Format uptime seconds to human-readable string + + Args: + seconds: Uptime in seconds + + Returns: + Formatted uptime string + """ + days = seconds // 86400 + hours = (seconds % 86400) // 3600 + minutes = (seconds % 3600) // 60 + secs = seconds % 60 + + parts = [] + if days > 0: + parts.append(f"{days}d") + if hours > 0: + parts.append(f"{hours}h") + if minutes > 0: + parts.append(f"{minutes}m") + if secs > 0 or not parts: + parts.append(f"{secs}s") + + return " ".join(parts) + + diff --git a/guacamole_test_11_26/deploy.sh b/guacamole_test_11_26/deploy.sh new file mode 100755 index 00000000..c8d0be2c --- /dev/null +++ b/guacamole_test_11_26/deploy.sh @@ -0,0 +1,398 @@ +#!/bin/bash +# Automated deployment for Remote Access API + Guacamole +# with automatic secure administrator generation + +set -e # Exit on error + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +cd "$SCRIPT_DIR" + +echo "==========================================" +echo " Remote Access API Deployment" +echo "==========================================" +echo "" + +# Output colors +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Logging functions +log_info() { + echo -e "${BLUE}[INFO]${NC} $1" +} + +log_success() { + echo -e "${GREEN}[OK]${NC} $1" +} + +log_warning() { + echo -e "${YELLOW}[WARNING]${NC} $1" +} + +log_error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +# Check required commands +check_requirements() { + log_info "Checking requirements..." + + if ! command -v docker &> /dev/null; then + log_error "Docker not found! Please install Docker first." + exit 1 + fi + + if ! command -v python3 &> /dev/null; then + log_error "Python 3 not found! Please install Python 3." + exit 1 + fi + + if ! docker compose version &> /dev/null; then + log_error "Docker Compose V2 not found! Please install Docker Compose V2." + exit 1 + fi + + log_success "All requirements met" +} + +# Load environment variables +load_env() { + log_info "Loading environment variables..." + + if [ ! -f ".env" ] && [ ! -f "production.env" ]; then + log_error "No .env or production.env file found!"
+ log_error "Please create one from encryption.env.example or production.env" + exit 1 + fi + + # Use production.env by default + ENV_FILE=".env" + if [ -f "production.env" ]; then + ENV_FILE="production.env" + log_info "Using production.env" + fi + + # Load variables + set -a + source "$ENV_FILE" + set +a + + log_success "Environment loaded from $ENV_FILE" +} + +# Check critical passwords +check_critical_passwords() { + log_info "Checking critical passwords..." + + local has_issues=0 + + # Check REDIS_PASSWORD + if [ -z "$REDIS_PASSWORD" ] || [ "$REDIS_PASSWORD" == "redis_pass" ]; then + log_error "REDIS_PASSWORD is not set or using default value!" + log_error "Set a secure password in $ENV_FILE" + has_issues=1 + fi + + # Check POSTGRES_PASSWORD + if [ -z "$POSTGRES_PASSWORD" ] || [ "$POSTGRES_PASSWORD" == "guacamole_pass" ]; then + log_error "POSTGRES_PASSWORD is not set or using default value!" + log_error "Set a secure password in $ENV_FILE" + has_issues=1 + fi + + # Check SYSTEM_ADMIN credentials + if [ -z "$SYSTEM_ADMIN_USERNAME" ] || [ -z "$SYSTEM_ADMIN_PASSWORD" ]; then + log_error "SYSTEM_ADMIN_USERNAME and SYSTEM_ADMIN_PASSWORD must be set!" + log_error "Please update your $ENV_FILE file" + has_issues=1 + fi + + if [ $has_issues -eq 1 ]; then + log_error "" + log_error "Critical passwords are missing or insecure!" + log_error "Update the following in $ENV_FILE:" + log_error " - REDIS_PASSWORD=" + log_error " - POSTGRES_PASSWORD=" + log_error " - SYSTEM_ADMIN_PASSWORD=" + log_error "" + log_error "Generate secure passwords:" + log_error " openssl rand -base64 32" + exit 1 + fi + + log_success "All critical passwords are set" +} + +# Auto-generate admin if password is not default +generate_admin_sql() { + log_info "Checking admin credentials..." + + # Check if default password is used + if [ "$SYSTEM_ADMIN_PASSWORD" == "guacadmin" ] || \ + [ "$SYSTEM_ADMIN_PASSWORD" == "guacadmin_change_in_production" ] || \ + [[ "$SYSTEM_ADMIN_PASSWORD" == *"CHANGE_ME"* ]]; then + log_warning "Default or placeholder password detected!" + log_warning "Username: $SYSTEM_ADMIN_USERNAME" + log_warning "Password: $SYSTEM_ADMIN_PASSWORD" + log_warning "" + log_warning "This is INSECURE for production!" + log_warning "Using default 002-create-admin-user.sql" + log_warning "" + read -p "Continue anyway? (y/N): " -n 1 -r + echo + if [[ ! $REPLY =~ ^[Yy]$ ]]; then + log_info "Deployment cancelled. Please update your credentials." + exit 1 + fi + return + fi + + log_success "Custom password detected - generating secure admin SQL" + log_info "Username: $SYSTEM_ADMIN_USERNAME" + log_info "Password length: ${#SYSTEM_ADMIN_PASSWORD} characters" + + # Create backup of original SQL (if not already created) + if [ -f "002-create-admin-user.sql" ] && [ ! -f "002-create-admin-user-DEFAULT-BACKUP.sql" ]; then + log_info "Creating backup of default SQL..." + cp 002-create-admin-user.sql 002-create-admin-user-DEFAULT-BACKUP.sql + log_success "Backup created: 002-create-admin-user-DEFAULT-BACKUP.sql" + fi + + # Generate new SQL. Test the command directly: under set -e a failing + # command aborts the script before a separate $? check could ever run. + log_info "Generating SQL with custom password..." + if ! python3 generate_guacamole_user.py \ + --username "$SYSTEM_ADMIN_USERNAME" \ + --password "$SYSTEM_ADMIN_PASSWORD" \ + --admin \ + --verify \ + > 002-create-admin-user-GENERATED.sql; then + log_error "Failed to generate SQL!"
+ exit 1 + fi + + # Replace SQL + mv 002-create-admin-user-GENERATED.sql 002-create-admin-user.sql + log_success "Admin SQL generated and applied" + log_info "File: 002-create-admin-user.sql (auto-generated)" +} + +# Validate docker-compose.yml +check_compose_file() { + log_info "Validating docker-compose.yml..." + + if docker compose config > /dev/null 2>&1; then + log_success "docker-compose.yml is valid" + else + log_error "Invalid docker-compose.yml!" + docker compose config + exit 1 + fi +} + +# Start containers +start_containers() { + log_info "Starting containers..." + + # Stop existing containers (if any) + docker compose down 2>/dev/null || true + + # Start (test directly; set -e makes a separate $? check unreachable) + if docker compose up -d; then + log_success "Containers started successfully" + else + log_error "Failed to start containers!" + exit 1 + fi +} + +# Wait for services to be ready +wait_for_services() { + log_info "Waiting for services to be ready..." + + # Wait for PostgreSQL + log_info "Waiting for PostgreSQL..." + for i in {1..30}; do + if docker compose exec -T postgres pg_isready -U guacamole_user &>/dev/null; then + log_success "PostgreSQL is ready" + break + fi + if [ $i -eq 30 ]; then + log_error "PostgreSQL failed to start!" + docker compose logs postgres + exit 1 + fi + sleep 1 + done + + # Wait for Guacamole + log_info "Waiting for Guacamole..." + local guacamole_ready=0 + for i in {1..60}; do + if curl -s -o /dev/null -w "%{http_code}" http://localhost:8080/guacamole/ | grep -q "200\|302"; then + log_success "Guacamole is ready" + guacamole_ready=1 + break + fi + sleep 2 + done + + if [ $guacamole_ready -eq 0 ]; then + log_warning "Guacamole might not be ready yet (timeout after 120s)" + log_info "Check logs: docker compose logs guacamole" + fi + + # Wait for Redis + log_info "Waiting for Redis..." + local redis_ready=0 + for i in {1..20}; do + # Redirect stderr only: the original &>/dev/null also discarded stdout, + # so grep could never see PONG and this check always failed. + if docker compose exec -T redis redis-cli -a "$REDIS_PASSWORD" ping 2>/dev/null | grep -q "PONG"; then + log_success "Redis is ready" + redis_ready=1 + break + fi + sleep 1 + done + + if [ $redis_ready -eq 0 ]; then + log_warning "Redis might not be ready yet (timeout after 20s)" + fi + + # Wait for API + log_info "Waiting for API..." + local api_ready=0 + for i in {1..45}; do + if docker compose logs remote_access_api 2>&1 | grep -q "Application startup complete"; then + log_info "API startup detected, checking health endpoint..." + sleep 2 # Give it a moment to fully initialize + + # Check health endpoint + if curl -s http://localhost:8000/api/health | grep -q '"overall_status":"ok"'; then + log_success "API is ready and healthy" + api_ready=1 + break + elif curl -s http://localhost:8000/api/health | grep -q '"overall_status"'; then + log_warning "API is running but some components have issues" + log_info "Check: curl http://localhost:8000/api/health | jq" + api_ready=1 + break + fi + fi + sleep 2 + done + + if [ $api_ready -eq 0 ]; then + log_error "API failed to start properly (timeout after 90s)" + log_info "Check logs: docker compose logs remote_access_api" + log_info "Last 30 lines:" + docker compose logs --tail=30 remote_access_api + exit 1 + fi +} + +# Verify deployment +verify_deployment() { + log_info "Verifying deployment..."
+ + # Check that admin user was created + ADMIN_CHECK=$(docker compose exec -T postgres psql -U guacamole_user -d guacamole_db -t -c \ + "SELECT COUNT(*) FROM guacamole_user u + JOIN guacamole_entity e ON u.entity_id = e.entity_id + WHERE e.name = '$SYSTEM_ADMIN_USERNAME';" 2>/dev/null | tr -d ' ') + + if [ "$ADMIN_CHECK" == "1" ]; then + log_success "Admin user '$SYSTEM_ADMIN_USERNAME' exists in database" + else + log_warning "Could not verify admin user in database" + fi + + # Check API health endpoint + log_info "Checking API health endpoint..." + HEALTH_RESPONSE=$(curl -s http://localhost:8000/api/health) + + if echo "$HEALTH_RESPONSE" | grep -q '"overall_status":"ok"'; then + log_success "API health check: OK" + + # Parse component statuses (if jq is available) + if command -v jq &> /dev/null; then + echo "$HEALTH_RESPONSE" | jq -r '.components | to_entries[] | " - \(.key): \(.value.status)"' 2>/dev/null || true + fi + elif echo "$HEALTH_RESPONSE" | grep -q '"overall_status"'; then + local status=$(echo "$HEALTH_RESPONSE" | grep -o '"overall_status":"[^"]*"' | cut -d'"' -f4) + log_warning "API health check: $status (some components have issues)" + + if command -v jq &> /dev/null; then + echo "$HEALTH_RESPONSE" | jq -r '.components | to_entries[] | " - \(.key): \(.value.status)"' 2>/dev/null || true + else + log_info "Install 'jq' for detailed component status" + fi + else + log_error "API health check failed!" + log_info "Response: $HEALTH_RESPONSE" + exit 1 + fi + + # Check API logs for authentication + if docker compose logs remote_access_api 2>&1 | grep -q "System token refreshed successfully\|authenticated with system credentials"; then + log_success "API successfully authenticated with system credentials" + else + log_warning "Could not verify API system authentication in logs" + log_info "This may be normal if using cached tokens" + fi +} + +# Print deployment summary +print_summary() { + echo "" + echo "==========================================" + echo " Deployment Complete!" 
+ echo "==========================================" + echo "" + log_success "Services are running" + echo "" + echo "Access URLs:" + echo " - Guacamole UI: http://localhost:8080/guacamole/" + echo " - API Health: http://localhost:8000/api/health" + echo " - API Docs: http://localhost:8000/docs (if enabled)" + echo "" + echo "Admin Credentials:" + echo " - Username: $SYSTEM_ADMIN_USERNAME" + echo " - Password: ${SYSTEM_ADMIN_PASSWORD:0:3}***${SYSTEM_ADMIN_PASSWORD: -3} (length: ${#SYSTEM_ADMIN_PASSWORD})" + echo "" + echo "Useful Commands:" + echo " - View logs: docker compose logs -f" + echo " - API logs: docker compose logs -f remote_access_api" + echo " - Check health: curl http://localhost:8000/api/health | jq" + echo " - Stop: docker compose down" + echo " - Restart: docker compose restart" + echo "" + + if [ -f "002-create-admin-user-DEFAULT-BACKUP.sql" ]; then + log_info "Original SQL backed up to: 002-create-admin-user-DEFAULT-BACKUP.sql" + fi + + echo "" +} + +# Main deployment flow +main() { + check_requirements + load_env + check_critical_passwords + generate_admin_sql + check_compose_file + start_containers + wait_for_services + verify_deployment + print_summary +} + +# Run +main + diff --git a/guacamole_test_11_26/docker-compose.yml b/guacamole_test_11_26/docker-compose.yml new file mode 100755 index 00000000..b8fa71e8 --- /dev/null +++ b/guacamole_test_11_26/docker-compose.yml @@ -0,0 +1,170 @@ +version: '3.3' + +services: + # Redis for session storage and rate limiting + redis: + image: redis:7-alpine + container_name: guacamole_redis + command: redis-server --appendonly yes --requirepass ${REDIS_PASSWORD} + volumes: + - redis_data_t:/data + networks: + - backend_net + restart: unless-stopped + healthcheck: + # Authenticate the probe: with requirepass set, an unauthenticated + # command is rejected, so the original "--raw incr ping" check could + # never succeed. ${REDIS_PASSWORD} is interpolated by compose. + test: ["CMD", "redis-cli", "-a", "${REDIS_PASSWORD}", "ping"] + interval: 30s + timeout: 10s + retries: 3
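The same authentication requirement applies to application code; the sketch below is a quick way to confirm the `REDIS_PASSWORD` wiring from the API's point of view. It assumes the redis-py package; host, port, and db mirror the compose defaults used further down in this file:

```python
# Sketch: verify Redis authentication with the same env vars the API uses.
import os

import redis

client = redis.Redis(
    host=os.getenv("REDIS_HOST", "localhost"),
    port=int(os.getenv("REDIS_PORT", "6379")),
    password=os.environ["REDIS_PASSWORD"],  # must match requirepass
    db=int(os.getenv("REDIS_DB", "0")),
    socket_connect_timeout=2,
)

# ping() returns True on success and raises AuthenticationError on a
# bad password, so an assert is enough for a smoke test.
assert client.ping()
print("Redis authentication OK")
```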
+ # PostgreSQL database for Guacamole + postgres: + image: postgres:13 + container_name: guacamole_postgres + environment: + POSTGRES_DB: ${POSTGRES_DB} + POSTGRES_USER: ${POSTGRES_USER} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + volumes: + - postgres_data_t:/var/lib/postgresql/data + - ./001-create-schema.sql:/docker-entrypoint-initdb.d/001-create-schema.sql + - ./002-create-admin-user.sql:/docker-entrypoint-initdb.d/002-create-admin-user.sql + - ./003-create-api-schema.sql:/docker-entrypoint-initdb.d/003-create-api-schema.sql + networks: + - db_net + restart: unless-stopped + + # Guacamole Daemon (guacd) + guacd: + image: guacamole/guacd:latest + container_name: guacamole_daemon + networks: + - frontend_net + - backend_net + restart: unless-stopped + + # Guacamole Web Application + guacamole: + image: guacamole/guacamole:latest + container_name: guacamole_web + depends_on: + - postgres + - guacd + environment: + GUACD_HOSTNAME: guacd + GUACD_PORT: 4822 + POSTGRESQL_HOSTNAME: postgres + POSTGRESQL_DATABASE: ${POSTGRES_DB} + POSTGRESQL_USERNAME: ${POSTGRES_USER} + POSTGRESQL_PASSWORD: ${POSTGRES_PASSWORD} + # WebSocket and session settings for nginx + WEBSOCKET_TUNNEL_READ_TIMEOUT: 7200000 + WEBSOCKET_TUNNEL_WRITE_TIMEOUT: 7200000 + API_SESSION_TIMEOUT: 7200 + # Security settings + EXTENSION_PRIORITY: postgresql + # Ports removed - access through nginx only + networks: + - frontend_net + - backend_net + - db_net + restart: unless-stopped + + # Custom API Service + remote_access_api: + build: + context: ./api + dockerfile: Dockerfile + container_name: remote_access_api + depends_on: + - guacamole + - redis + environment: + # Guacamole URLs + GUACAMOLE_URL: ${GUACAMOLE_URL:-http://guacamole:8080} + GUACAMOLE_PUBLIC_URL: ${GUACAMOLE_PUBLIC_URL:-http://localhost:8080} + + # Redis Configuration + REDIS_HOST: redis + REDIS_PORT: 6379 + REDIS_PASSWORD: ${REDIS_PASSWORD} + REDIS_DB: 0 + + # PostgreSQL Configuration + POSTGRES_HOST: ${POSTGRES_HOST:-postgres} + POSTGRES_PORT: ${POSTGRES_PORT:-5432} + POSTGRES_DB: ${POSTGRES_DB:-mc_db} + POSTGRES_USER: ${POSTGRES_USER:-mc_db_user} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} + + # System Admin Account + SYSTEM_ADMIN_USERNAME: ${SYSTEM_ADMIN_USERNAME} + SYSTEM_ADMIN_PASSWORD: ${SYSTEM_ADMIN_PASSWORD} + + # JWT Configuration + JWT_SECRET_KEY: ${JWT_SECRET_KEY} + JWT_ALGORITHM: ${JWT_ALGORITHM:-HS256} + JWT_ACCESS_TOKEN_EXPIRE_MINUTES: ${JWT_ACCESS_TOKEN_EXPIRE_MINUTES:-60} + JWT_REFRESH_TOKEN_EXPIRE_DAYS: ${JWT_REFRESH_TOKEN_EXPIRE_DAYS:-7} + + # Security Settings + REQUIRE_AUTHENTICATION: ${REQUIRE_AUTHENTICATION:-true} + DEFAULT_USER_ROLE: ${DEFAULT_USER_ROLE:-USER} + + # Password Encryption + PASSWORD_ENCRYPTION_KEY: ${PASSWORD_ENCRYPTION_KEY:-} + + # API Settings + LOG_LEVEL: ${LOG_LEVEL:-INFO} + LOG_FORMAT: ${LOG_FORMAT:-json} + RATE_LIMIT_ENABLED: ${RATE_LIMIT_ENABLED:-true} + RATE_LIMIT_REQUESTS: ${RATE_LIMIT_REQUESTS:-10} + RATE_LIMIT_WINDOW: ${RATE_LIMIT_WINDOW:-60} + + ALLOWED_ORIGINS: ${ALLOWED_ORIGINS} + + ENABLE_DOCS: ${ENABLE_DOCS:-true} + + ED25519_SIGNING_KEY_PATH: /app/secrets/ed25519_signing_key.pem + + volumes: + - signing_keys_t:/app/secrets + + networks: + - backend_net + - db_net + restart: unless-stopped + + nginx: + image: nginx:alpine + container_name: remote_access_nginx + depends_on: + - remote_access_api + - guacamole + ports: + - "8443:8443" # Only port exposed, for the external nginx + volumes: + - ./nginx/mc.exbytestudios.com.conf:/etc/nginx/conf.d/default.conf + - ./nginx/logs:/var/log/nginx + networks: + - frontend_net + - backend_net + restart: unless-stopped + healthcheck: + test: ["CMD", "nginx", "-t"] + interval: 30s + timeout: 10s + retries: 3 + +volumes: + postgres_data_t: + redis_data_t: + signing_keys_t: + +networks: + frontend_net: + driver: bridge + backend_net: + driver: bridge + db_net: + driver: bridge \ No newline at end of file diff --git a/guacamole_test_11_26/generate_guacamole_user.py b/guacamole_test_11_26/generate_guacamole_user.py new file mode 100755 index 00000000..00dd9003 --- /dev/null +++ b/guacamole_test_11_26/generate_guacamole_user.py @@ -0,0 +1,239 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +SQL generator for creating Guacamole user with custom password + +Uses same hashing algorithm as Guacamole: +- SHA-256(password_bytes + salt_bytes) +- Random 32-byte salt + +Usage: + python generate_guacamole_user.py --username admin --password MySecurePass123 + python generate_guacamole_user.py --username admin --password MySecurePass123 --admin +""" + +import hashlib +import secrets +import argparse +import sys +import io + +# Fix Windows encoding issues +if sys.platform == 'win32': + sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8') + sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8') + + +def generate_guacamole_password_hash(password: str) -> tuple[bytes, bytes]: + """ + Generate hash and salt for Guacamole password + + CORRECT ALGORITHM (verified 2025-10-29): + Guacamole uses: SHA-256(password_string + salt_hex_string) + IMPORTANT: Salt converted to HEX string BEFORE hashing! + + Args: + password: Password in plain text + + Returns: + Tuple (password_hash, password_salt) as bytes for PostgreSQL + """ + # Generate random 32-byte salt + salt_bytes = secrets.token_bytes(32) + + # CRITICAL: Convert salt to HEX STRING (uppercase) + # Guacamole hashes: password + hex(salt), NOT password + binary(salt)! + salt_hex_string = salt_bytes.hex().upper() + + # Compute SHA-256(password_string + salt_hex_string) + # Concatenate password STRING + salt HEX STRING, then encode to UTF-8 + hash_input = password + salt_hex_string + password_hash = hashlib.sha256(hash_input.encode('utf-8')).digest() + + return password_hash, salt_bytes
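The inverse operation follows directly from the documented rule and is useful for testing: recompute SHA-256 over the password plus the uppercase hex of the salt and compare. A small sketch; the round-trip self-test at the bottom only exercises the generator defined above:

```python
# Sketch: verify a password against a (hash, salt) pair produced by the
# generator above, or read back from the guacamole_user table.
import hashlib

from generate_guacamole_user import generate_guacamole_password_hash

def verify_guacamole_password(
    password: str, password_hash: bytes, password_salt: bytes
) -> bool:
    # Same rule as the generator: SHA-256(password + uppercase hex salt).
    candidate = hashlib.sha256(
        (password + password_salt.hex().upper()).encode("utf-8")
    ).digest()
    return candidate == password_hash

h, s = generate_guacamole_password_hash("MySecurePass123")
assert verify_guacamole_password("MySecurePass123", h, s)
assert not verify_guacamole_password("wrong-password", h, s)
print("verification round-trip OK")
```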
+ +def bytes_to_postgres_hex(data: bytes) -> str: + """ + Convert bytes to PostgreSQL hex format for decode() + + Args: + data: Bytes to convert + + Returns: + String in 'HEXSTRING' format for use in decode('...', 'hex') + """ + return data.hex().upper() + + +def generate_sql(username: str, password: str, is_admin: bool = False) -> str: + """ + Generate SQL for creating Guacamole user + + Args: + username: Username + password: Password + is_admin: If True, grant full administrator privileges + + Returns: + SQL script to execute + """ + password_hash, password_salt = generate_guacamole_password_hash(password) + + hash_hex = bytes_to_postgres_hex(password_hash) + salt_hex = bytes_to_postgres_hex(password_salt) + + sql = f"""-- Generated Guacamole user creation SQL +-- Username: {username} +-- Password: {'*' * len(password)} (length: {len(password)}) +-- Generated with: generate_guacamole_user.py + +-- Create user entity +INSERT INTO guacamole_entity (name, type) +VALUES ('{username}', 'USER'); + +-- Create user with password hash +INSERT INTO guacamole_user (entity_id, password_hash, password_salt, password_date) +SELECT + entity_id, + decode('{hash_hex}', 'hex'), + decode('{salt_hex}', 'hex'), + CURRENT_TIMESTAMP +FROM guacamole_entity +WHERE name = '{username}' AND guacamole_entity.type = 'USER'; +""" + + if is_admin: + sql += f""" +-- Grant all system permissions (administrator) +INSERT INTO guacamole_system_permission (entity_id, permission) +SELECT entity_id, permission::guacamole_system_permission_type +FROM ( + VALUES + ('{username}', 'CREATE_CONNECTION'), + ('{username}', 'CREATE_CONNECTION_GROUP'), + ('{username}', 'CREATE_SHARING_PROFILE'), + ('{username}', 'CREATE_USER'), + ('{username}', 'CREATE_USER_GROUP'), + ('{username}', 'ADMINISTER') +) permissions (username, permission) +JOIN guacamole_entity ON permissions.username = guacamole_entity.name AND guacamole_entity.type = 'USER'; + +-- Grant permission to read/update/administer self +INSERT INTO guacamole_user_permission (entity_id, affected_user_id, permission) +SELECT guacamole_entity.entity_id, guacamole_user.user_id, permission::guacamole_object_permission_type +FROM ( + VALUES + ('{username}', '{username}', 'READ'), + ('{username}', '{username}', 'UPDATE'), + ('{username}', '{username}', 'ADMINISTER') +) permissions (username, affected_username, permission) +JOIN guacamole_entity ON permissions.username = guacamole_entity.name AND guacamole_entity.type = 'USER' +JOIN guacamole_entity affected ON permissions.affected_username = affected.name AND affected.type = 'USER' +JOIN guacamole_user ON guacamole_user.entity_id = affected.entity_id; +""" + + return sql
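Because `generate_sql` returns a plain string, it can also be driven from other tooling instead of the CLI defined below. A small sketch; the username, password, and output file name here are illustrative, and the generated file must never be committed (see the `.gitignore` patterns earlier in this diff):

```python
# Sketch: programmatic use of generate_sql with an illustrative account.
from generate_guacamole_user import generate_sql

sql = generate_sql("deploy_admin", "S0me-Long-Random-Pass!", is_admin=True)
with open("002-create-admin-user-GENERATED.sql", "w", encoding="utf-8") as f:
    f.write(sql)

# Apply with:
#   docker compose exec -T postgres psql -U guacamole_user -d guacamole_db \
#       < 002-create-admin-user-GENERATED.sql
```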
+def main(): + parser = argparse.ArgumentParser( + description='Generate SQL for creating Guacamole user with custom password', + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + # Create regular user + python generate_guacamole_user.py --username john --password SecurePass123 + + # Create administrator user + python generate_guacamole_user.py --username admin --password AdminPass456 --admin + + # Save to file + python generate_guacamole_user.py --username admin --password AdminPass456 --admin > 002-custom-admin.sql + + # Apply directly to running database + python generate_guacamole_user.py --username admin --password AdminPass456 --admin | \\ + docker compose exec -T postgres psql -U guacamole_user -d guacamole_db + +SECURITY NOTES: + - Never commit generated SQL files with passwords to git! + - Use strong passwords (minimum 16 characters, mixed case, numbers, symbols) + - Change default passwords immediately after deployment + - Store passwords securely (password manager, secrets vault) +""" + ) + + parser.add_argument( + '--username', + required=True, + help='Username for the new Guacamole user' + ) + + parser.add_argument( + '--password', + required=True, + help='Password for the new user (plain text)' + ) + + parser.add_argument( + '--admin', + action='store_true', + help='Grant administrator privileges (ADMINISTER system permission)' + ) + + parser.add_argument( + '--verify', + action='store_true', + help='Verify password by generating hash twice' + ) + + args = parser.parse_args() + + # Validate password strength + if len(args.password) < 8: + print("[WARNING] Password is too short (< 8 characters)", file=sys.stderr) + print(" Recommended: minimum 16 characters with mixed case, numbers, symbols", file=sys.stderr) + response = input("Continue anyway? (y/N): ") + if response.lower() != 'y': + sys.exit(1) + + # Verify if requested + if args.verify: + print("[VERIFY] Verifying hash generation...", file=sys.stderr) + hash1, salt1 = generate_guacamole_password_hash(args.password) + hash2, salt2 = generate_guacamole_password_hash(args.password) + + # Salts should be different (random) + if salt1 == salt2: + print("[ERROR] Salt generation not random!", file=sys.stderr) + sys.exit(1) + + # But if we use same salt, hash should be same + # Use correct algorithm: SHA256(password_string + salt_hex_string) + salt_hex_string = salt1.hex().upper() + hash_test = hashlib.sha256((args.password + salt_hex_string).encode('utf-8')).digest() + if hash_test == hash1: + print("[OK] Hash generation verified", file=sys.stderr) + else: + print("[ERROR] Hash generation mismatch!", file=sys.stderr) + sys.exit(1) + + # Generate SQL + sql = generate_sql(args.username, args.password, args.admin) + + # Output + print(sql) + + # Print info to stderr (so it doesn't interfere with piping SQL) + role = "Administrator" if args.admin else "Regular User" + print(f"\n[OK] SQL generated successfully!", file=sys.stderr) + print(f" Username: {args.username}", file=sys.stderr) + print(f" Role: {role}", file=sys.stderr) + print(f" Password length: {len(args.password)} characters", file=sys.stderr) + print(f"\n[INFO] To apply this SQL:", file=sys.stderr) + print(f" docker compose exec -T postgres psql -U guacamole_user -d guacamole_db < output.sql", file=sys.stderr) + + +if __name__ == '__main__': + main() + diff --git a/guacamole_test_11_26/nginx/mc.exbytestudios.com.conf b/guacamole_test_11_26/nginx/mc.exbytestudios.com.conf new file mode 100755 index 00000000..724144ef --- /dev/null +++ b/guacamole_test_11_26/nginx/mc.exbytestudios.com.conf @@ -0,0 +1,185 @@ +# Docker nginx configuration for mc.exbytestudios.com +# Internal nginx inside the Docker container +# Accepts HTTP from the external nginx and proxies it to the services
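Since docker-compose.yml publishes port 8443, this internal config can be smoke-tested directly on the Docker host before the gateway is wired in. A sketch assuming the requests package; the paths come from the location blocks below:

```python
# Sketch: smoke-test the internal nginx listener on port 8443.
import requests

BASE = "http://localhost:8443"

# The root location should redirect to the API docs.
r = requests.get(f"{BASE}/", allow_redirects=False, timeout=5)
assert r.status_code == 302
assert r.headers["Location"].endswith("/api/docs")

# /api/ is proxied to the FastAPI service; /api/health is its health check.
health = requests.get(f"{BASE}/api/health", timeout=5).json()
print("overall_status:", health.get("overall_status"))
```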
+# WebSocket upgrade mapping +map $http_upgrade $connection_upgrade { + default upgrade; + '' close; +} + +# Upstream definitions (using Docker service names) +upstream remote_access_api { + server remote_access_api:8000; + keepalive 32; +} + +upstream guacamole_web { + server guacamole:8080; + keepalive 32; +} + +# Main server block - listens on port 8443 for the external nginx +server { + listen 8443; + server_name _; # Accept any Host header from the external nginx + + # Logging (internal Docker logs) + access_log /var/log/nginx/docker.access.log; + error_log /var/log/nginx/docker.error.log; + + # General settings + client_max_body_size 10M; + client_body_timeout 60s; + client_header_timeout 60s; + keepalive_timeout 65s; + + # Root location - redirect to API docs + location = / { + return 302 /api/docs; + } + + # ========================================================================= + # API Endpoints - all business endpoints under the /api/ prefix + # ========================================================================= + # FastAPI endpoints: /api/auth/*, /api/connections, /api/machines/*, + # /api/bulk/*, /api/security/* + # CORS headers are added by the gateway nginx to avoid duplicates + location /api/ { + proxy_pass http://remote_access_api; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Host $host; + proxy_set_header X-Forwarded-Port $server_port; + + # Pass Content-Type and Content-Length through for POST/PUT + proxy_set_header Content-Type $content_type; + proxy_set_header Content-Length $content_length; + + # Timeouts + proxy_connect_timeout 30s; + proxy_send_timeout 120s; + proxy_read_timeout 120s; + + # Buffering disabled so POST bodies stream straight through + proxy_buffering off; + proxy_request_buffering off; + client_max_body_size 10M; + + # Cache control + add_header Cache-Control "no-cache, no-store, must-revalidate" always; + add_header Pragma "no-cache" always; + add_header Expires "0" always; + } + + # ✅ WebSocket notifications - special handling for WebSocket traffic + # CRITICAL: long timeouts and disabled buffering for WebSocket + location /ws/ { + proxy_pass http://remote_access_api; + proxy_http_version 1.1; + + # ✅ WebSocket upgrade headers + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + + # Standard proxy headers + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Host $host; + proxy_set_header X-Forwarded-Port $server_port; + + # ✅ CRITICAL: long WebSocket timeouts (up to 2 hours) + proxy_connect_timeout 60s; + proxy_send_timeout 7200s; + proxy_read_timeout 7200s; + + # ✅ CRITICAL: buffering disabled for WebSocket + proxy_buffering off; + proxy_request_buffering off; + + # Cache control + add_header Cache-Control "no-cache, no-store, must-revalidate" always; + } + + + # Guacamole Web Application + location /guacamole/ { + proxy_pass http://guacamole_web/guacamole/; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Proto $scheme; + + # WebSocket support for Guacamole + proxy_read_timeout 7200s; + proxy_send_timeout 7200s; + + # Buffer settings for WebSocket + proxy_buffering off; + proxy_request_buffering off; + + # Allow iframe embedding for Guacamole client (desktop/electron apps) + proxy_hide_header X-Frame-Options; + proxy_hide_header Content-Security-Policy; + + # Cache control + add_header Cache-Control "no-cache, no-store, must-revalidate" always; + } + + # Guacamole WebSocket tunnel + location /guacamole/websocket-tunnel { + proxy_pass http://guacamole_web/guacamole/websocket-tunnel; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + # WebSocket specific settings + proxy_read_timeout 7200s; + proxy_send_timeout 7200s; + proxy_buffering off; + proxy_request_buffering off; + + # Allow iframe embedding and WebSocket in iframe + proxy_hide_header X-Frame-Options; + proxy_hide_header Content-Security-Policy; + } + + # Guacamole static assets + location ~ ^/guacamole/(.*\.(js|css|json|png|jpg|jpeg|gif|ico|svg|woff|woff2|ttf|eot))$ { + proxy_pass http://guacamole_web/guacamole/$1; + proxy_http_version 1.1; + proxy_set_header Host $host; + + # Cache static assets for 1 hour + add_header Cache-Control "public, max-age=3600"; + expires 1h; + } + + # Custom error pages + error_page 404 /404.html; + error_page 500 502 503 504 /50x.html; + + location = /404.html { + return 404 '{"error": "Not Found", "message": "The requested resource was not found"}'; + add_header Content-Type application/json always; + } + + location = /50x.html { + return 500 '{"error": "Internal Server Error", "message": "Please try again later"}'; + add_header Content-Type application/json always; + } +} diff --git a/guacamole_test_11_26/nginx/mc.exbytestudios_gate.com b/guacamole_test_11_26/nginx/mc.exbytestudios_gate.com new file mode 100755 index 00000000..8da68ff5 --- /dev/null +++ b/guacamole_test_11_26/nginx/mc.exbytestudios_gate.com @@ -0,0 +1,329 @@ +# ============================================================================= +# Gateway Nginx Configuration for mc.exbytestudios.com +# +# Architecture: +# Internet -> Gateway nginx (this server) -> Docker server (192.168.200.10:8443) +# +# Gateway: SSL termination, DDoS protection, rate limiting, security headers +# Docker: internal nginx -> FastAPI + Guacamole +# ============================================================================= + +# ============================================================================= +# CORS Allowed Origins +# 🔒 IMPORTANT: add only trusted domains here! +# ============================================================================= +map $http_origin $cors_origin { + default ""; + "~^https://mc\.exbytestudios\.com$" $http_origin; + "~^https://test\.exbytestudios\.com$" $http_origin; + "~^http://localhost:5173$" $http_origin; + "~^http://127\.0\.0\.1:5173$" $http_origin; +}
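The map echoes only whitelisted origins back; for anything else `$cors_origin` stays empty and nginx omits the header entirely, since `add_header` skips empty values. A quick check of that behavior, assuming the requests package and a live gateway:

```python
# Sketch: confirm the $cors_origin map only echoes whitelisted origins.
import requests

URL = "https://mc.exbytestudios.com/api/health"

cases = [
    ("https://mc.exbytestudios.com", "https://mc.exbytestudios.com"),
    ("https://evil.example", None),  # not in the map -> header omitted
]

for origin, expected in cases:
    r = requests.options(
        URL,
        headers={"Origin": origin, "Access-Control-Request-Method": "GET"},
        timeout=5,
    )
    got = r.headers.get("Access-Control-Allow-Origin")
    print(f"{origin} -> {r.status_code}, ACAO={got!r}")
    assert got == expected
```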
+ +# Rate limiting to protect against DDoS +limit_req_zone $binary_remote_addr zone=api:10m rate=30r/s; +limit_req_zone $binary_remote_addr zone=guacamole:10m rate=50r/s; + +# Upstream to the Docker server (internal network) +upstream docker_server { + server 192.168.200.10:8443; + keepalive 32; + keepalive_requests 100; + keepalive_timeout 60s; +} + +# ============================================================================= +# HTTP Server - Redirect to HTTPS +# ============================================================================= +server { + listen 80; + listen [::]:80; + server_name mc.exbytestudios.com; + + # Let's Encrypt ACME challenge + location /.well-known/acme-challenge/ { + root /var/www/html; + } + + # Redirect all other traffic to HTTPS + location / { + return 301 https://$server_name$request_uri; + } +} + +# ============================================================================= +# HTTPS Server - Main Gateway +# ============================================================================= +server { + listen 443 ssl http2; + listen [::]:443 ssl http2; + server_name mc.exbytestudios.com; + + # SSL Configuration + # ⚠️ These paths are configured automatically by certbot + # If certbot is not used, replace them with your own certificates + ssl_certificate /etc/letsencrypt/live/mc.exbytestudios.com/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/mc.exbytestudios.com/privkey.pem; + + # Modern SSL configuration (Mozilla Intermediate) + ssl_protocols TLSv1.2 TLSv1.3; + ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384; + ssl_prefer_server_ciphers off; + ssl_session_cache shared:SSL:10m; + ssl_session_timeout 1d; + ssl_session_tickets off; + + # OCSP Stapling + ssl_stapling on; + ssl_stapling_verify on; + ssl_trusted_certificate /etc/letsencrypt/live/mc.exbytestudios.com/chain.pem; + resolver 8.8.8.8 8.8.4.4 valid=300s; + resolver_timeout 5s; + + # Security Headers + add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always; + add_header X-Content-Type-Options "nosniff" always; + add_header X-Frame-Options "SAMEORIGIN" always; + add_header X-XSS-Protection "1; mode=block" always; + add_header Referrer-Policy "strict-origin-when-cross-origin" always; + add_header Permissions-Policy "geolocation=(), microphone=(), camera=()" always; + + # Logging + access_log /var/log/nginx/mc.exbytestudios.com.access.log; + error_log /var/log/nginx/mc.exbytestudios.com.error.log warn; + + # General settings + client_max_body_size 100M; + client_body_timeout 120s; + client_header_timeout 120s; + keepalive_timeout 65s; + + # ========================================================================= + # Root - Redirect to API docs + # ========================================================================= + location = / { + return 302 /api/docs; + }
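The api zone defined above allows 30 requests/second per client IP with a burst of 20 (nodelay), and nginx answers excess requests with its default 503 for limit_req. A rough probe, assuming the requests package; whether you actually trip the limit depends on how fast your client can issue requests:

```python
# Sketch: hammer the /api/ route and tally status codes; a tail of 503s
# indicates the limit_req burst has been exhausted.
import requests

codes = [
    requests.get("https://mc.exbytestudios.com/api/health", timeout=5).status_code
    for _ in range(60)
]
print({code: codes.count(code) for code in set(codes)})
# Expect mostly 200s, with 503s only once requests exceed 30 r/s + burst 20.
```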
+ # ========================================================================= + # API Endpoints - Rate limiting + # ========================================================================= + location /api/ { + limit_req zone=api burst=20 nodelay; + + # ✅ CORS headers for /api/machines/saved and the other /api/* endpoints + # The $cors_origin map restricts them to the whitelisted domains + add_header 'Access-Control-Allow-Origin' '$cors_origin' always; + add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, OPTIONS' always; + add_header 'Access-Control-Allow-Headers' 'Authorization, Content-Type, X-Requested-With' always; + add_header 'Access-Control-Allow-Credentials' 'true' always; + + # Handle preflight requests + if ($request_method = 'OPTIONS') { + add_header 'Access-Control-Allow-Origin' '$cors_origin' always; + add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, OPTIONS' always; + add_header 'Access-Control-Allow-Headers' 'Authorization, Content-Type, X-Requested-With' always; + add_header 'Access-Control-Allow-Credentials' 'true' always; + add_header 'Access-Control-Max-Age' 1728000; + add_header 'Content-Type' 'text/plain charset=UTF-8'; + add_header 'Content-Length' 0; + return 204; + } + + proxy_pass http://docker_server; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Host $host; + proxy_set_header X-Forwarded-Port $server_port; + + # ✅ CRITICAL: hide CORS headers coming from the backend to avoid duplicates + proxy_hide_header Access-Control-Allow-Origin; + proxy_hide_header Access-Control-Allow-Methods; + proxy_hide_header Access-Control-Allow-Headers; + proxy_hide_header Access-Control-Allow-Credentials; + + # Timeouts + proxy_connect_timeout 30s; + proxy_send_timeout 120s; + proxy_read_timeout 120s; + + # Buffering + proxy_buffering on; + proxy_buffer_size 8k; + proxy_buffers 8 8k; + proxy_busy_buffers_size 16k; + + # Cache control + add_header Cache-Control "no-cache, no-store, must-revalidate" always; + add_header Pragma "no-cache" always; + add_header Expires "0" always; + }
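Because the FastAPI backend may emit its own CORS headers, the proxy_hide_header lines above are what keep browsers from ever seeing duplicates. A check that each Access-Control header reaches the client exactly once, using only the standard library (which, unlike most HTTP clients, exposes repeated headers):

```python
# Sketch: assert the gateway strips backend CORS headers so each
# Access-Control-Allow-Origin header appears exactly once.
from urllib.request import Request, urlopen

req = Request(
    "https://mc.exbytestudios.com/api/health",
    headers={"Origin": "https://mc.exbytestudios.com"},
)
with urlopen(req, timeout=5) as resp:
    # get_all() returns every occurrence of the header, not just the first.
    acao = resp.headers.get_all("Access-Control-Allow-Origin") or []
    print(acao)
    assert len(acao) == 1, "duplicate CORS headers reach the client"
```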
+ # ========================================================================= + # WebSocket Notifications - special handling for WebSocket traffic + # CRITICAL: long timeouts and disabled buffering + # ========================================================================= + location /ws/ { + # Lighter rate limiting for WebSocket (lower than for the API) + limit_req zone=api burst=5 nodelay; + + # ✅ CORS headers for WebSocket connections + add_header 'Access-Control-Allow-Origin' '$cors_origin' always; + add_header 'Access-Control-Allow-Methods' 'GET, OPTIONS' always; + add_header 'Access-Control-Allow-Headers' 'Authorization, Sec-WebSocket-Protocol, Sec-WebSocket-Extensions, Sec-WebSocket-Key, Sec-WebSocket-Version' always; + add_header 'Access-Control-Allow-Credentials' 'true' always; + + # Handle preflight requests (rarely needed for WebSocket, but harmless) + if ($request_method = 'OPTIONS') { + add_header 'Access-Control-Allow-Origin' '$cors_origin' always; + add_header 'Access-Control-Allow-Methods' 'GET, OPTIONS' always; + add_header 'Access-Control-Allow-Headers' 'Authorization, Sec-WebSocket-Protocol, Sec-WebSocket-Extensions, Sec-WebSocket-Key, Sec-WebSocket-Version' always; + add_header 'Access-Control-Allow-Credentials' 'true' always; + add_header 'Access-Control-Max-Age' 1728000; + add_header 'Content-Type' 'text/plain charset=UTF-8'; + add_header 'Content-Length' 0; + return 204; + } + + proxy_pass http://docker_server; + proxy_http_version 1.1; + + # ✅ CRITICAL: WebSocket upgrade headers + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + + # Standard proxy headers + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Host $host; + proxy_set_header X-Forwarded-Port $server_port; + + # ✅ CRITICAL: hide backend CORS headers + proxy_hide_header Access-Control-Allow-Origin; + proxy_hide_header Access-Control-Allow-Methods; + proxy_hide_header Access-Control-Allow-Headers; + proxy_hide_header Access-Control-Allow-Credentials; + + # ✅ CRITICAL: long WebSocket timeouts (up to 2 hours) + proxy_connect_timeout 60s; + proxy_send_timeout 7200s; + proxy_read_timeout 7200s; + + # ✅ CRITICAL: buffering disabled for WebSocket + proxy_buffering off; + proxy_request_buffering off; + + # Cache control + add_header Cache-Control "no-cache, no-store, must-revalidate" always; + } + + # ========================================================================= + # Guacamole Web Application - Rate limiting + # ========================================================================= + location /guacamole/ { + limit_req zone=guacamole burst=10 nodelay; + + proxy_pass http://docker_server/guacamole/; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + # WebSocket support - long timeouts for remote sessions + proxy_read_timeout 7200s; + proxy_send_timeout 7200s; + + # Disable buffering for WebSocket + proxy_buffering off; + proxy_request_buffering off; + + # Allow iframe embedding for Guacamole client (desktop/electron apps) + proxy_hide_header X-Frame-Options; + proxy_hide_header Content-Security-Policy; + + # Cache control + add_header Cache-Control "no-cache, no-store, must-revalidate" always; + } + + # Guacamole WebSocket Tunnel + location /guacamole/websocket-tunnel { + limit_req zone=guacamole burst=5 nodelay; + + proxy_pass http://docker_server/guacamole/websocket-tunnel; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection $connection_upgrade; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + # WebSocket specific settings + proxy_read_timeout 7200s; + proxy_send_timeout 7200s; + proxy_buffering off; + proxy_request_buffering off; + + # Allow iframe embedding and WebSocket in iframe + proxy_hide_header X-Frame-Options; + proxy_hide_header Content-Security-Policy; + } + + # Guacamole Static Assets - Caching + location ~ ^/guacamole/(.*\.(js|css|json|png|jpg|jpeg|gif|ico|svg|woff|woff2|ttf|eot))$ { + proxy_pass http://docker_server/guacamole/$1; + proxy_http_version 1.1; + proxy_set_header Host $host; + + # Cache static assets for 1 hour + add_header Cache-Control "public, max-age=3600"; + expires 1h; + } + + # ========================================================================= + # Security - Block sensitive paths + # ========================================================================= + location ~ 
^/(\.env|\.git|docker-compose|Dockerfile|\.htaccess|\.htpasswd) { + deny all; + return 404; + } + + location ~ /\. { + deny all; + return 404; + } + + # ========================================================================= + # Error Pages + # ========================================================================= + error_page 404 /404.html; + error_page 500 502 503 504 /50x.html; + + location = /404.html { + return 404 '{"error": "Not Found", "message": "The requested resource was not found"}'; + add_header Content-Type application/json always; + } + + location = /50x.html { + return 500 '{"error": "Internal Server Error", "message": "Service temporarily unavailable"}'; + add_header Content-Type application/json always; + } +} + +# ============================================================================= +# WebSocket Upgrade Mapping +# ============================================================================= +map $http_upgrade $connection_upgrade { + default upgrade; + '' close; +}
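With both configs in place, the /ws/ path can be exercised end to end through the gateway. A sketch assuming the websockets package; the exact notification path and the bearer-token scheme are assumptions, not taken from this diff, and the header keyword is named extra_headers in websockets < 14 (additional_headers in newer releases):

```python
# Sketch: end-to-end WebSocket check through the gateway's /ws/ route.
# The URI path and auth scheme are assumptions; adjust to the real API.
import asyncio

import websockets

async def main() -> None:
    token = "<JWT obtained from /api/auth>"
    uri = "wss://mc.exbytestudios.com/ws/notifications"
    async with websockets.connect(
        uri,
        extra_headers={"Authorization": f"Bearer {token}"},
    ) as ws:
        # The gateway keeps idle connections open for up to 2 hours;
        # here we just wait briefly for a first notification frame.
        print(await asyncio.wait_for(ws.recv(), timeout=30))

asyncio.run(main())
```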