/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 * Copyright (C) 2025 Sergej Görzen
 * This file is part of OmiLAXR.
 */
namespace OmiLAXR.Composers
{
    /// <summary>
    /// Categorizes data statements by their functional and semantic role in XR-based Learning Analytics.
    /// These groups support filtering, interpretation, and visualization of learning-related interactions and system events.
    /// </summary>
    public enum ComposerGroup
    {
        /// <summary>
        /// System-level events, such as logging, session control, or framework operations.
        /// </summary>
        System,

        /// <summary>
        /// Visual attention indicators, especially derived from eye tracking or head direction (e.g. gaze on object).
        /// </summary>
        Attention,

        /// <summary>
        /// Gestures and body movements (e.g. hand tracking, symbolic gestures, interaction via controllers).
        /// </summary>
        Gesture,

        /// <summary>
        /// Affective states such as frustration, joy, confusion — typically inferred through analysis or self-report.
        /// </summary>
        Emotion,

        /// <summary>
        /// Environmental conditions and system-relevant spatial/visual context (e.g. lighting, spatial anchors, object states).
        /// </summary>
        Environment,

        /// <summary>
        /// Physiological and psychometric measurements (e.g. heart rate, GSR, EEG), possibly from biosensors.
        /// </summary>
        Physiology,

        /// <summary>
        /// General user input, including button presses, trigger pulls, UI selection, and controller events.
        /// </summary>
        Input,

        /// <summary>
        /// Locomotion and spatial navigation data (e.g. walking, teleporting, rotation).
        /// </summary>
        Locomotion,

        /// <summary>
        /// Cognitive state indicators or inferred mental processes, such as load, memory use, or problem-solving behavior.
        /// </summary>
        Cognition,

        /// <summary>
        /// Collaboration and interaction between users or agents, such as communication or co-manipulation of objects.
        /// </summary>
        Collaboration,

        /// <summary>
        /// Task-related activities such as starting, completing, or retrying a learning task or challenge.
        /// </summary>
        Task,

        /// <summary>
        /// Feedback delivered to the user, e.g. hints, corrections, reinforcement, or scaffolding.
        /// </summary>
        Feedback,

        /// <summary>
        /// Movement- or object-related actions using virtual tools (e.g. grabbing, rotating, activating devices).
        /// </summary>
        ToolUse,

        /// <summary>
        /// Speech-related interaction, including spoken commands, verbal feedback, or analysis of speech content.
        /// </summary>
        Speech,

        /// <summary>
        /// Errors, failed actions, or invalid interactions captured during task execution or interaction.
        /// </summary>
        Error,

        /// <summary>
        /// Performance metrics or evaluation outcomes, such as scores, completion time, or success rates.
        /// </summary>
        Performance,

        /// <summary>
        /// Contextual metadata, such as scenario settings, level info, environment configuration, or session parameters.
        /// </summary>
        Context,

        /// <summary>
        /// Data from external assistance systems (e.g. coaching tools, researcher annotations, automated tutors).
        /// </summary>
        Assistance,

        /// <summary>
        /// Unclassified or miscellaneous events that do not fit into predefined categories.
        /// </summary>
        Other
    }
}