Commit 368f303

add local stream option for participants
1 parent c27952d

10 files changed, +40 -5 lines changed

backend/session/data/participant/participant_data.py (+5)

@@ -33,6 +33,7 @@ class ParticipantData(BaseData):
     size : SizeData
     muted_video : bool
     muted_audio : bool
+    local_stream : bool
     position : PositionData
     chat : list or custom_types.chat_message.ChatMessageDict
     filters : list or filters.FilterDict
@@ -80,6 +81,9 @@ class ParticipantData(BaseData):
     muted_audio: bool = field(repr=False)
     """Whether the participants' audio is forcefully muted by the experimenter."""
 
+    local_stream: bool = field(repr=False)
+    """Whether the participants' view is using a local stream."""
+
     position: PositionData = field(repr=False)
     """Position of the participant on the canvas (frontend).
 
@@ -143,6 +147,7 @@ def asdict(self) -> ParticipantDict:
             "size": self.size.asdict(),
             "muted_video": self.muted_video,
             "muted_audio": self.muted_audio,
+            "local_stream": self.local_stream,
             "position": self.position.asdict(),
             "chat": self.chat,
             "audio_filters": self.audio_filters,

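The pattern in this file repeats across the backend: a new field with repr suppressed, a matching docstring entry, and a matching key in asdict(). A minimal runnable sketch of that pattern, using a stripped-down stand-in class (not the real ParticipantData, which also carries size, position, chat and filter data):

```python
from dataclasses import dataclass, field


@dataclass
class ParticipantDataSketch:
    """Stripped-down stand-in for ParticipantData (illustration only)."""

    muted_video: bool = field(repr=False, default=False)
    muted_audio: bool = field(repr=False, default=False)
    local_stream: bool = field(repr=False, default=False)

    def asdict(self) -> dict:
        # Mirrors the asdict() hunk above: the new key sits next to the mute flags.
        return {
            "muted_video": self.muted_video,
            "muted_audio": self.muted_audio,
            "local_stream": self.local_stream,
        }


print(ParticipantDataSketch(local_stream=True).asdict())
# {'muted_video': False, 'muted_audio': False, 'local_stream': True}
```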
backend/session/data/participant/participant_data_factory.py (+1)

@@ -30,6 +30,7 @@ def participant_data_factory(participant_dict: ParticipantDict) -> ParticipantData:
         sizeData,
         participant_dict["muted_video"],
         participant_dict["muted_audio"],
+        participant_dict["local_stream"],
         positionData,
         participant_dict["chat"],
         participant_dict["audio_filters"],

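The factory forwards the new value by subscripting the dict, so in the toy stand-in below a participant dict that predates this field raises KeyError at that point (in the real backend such dicts are presumably rejected earlier by is_valid_participant). This is not the actual participant_data_factory, just the subscripting pattern:

```python
def participant_factory_sketch(participant_dict: dict) -> dict:
    # Toy stand-in: forwards the flags by plain subscripting, like the hunk above.
    return {
        "muted_video": participant_dict["muted_video"],
        "muted_audio": participant_dict["muted_audio"],
        "local_stream": participant_dict["local_stream"],
    }


print(participant_factory_sketch(
    {"muted_video": False, "muted_audio": False, "local_stream": True}
))

try:
    participant_factory_sketch({"muted_video": False, "muted_audio": False})
except KeyError as missing:
    print("missing key:", missing)  # older dicts without local_stream fail here
```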
backend/session/data/participant/participant_dict.py (+3)

@@ -28,6 +28,8 @@ class ParticipantDict(TypedDict):
         Whether the participants' video is forcefully muted by the experimenter.
     muted_audio : bool
         Whether the participants' audio is forcefully muted by the experimenter.
+    local_stream : bool
+        Whether the participants' view is using a local stream.
     audio_filters : list of filters.FilterDict
         Active audio filters for this participant.
     video_filters : list of filters.FilterDict
@@ -68,6 +70,7 @@ class ParticipantDict(TypedDict):
     participant_name: str
     muted_video: bool
     muted_audio: bool
+    local_stream: bool
     audio_filters: list[FilterDict]
     video_filters: list[FilterDict]
     chat_filters: list[ChatFilterDict]

backend/session/data/participant/participant_dict_validator.py (+1)

@@ -75,6 +75,7 @@ def is_valid_participant(data, recursive: bool = True) -> TypeGuard[ParticipantDict]:
         and isinstance(data["lastMessageReadTime"], int)
         and isinstance(data["chat_filters"], list)
         and isinstance(data["muted_audio"], bool)
+        and isinstance(data["local_stream"], bool)
         and isinstance(data["banned"], bool)
         and isinstance(data["canvas_id"], str)
     )
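With this check, local_stream must be present and boolean for a participant dict to validate. A minimal sketch of that behaviour with a toy checker (the real is_valid_participant also validates every other ParticipantDict key):

```python
def has_valid_local_stream(data: dict) -> bool:
    # Toy stand-in for the isinstance check added above.
    return isinstance(data.get("local_stream"), bool)


print(has_valid_local_stream({"local_stream": False}))  # True
print(has_valid_local_stream({}))                       # False: key missing
print(has_valid_local_stream({"local_stream": "yes"}))  # False: wrong type
```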

backend/users/participant.py (+1 -1)

@@ -79,7 +79,7 @@ def __init__(
         WebRTC `offer`. Use factory instead of instantiating Participant directly.
         """
         super(Participant, self).__init__(
-            participant_id, participant_data.muted_video, participant_data.muted_audio
+            participant_id, participant_data.muted_video, participant_data.muted_audio, participant_data.local_stream
         )
         self._logger = logging.getLogger(f"Participant-{participant_id}")
         self._participant_data = participant_data

backend/users/user.py (+8 -1)

@@ -89,6 +89,7 @@ class User(AsyncIOEventEmitter, metaclass=ABCMeta):
     _logger: logging.Logger
     _muted_video: bool
     _muted_audio: bool
+    _local_stream: bool
     _connection: ConnectionInterface | None
     _handlers: dict[str, list[Callable[[Any], Coroutine[Any, Any, MessageDict | None]]]]
     _ping_buffer: deque  # buffer of n last ping times
@@ -99,7 +100,7 @@ class User(AsyncIOEventEmitter, metaclass=ABCMeta):
     __lock: asyncio.Lock
 
     def __init__(
-        self, user_id: str, muted_video: bool = False, muted_audio: bool = False
+        self, user_id: str, muted_video: bool = False, muted_audio: bool = False, local_stream: bool = False
     ) -> None:
         """Instantiate new User base class.
 
@@ -120,6 +121,7 @@ def __init__(
         self._experiment = None
         self._muted_video = muted_video
         self._muted_audio = muted_audio
+        self._local_stream = local_stream
         self._handlers = {}
         self._ping_buffer = deque(maxlen=100)
         self._pinging = False
@@ -140,6 +142,11 @@ def muted_audio(self) -> bool:
         """bool indicating if the users audio is muted."""
        return self._muted_audio
 
+    @property
+    def local_stream(self) -> bool:
+        """bool indicating if the users video is using local stream."""
+        return self._local_stream
+
     @property
     def recorded(self) -> bool:
         """bool indicating if the users video is recorded."""

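On the User base class the new flag defaults to False and is exposed read-only through a property, in line with muted_video and muted_audio. A reduced sketch, assuming nothing beyond what the hunks above show (UserSketch is a hypothetical stand-in, not the real User, which also wires up event handling, connection state and ping buffers):

```python
class UserSketch:
    """Hypothetical stand-in for users.user.User, reduced to the new attribute."""

    _local_stream: bool

    def __init__(self, user_id: str, local_stream: bool = False) -> None:
        self._user_id = user_id
        self._local_stream = local_stream

    @property
    def local_stream(self) -> bool:
        """bool indicating if the user's video is using a local stream."""
        return self._local_stream


print(UserSketch("p1").local_stream)                     # False (default)
print(UserSketch("p2", local_stream=True).local_stream)  # True
```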
frontend/src/modals/ParticipantDataModal/ParticipantDataModal.tsx (+8)

@@ -454,6 +454,14 @@ function ParticipantDataModal({
               handleChange("muted_video", !participantCopy.muted_video);
             }}
           />
+          <FormControlLabel
+            control={<Checkbox />}
+            label="Local Stream"
+            checked={participantCopy.local_stream}
+            onChange={() => {
+              handleChange("local_stream", !participantCopy.local_stream);
+            }}
+          />
         </Box>
       </Box>
       {/* Displays the list of filters available in the backend in a dropdown */}

frontend/src/pages/MeetingRoom/MeetingRoom.js (+11 -3)

@@ -14,6 +14,7 @@ import {
 import { InstructionsTab } from "../../components/molecules/InstructionsTab/InstructionsTab";
 import "./MeetingRoom.css";
 import { ChatGptTab } from "../../components/molecules/ChatGptTab/ChatGptTab";
+import { getParticipantById } from "../../utils/utils";
 
 function MeetingRoom({ localStream, connection, onGetSession, onChat }) {
   const videoElement = useRef(null);
@@ -65,8 +66,15 @@ function MeetingRoom({ localStream, connection, onGetSession, onChat }) {
   }, [connection]);
 
   useEffect(() => {
-    setParticipantStream(localStream);
-  }, [localStream]);
+    if (sessionData) {
+      const participant = getParticipantById(participantIdParam, sessionData);
+      if (participant.local_stream) {
+        setParticipantStream(localStream);
+      } else {
+        setParticipantStream(connection.remoteStream);
+      }
+    }
+  }, [localStream, sessionData]);
 
   useEffect(() => {
     if (participantStream && videoElement.current) {
@@ -92,7 +100,7 @@ function MeetingRoom({ localStream, connection, onGetSession, onChat }) {
         <VideoCanvas
           connectedParticipants={connectedParticipants}
           sessionData={sessionData}
-          localStream={localStream}
+          localStream={participantStream}
           ownParticipantId={participantIdParam}
         />
       ) : (

frontend/src/types.ts (+1)

@@ -43,6 +43,7 @@ export type Participant = {
   size: { width: number; height: number };
   muted_video: boolean;
   muted_audio: boolean;
+  local_stream: boolean;
   position: { x: number; y: number; z: number };
   chat: ChatMessage[];
   audio_filters: Filter[];

frontend/src/utils/constants.ts (+1)

@@ -31,6 +31,7 @@ export const INITIAL_PARTICIPANT_DATA: Participant = {
   participant_name: "",
   muted_audio: true,
   muted_video: true,
+  local_stream: false,
   banned: false,
   video_filters: [],
   audio_filters: [],
