2020-07-23 21:46:04 +02:00
// SPDX-License-Identifier: MPL-2.0
// Copyright © 2020 Skyline Team and Contributors (https://github.com/skyline-emu/)
2023-01-01 00:32:05 +01:00
# include <gpu.h>
2020-11-03 10:44:09 +01:00
# include <common/signal.h>
2023-01-01 00:32:05 +01:00
# include <common/settings.h>
2021-01-20 22:29:42 +01:00
# include <loader/loader.h>
2020-11-03 10:44:09 +01:00
# include <kernel/types/KProcess.h>
2021-03-24 21:09:21 +01:00
# include <soc.h>
2021-07-01 21:21:17 +02:00
# include <os.h>
2022-08-31 16:20:56 +02:00
# include "channel.h"
2023-03-05 22:50:00 +01:00
# include "macro/macro_state.h"
2020-07-23 21:46:04 +02:00
2021-03-24 21:09:21 +01:00
namespace skyline : : soc : : gm20b {
2021-07-01 21:21:17 +02:00
/**
 * @brief A single pushbuffer method header that describes a compressed method sequence
 * @url https://github.com/NVIDIA/open-gpu-doc/blob/ab27fc22db5de0d02a4cabe08e555663b62db4d4/manuals/volta/gv100/dev_ram.ref.txt#L850
 * @url https://github.com/NVIDIA/open-gpu-doc/blob/ab27fc22db5de0d02a4cabe08e555663b62db4d4/classes/host/clb06f.h#L179
 */
union PushBufferMethodHeader {
    u32 raw; //!< The raw 32-bit header word as read from the pushbuffer

    /**
     * @brief Tertiary opcode, only meaningful when secOp is one of the *UseTert operations
     * @note Grp0* values apply under Grp0UseTert while Grp2NonIncMethod applies under Grp2UseTert, hence the intentionally overlapping value 0
     */
    enum class TertOp : u8 {
        Grp0IncMethod = 0,
        Grp0SetSubDevMask = 1,
        Grp0StoreSubDevMask = 2,
        Grp0UseSubDevMask = 3,
        Grp2NonIncMethod = 0,
    };

    /**
     * @brief Secondary opcode describing how the following method arguments are compressed
     */
    enum class SecOp : u8 {
        Grp0UseTert = 0, //!< Use tertOp (group 0) to decode this header
        IncMethod = 1, //!< The method address increments for every argument
        Grp2UseTert = 2, //!< Use tertOp (group 2) to decode this header
        NonIncMethod = 3, //!< The method address stays constant for every argument
        ImmdDataMethod = 4, //!< The argument is embedded in the header itself (immdData), no entries follow
        OneInc = 5, //!< The method address increments once then stays constant
        Reserved6 = 6,
        EndPbSegment = 7, //!< Terminates the current pushbuffer segment
    };

    u16 methodAddress : 12; //!< The address of the first method to call

    struct {
        u8 _pad0_ : 4;
        u16 subDeviceMask : 12; //!< Mask used by the SetSubDevMask/StoreSubDevMask/UseSubDevMask tertiary ops
    };

    struct {
        u16 _pad1_ : 13;
        SubchannelId methodSubChannel : 3; //!< The subchannel (and thereby the engine) the method sequence is directed at

        union {
            TertOp tertOp : 3;
            u16 methodCount : 13; //!< The number of arguments that follow this header
            u16 immdData : 13; //!< The inline argument used with ImmdDataMethod
        };
    };

    struct {
        u32 _pad2_ : 29;
        SecOp secOp : 3;
    };

    /**
     * @brief Checks if a method is 'pure' i.e. does not touch macro or GPFIFO methods
     */
    bool Pure() const {
        // Compute how far past methodAddress the sequence will reach: NonInc/Immd never advance, OneInc advances once, Inc advances per argument
        u32 size{[&]() -> u32 {
            switch (secOp) {
                case SecOp::NonIncMethod:
                case SecOp::ImmdDataMethod:
                    return 0;
                case SecOp::OneInc:
                    return 1;
                default:
                    return methodCount;
            }
        }()};

        u32 end{static_cast<u32>(methodAddress + size)};

        // Pure iff the entire range lies below the macro region and above the GPFIFO register block
        return end < engine::EngineMethodsEnd && methodAddress >= engine::GPFIFO::RegisterCount;
    }
};
static_assert(sizeof(PushBufferMethodHeader) == sizeof(u32));
2021-10-08 21:25:21 +02:00
/**
 * @brief Constructs the GPFIFO channel and immediately launches the processing thread
 * @param numEntries The maximum number of GpEntries that can be queued at once
 * @note The thread is started last in the initializer list so all other members are fully constructed before Run() can observe them
 */
ChannelGpfifo::ChannelGpfifo(const DeviceState &state, ChannelContext &channelCtx, size_t numEntries) :
    state(state),
    gpfifoEngine(state.soc->host1x.syncpoints, channelCtx),
    channelCtx(channelCtx),
    gpEntries(numEntries),
    thread(std::thread(&ChannelGpfifo::Run, this)) {}
2023-03-05 22:50:00 +01:00
void ChannelGpfifo : : SendFull ( u32 method , GpfifoArgument argument , SubchannelId subChannel , bool lastCall ) {
2021-07-01 21:21:17 +02:00
if ( method < engine : : GPFIFO : : RegisterCount ) {
2023-03-05 22:50:00 +01:00
gpfifoEngine . CallMethod ( method , * argument ) ;
2022-01-19 21:45:51 +01:00
} else if ( method < engine : : EngineMethodsEnd ) { [[likely]]
2023-03-05 22:50:00 +01:00
SendPure ( method , * argument , subChannel ) ;
2022-01-19 21:45:51 +01:00
} else {
switch ( subChannel ) {
case SubchannelId : : ThreeD :
2023-03-05 22:50:00 +01:00
skipDirtyFlushes = channelCtx . maxwell3D . HandleMacroCall ( method - engine : : EngineMethodsEnd , argument , lastCall ,
[ & executor = channelCtx . executor ] {
executor . Submit ( { } , true ) ;
} ) ;
2020-08-09 15:17:45 +02:00
break ;
2022-01-19 21:45:51 +01:00
case SubchannelId : : TwoD :
2023-03-05 22:50:00 +01:00
skipDirtyFlushes = channelCtx . fermi2D . HandleMacroCall ( method - engine : : EngineMethodsEnd , argument , lastCall ,
[ & executor = channelCtx . executor ] {
executor . Submit ( { } , true ) ;
} ) ;
2020-08-09 15:17:45 +02:00
break ;
default :
2023-03-05 22:50:00 +01:00
Logger : : Warn ( " Called method 0x{:X} out of bounds for engine 0x{:X}, args: 0x{:X} " , method , subChannel , * argument ) ;
2022-01-19 21:45:51 +01:00
break ;
2020-08-09 15:17:45 +02:00
}
}
2020-07-23 21:46:04 +02:00
}
2022-02-22 21:37:31 +01:00
void ChannelGpfifo : : SendPure ( u32 method , u32 argument , SubchannelId subChannel ) {
2022-08-31 16:20:56 +02:00
if ( subChannel = = SubchannelId : : ThreeD ) [[likely]] {
channelCtx . maxwell3D . CallMethod ( method , argument ) ;
return ;
}
2022-02-22 21:37:31 +01:00
switch ( subChannel ) {
case SubchannelId : : ThreeD :
2022-08-31 16:20:56 +02:00
channelCtx . maxwell3D . CallMethod ( method , argument ) ;
2022-02-22 21:37:31 +01:00
break ;
2022-04-09 14:14:06 +02:00
case SubchannelId : : Compute :
channelCtx . keplerCompute . CallMethod ( method , argument ) ;
break ;
2022-03-20 19:05:54 +01:00
case SubchannelId : : Inline2Mem :
channelCtx . inline2Memory . CallMethod ( method , argument ) ;
break ;
2022-04-09 14:14:06 +02:00
case SubchannelId : : Copy :
channelCtx . maxwellDma . CallMethod ( method , argument ) ;
2022-02-07 14:15:55 +01:00
case SubchannelId : : TwoD :
2022-07-31 16:05:51 +02:00
channelCtx . fermi2D . CallMethod ( method , argument ) ;
2022-03-20 19:08:11 +01:00
break ;
2022-02-22 21:37:31 +01:00
default :
Logger : : Warn ( " Called method 0x{:X} in unimplemented engine 0x{:X}, args: 0x{:X} " , method , subChannel , argument ) ;
break ;
}
}
2022-03-04 20:41:22 +01:00
void ChannelGpfifo : : SendPureBatchNonInc ( u32 method , span < u32 > arguments , SubchannelId subChannel ) {
switch ( subChannel ) {
case SubchannelId : : ThreeD :
2022-08-31 16:20:56 +02:00
channelCtx . maxwell3D . CallMethodBatchNonInc ( method , arguments ) ;
2022-03-04 20:41:22 +01:00
break ;
2022-04-09 14:14:06 +02:00
case SubchannelId : : Compute :
channelCtx . keplerCompute . CallMethodBatchNonInc ( method , arguments ) ;
break ;
2022-03-20 19:05:54 +01:00
case SubchannelId : : Inline2Mem :
channelCtx . inline2Memory . CallMethodBatchNonInc ( method , arguments ) ;
break ;
2022-04-09 14:14:06 +02:00
case SubchannelId : : Copy :
channelCtx . maxwellDma . CallMethodBatchNonInc ( method , arguments ) ;
2022-03-20 19:08:11 +01:00
break ;
2022-03-04 20:41:22 +01:00
default :
Logger : : Warn ( " Called method 0x{:X} in unimplemented engine 0x{:X} with batch args " , method , subChannel ) ;
break ;
}
}
2021-10-08 21:25:21 +02:00
/**
 * @brief Processes the pushbuffer referenced by a single GpEntry, decoding and dispatching every method header within it
 * @note Methods split across GpEntry boundaries are stored in resumeState and resumed on the next call
 */
void ChannelGpfifo::Process(GpEntry gpEntry) {
    if (!gpEntry.size) {
        // This is a GPFIFO control entry, all control entries have a zero length and contain no pushbuffers
        switch (gpEntry.opcode) {
            case GpEntry::Opcode::Nop:
                return;
            default:
                Logger::Warn("Unsupported GpEntry control opcode used: {}", static_cast<u8>(gpEntry.opcode));
                return;
        }
    }

    auto pushBufferMappedRanges{channelCtx.asCtx->gmmu.TranslateRange(gpEntry.Address(), gpEntry.size * sizeof(u32))};

    bool pushBufferCopied{}; //!< Set by the below lambda in order to track if the pushbuffer is a copy of guest memory or not
    auto pushBuffer{[&]() -> span<u32> {
        if (pushBufferMappedRanges.size() == 1) {
            // Single contiguous mapping: read method data in place from guest memory
            return pushBufferMappedRanges.front().cast<u32>();
        } else {
            // Create an intermediate copy of pushbuffer data if it's split across multiple mappings
            pushBufferData.resize(gpEntry.size);
            channelCtx.asCtx->gmmu.Read<u32>(pushBufferData, gpEntry.Address());
            pushBufferCopied = true;
            return span(pushBufferData);
        }
    }()};

    // If any mapped range of the pushbuffer intersects GPU-dirty memory, either flush the outstanding work now
    // or (when macro handling asked us to skip flushes) mark the buffer dirty so arguments are handled accordingly
    bool pushbufferDirty{false};
    for (auto range : pushBufferMappedRanges) {
        if (channelCtx.executor.usageTracker.dirtyIntervals.Intersect(range)) {
            if (skipDirtyFlushes)
                pushbufferDirty = true;
            else
                channelCtx.executor.Submit({}, true);
        }
    }

    // There will be at least one entry here
    auto entry{pushBuffer.begin()};

    // Wraps the current entry into a GpfifoArgument: copied pushbuffers pass the value directly (no backing pointer),
    // in-place mappings pass a pointer together with the dirty flag
    auto getArgument{[&]() {
        return GpfifoArgument{pushBufferCopied ? *entry : 0, pushBufferCopied ? nullptr : entry.base(), pushbufferDirty};
    }};

    // Executes the current split method, returning once execution is finished or the current GpEntry has reached its end
    auto resumeSplitMethod{[&]() {
        switch (resumeState.state) {
            case MethodResumeState::State::Inc:
                while (entry != pushBuffer.end() && resumeState.remaining) {
                    SendFull(resumeState.address++, getArgument(), resumeState.subChannel, --resumeState.remaining == 0);
                    entry++;
                }
                break;
            case MethodResumeState::State::OneInc:
                SendFull(resumeState.address++, getArgument(), resumeState.subChannel, --resumeState.remaining == 0);
                entry++;

                // After the first increment OneInc methods work the same as a NonInc method, this is needed so they can resume correctly if they are broken up by multiple GpEntries
                resumeState.state = MethodResumeState::State::NonInc;
                [[fallthrough]];
            case MethodResumeState::State::NonInc:
                while (entry != pushBuffer.end() && resumeState.remaining) {
                    SendFull(resumeState.address, getArgument(), resumeState.subChannel, --resumeState.remaining == 0);
                    entry++;
                }
                break;
        }
    }};

    // We've a method from a previous GpEntry that needs resuming
    if (resumeState.remaining)
        resumeSplitMethod();

    // Process more methods if the entries are still not all used up after handling resuming
    for (; entry != pushBuffer.end(); entry++) {
        if (entry >= pushBuffer.end()) [[unlikely]]
            throw exception("GPFIFO buffer overflow!"); // This should never happen

        // Entries containing all zeroes is a NOP, skip over them
        for (; *entry == 0; entry++)
            if (entry == std::prev(pushBuffer.end()))
                return;

        PushBufferMethodHeader methodHeader{.raw = *entry};

        // Needed in order to check for methods split across multiple GpEntries
        ssize_t remainingEntries{std::distance(entry, pushBuffer.end()) - 1};

        // Handles storing state and initial execution for methods that are split across multiple GpEntries
        auto startSplitMethod{[&](auto methodState) {
            resumeState = {
                .remaining = methodHeader.methodCount,
                .address = methodHeader.methodAddress,
                .subChannel = methodHeader.methodSubChannel,
                .state = methodState
            };

            // Skip over method header as `resumeSplitMethod` doesn't expect it to be there
            entry++;

            resumeSplitMethod();
        }};

        /**
         * @brief Handles execution of a specific method type as specified by the State template parameter
         * @return Whether the method was split across GpEntries and processing of this GpEntry should stop
         */
        auto dispatchCalls{[&]<MethodResumeState::State State>() {
            /**
             * @brief Gets the offset to apply to the method address for a given dispatch loop index
             */
            auto methodOffset{[](u32 i) -> u32 {
                if constexpr (State == MethodResumeState::State::Inc)
                    return i;
                else if constexpr (State == MethodResumeState::State::OneInc)
                    return i ? 1 : 0;
                else
                    return 0;
            }};

            constexpr u32 BatchCutoff{4}; //!< Cutoff needed to send method calls in a batch which is especially important for UBO updates. This helps to avoid the extra overhead batching for small packets.
            // TODO: Only batch for specific target methods like UBO updates, since normal dispatch is generally cheaper

            if (remainingEntries >= methodHeader.methodCount) [[likely]] {
                if (methodHeader.Pure()) [[likely]] {
                    if constexpr (State == MethodResumeState::State::NonInc) {
                        // For pure noninc methods we can send all method calls as a span in one go
                        if (methodHeader.methodCount > BatchCutoff) [[unlikely]] {
                            SendPureBatchNonInc(methodHeader.methodAddress, span(&(*++entry), methodHeader.methodCount), methodHeader.methodSubChannel);
                            entry += methodHeader.methodCount - 1; // -1 since the for loop will advance entry once more
                            return false;
                        }
                    } else if constexpr (State == MethodResumeState::State::OneInc) {
                        // For pure oneinc methods we can send the initial method then send the rest as a span in one go
                        if (methodHeader.methodCount > (BatchCutoff + 1)) [[unlikely]] {
                            SendPure(methodHeader.methodAddress, *++entry, methodHeader.methodSubChannel);
                            SendPureBatchNonInc(methodHeader.methodAddress + 1, span((++entry).base(), methodHeader.methodCount - 1), methodHeader.methodSubChannel);
                            entry += methodHeader.methodCount - 2; // -2 for the initial method and the for loop's own advance
                            return false;
                        }
                    }

                    #pragma unroll(2)
                    for (u32 i{}; i < methodHeader.methodCount; i++)
                        SendPure(methodHeader.methodAddress + methodOffset(i), *++entry, methodHeader.methodSubChannel);
                } else {
                    // Slow path for methods that touch GPFIFO or macros
                    for (u32 i{}; i < methodHeader.methodCount; i++) {
                        entry++;
                        SendFull(methodHeader.methodAddress + methodOffset(i), getArgument(), methodHeader.methodSubChannel, i == methodHeader.methodCount - 1);
                    }
                }
            } else {
                // Not enough arguments left in this GpEntry, save state and resume in the next one
                startSplitMethod(State);
                return true;
            }

            return false;
        }};

        /**
         * @brief Handles execution of a single method
         * @return If the this was the final method in the current GpEntry
         */
        auto processMethod{[&]() -> bool {
            if (methodHeader.secOp == PushBufferMethodHeader::SecOp::IncMethod) [[likely]] {
                return dispatchCalls.operator()<MethodResumeState::State::Inc>();
            } else if (methodHeader.secOp == PushBufferMethodHeader::SecOp::OneInc) [[likely]] {
                return dispatchCalls.operator()<MethodResumeState::State::OneInc>();
            } else if (methodHeader.secOp == PushBufferMethodHeader::SecOp::ImmdDataMethod) {
                // The argument is inline in the header itself so no entries are consumed
                if (methodHeader.Pure())
                    SendPure(methodHeader.methodAddress, methodHeader.immdData, methodHeader.methodSubChannel);
                else
                    SendFull(methodHeader.methodAddress, GpfifoArgument{methodHeader.immdData}, methodHeader.methodSubChannel, true);

                return false;
            } else if (methodHeader.secOp == PushBufferMethodHeader::SecOp::NonIncMethod) [[unlikely]] {
                return dispatchCalls.operator()<MethodResumeState::State::NonInc>();
            } else if (methodHeader.secOp == PushBufferMethodHeader::SecOp::EndPbSegment) [[unlikely]] {
                return true;
            } else if (methodHeader.secOp == PushBufferMethodHeader::SecOp::Grp0UseTert) {
                // Grp0SetSubDevMask is silently ignored, all other tertiary ops are unsupported
                if (methodHeader.tertOp == PushBufferMethodHeader::TertOp::Grp0SetSubDevMask)
                    return false;

                throw exception("Unsupported pushbuffer method TertOp: {}", static_cast<u8>(methodHeader.tertOp));
            } else {
                throw exception("Unsupported pushbuffer method SecOp: {}", static_cast<u8>(methodHeader.secOp));
            }
        }};

        bool hitEnd{[&]() {
            if (methodHeader.methodSubChannel != SubchannelId::ThreeD) [[unlikely]]
                channelCtx.maxwell3D.FlushEngineState(); // Flush the 3D engine state when doing any calls to other engines

            return processMethod();
        }()};

        if (hitEnd)
            break;
    }
}
2021-10-08 21:25:21 +02:00
/**
 * @brief Entry point of the GPFIFO thread, processes queued GpEntries until the thread is interrupted via SIGINT
 */
void ChannelGpfifo::Run() {
    if (int result{pthread_setname_np(pthread_self(), "GPFIFO")})
        Logger::Warn("Failed to set the thread name: {}", strerror(result));

    try {
        signal::SetSignalHandler({SIGINT, SIGILL, SIGTRAP, SIGBUS, SIGFPE}, signal::ExceptionalSignalHandler);
        signal::SetSignalHandler({SIGSEGV}, nce::NCE::HostSignalHandler); // We may access NCE trapped memory

        // The channel lock is held across an entire batch of GpEntries rather than per-entry and only released once the queue runs dry
        bool channelLocked{};
        gpEntries.Process([this, &channelLocked](GpEntry gpEntry) {
            Logger::Debug("Processing pushbuffer: 0x{:X}, Size: 0x{:X}", gpEntry.Address(), +gpEntry.size);

            if (!channelLocked) {
                channelCtx.Lock();
                channelLocked = true;
            }

            Process(gpEntry);
        }, [this, &channelLocked]() {
            // If we run out of GpEntries to process ensure we submit any remaining GPU work before waiting for more to arrive
            Logger::Debug("Finished processing pushbuffer batch");

            if (channelLocked) {
                channelCtx.executor.Submit();
                channelCtx.Unlock();
                channelLocked = false;
            }
        });
    } catch (const signal::SignalException &e) {
        // SIGINT is the expected shutdown signal (see the destructor), anything else is a fatal fault
        if (e.signal != SIGINT) {
            Logger::Error("{}\nStack Trace:{}", e.what(), state.loader->GetStackTrace(e.frames));
            Logger::EmulationContext.Flush();
            signal::BlockSignal({SIGINT});
            state.process->Kill(false);
        }
    } catch (const exception &e) {
        Logger::ErrorNoPrefix("{}\nStack Trace:{}", e.what(), state.loader->GetStackTrace(e.frames));
        Logger::EmulationContext.Flush();
        signal::BlockSignal({SIGINT});
        state.process->Kill(false);
    } catch (const std::exception &e) {
        Logger::Error(e.what());
        Logger::EmulationContext.Flush();
        signal::BlockSignal({SIGINT});
        state.process->Kill(false);
    }
}
2021-10-08 21:25:21 +02:00
/**
 * @brief Pushes a batch of GpEntries to be processed by the GPFIFO thread
 */
void ChannelGpfifo::Push(span<GpEntry> entries) {
    gpEntries.Append(entries);
}
2020-11-03 10:44:09 +01:00
2021-10-11 21:08:42 +02:00
/**
 * @brief Pushes a single GpEntry to be processed by the GPFIFO thread
 */
void ChannelGpfifo::Push(GpEntry entry) {
    gpEntries.Push(entry);
}
2021-10-08 21:25:21 +02:00
/**
 * @brief Interrupts the GPFIFO thread and waits for it to terminate
 */
ChannelGpfifo::~ChannelGpfifo() {
    if (!thread.joinable())
        return;

    // SIGINT is handled by the thread's signal handler and treated as a clean shutdown request (see Run)
    pthread_kill(thread.native_handle(), SIGINT);
    thread.join();
}
2020-07-23 21:46:04 +02:00
}