// rtc_plugins/util/RTCContext.cpp

#include "RTCContext.h"
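// When GIL is defined, onAudioProcess() acquires and releases the Python GIL around
// the Python callback via PyGILState_Ensure()/PyGILState_Release().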
#define GIL
void RTCContext::onRoom(uint32_t typeId, RTCENGINE_NAMESPACE::MRTCRoomInfo& roomInfo) {
    //std::cout << "RTCContext::onRoom():" << roomInfo.roomId << "," << roomInfo.displayName << "," << roomInfo.userId << "," << roomInfo.message;
    std::cout << "RTCContext::onRoom()" << std::endl;
    std::lock_guard<std::mutex> lock(mutex_);
    isOnRoom_ = true;
}

void RTCContext::onConsumer(uint32_t msgId, const char* roomId, const char* peerId,
        RTCENGINE_NAMESPACE::MRTCConsumerInfo& consumerInfo) {
    std::cout << "RTCContext::onConsumer():" << consumerInfo.roomId << "," << consumerInfo.displayName << ","
        << consumerInfo.channelIndex << std::endl;
    if (isRecv_) {
        std::lock_guard<std::mutex> lock(mutex_);
        std::cout << "registerSoundLevelListener" << std::endl;
        int16_t ret1 = rtcEngine_->registerSoundLevelListener(mrtc::TYPE_AUDIO_SOURCE_CUSTOM, consumerInfo.roomId,
            peerId, consumerInfo.channelIndex, this);
        if (0 != ret1) {
            std::cout << "RTCContext::instance().registerSoundLevelListener() inUser failed, ret:" << ret1 << std::endl;
            return;
        }

        std::cout << "muteAudio" << std::endl;
        int16_t ret2 = rtcEngine_->muteAudio(consumerInfo.roomId, peerId, mrtc::TYPE_AUDIO_SOURCE_CUSTOM,
            false, consumerInfo.channelIndex);
        if (0 != ret2) {
            std::cout << "RTCContext::instance().muteAudio() failed, ret:" << ret2 << std::endl;
            return;
        }

        std::cout << "init recv succ" << std::endl;
    }
}
void RTCContext::onRender(const char* roomId, const char* peerId,
        RTCENGINE_NAMESPACE::MRTCVideoSourceType sourceType, const RTCENGINE_NAMESPACE::MRTCVideoFrame& videoFrame) {
    std::cout << "RTCContext::onRender()" << std::endl;
}
void RTCContext::onCallBackMessage(uint32_t msgId, const char* msg) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (msgId == (uint32_t)mrtc::JOIN_MULTI_ROOM_SUCCESS) {
        std::cout << "receive join multi room callback, msgId:" << msgId << std::endl;
        isJoinMultiRoom_ = true;
    }
    std::cout << "RTCContext::onCallBackMessage(), msgId:" << msgId << ", msg:" << msg << std::endl;
}
void RTCContext::onCallBackCustomData(RTCENGINE_NAMESPACE::MRTCCustomDataObject object) {
    //std::cout << "RTCContext::onCallBackCustomData(), obj:" << object.peerId << "," << object.data << "," << object.data_length;
    std::cout << "RTCContext::onCallBackCustomData()" << std::endl;
}
void RTCContext::onSoundLevelUpdate(const char* roomId, const char* peerId, uint16_t audioSourceType,
        uint8_t channelIndex, uint16_t volume, int32_t vad)
{
    std::cout << "RTCContext::onSoundLevelUpdate()" << std::endl;
}

void RTCContext::onAudioProcess(const char* roomId, const char* peerId,
        mrtc::MRTCAudioFrame& audioFrame,
        mrtc::MRTCAudioSourceType audioSourceType)
{
    std::cout << "=== audio processing started ===" << std::endl;
    std::cout << "audioFrame:" << audioFrame.dataCount << "," << audioFrame.sampleRate << "," <<
        audioFrame.numChannels << "," << audioFrame.channelIndex << std::endl;

    if (!Py_IsInitialized()) {
        std::cerr << "Python interpreter is not initialized!" << std::endl;
        return;
    }

    // 1. Acquire the GIL (this callback arrives on an engine thread, not a Python thread)
    std::cout << "[1] acquiring GIL..." << std::endl;
#ifdef GIL
    PyGILState_STATE gstate = PyGILState_Ensure();
#endif

    namespace py = boost::python;
    namespace np = boost::python::numpy;

    try {
        // 2. Validate input
        std::cout << "[2] validating input..." << std::endl;
        std::cout << " dataCount: " << audioFrame.dataCount
            << " (max: " << std::numeric_limits<npy_intp>::max() << ")" << std::endl;
        if (!audioFrame.data || audioFrame.dataCount <= 0) {
            std::cout << "[ERROR] invalid audio data pointer or length" << std::endl;
            throw std::invalid_argument("Invalid audio frame data");
        }
        const size_t data_size = audioFrame.dataCount * sizeof(int16_t);
        std::cout << "step1" << std::endl;

        npy_intp shape[1] = { static_cast<npy_intp>(audioFrame.dataCount) };
        std::cout << "step2" << std::endl;

        // 7. Invoke the Python callback
        if (!pyCallback_.is_none()) {
            std::cout << "[7] preparing to invoke Python callback..." << std::endl;
            // Bump the refcount to keep the callback alive (currently disabled)
            //Py_INCREF(pyCallback_.ptr());
            try {
                //PyGILState_STATE gstate = PyGILState_Ensure();
                std::cout << "data:" << audioFrame.data << std::endl;
                std::cout << "current thread holds the GIL: " << PyGILState_Check() << std::endl;

                np::dtype dtype = np::dtype::get_builtin<int16_t>();
                std::cout << "init dtype" << std::endl;

                try {
                    py::object str_repr = py::str(dtype);
                    std::cout << "str_repr" << std::endl;
                    if (str_repr.ptr() != Py_None) {
                        std::cout << "str_repr is not null" << std::endl;
                        std::string dtype_str = py::extract<std::string>(str_repr);
                        std::cout << "dtype: " << dtype_str << std::endl;
                    } else {
                        std::cout << "dtype: None" << std::endl;
                    }
                } catch (const py::error_already_set&) {
                    std::cout << "dtype string conversion error" << std::endl;
                    PyErr_Clear();
                }

                std::cout << "shape: " << shape[0] << std::endl;
                // from_data() wraps the engine's buffer without copying, so the buffer
                // must stay valid while the callback runs.
                np::ndarray audioArray = np::from_data(
                    audioFrame.data,                  // data pointer
                    dtype,                            // dtype (int16)
                    py::make_tuple(shape[0]),         // shape (1D)
                    py::make_tuple(sizeof(int16_t)),  // strides
                    py::object()                      // owner: none (no copy is made)
                );
                std::cout << " ndarray wraps the frame buffer (no copy)" << std::endl;

                pyCallback_(
                    audioArray,                // numpy array
                    data_size,                 // size in bytes
                    audioFrame.dataCount,
                    audioFrame.sampleRate,
                    audioFrame.numChannels,
                    audioFrame.channelIndex
                );
                std::cout << " after callback" << std::endl;

                if (PyErr_Occurred()) {
                    PyObject *type, *value, *traceback;
                    PyErr_Fetch(&type, &value, &traceback);
                    if (value) {
                        PyObject* str = PyObject_Str(value);
                        if (str) {
                            std::cerr << "Python Error: " << PyUnicode_AsUTF8(str) << std::endl;
                            Py_DECREF(str);
                        }
                    }
                    Py_XDECREF(type);
                    Py_XDECREF(value);
                    Py_XDECREF(traceback);
                    //PyErr_Print();
                    throw std::runtime_error("Python callback error");
                }
                std::cout << " callback executed successfully" << std::endl;
            } catch (const py::error_already_set&) {
                std::cerr << "[PYTHON ERROR] ";
                PyErr_Print();  // prints the traceback to stderr
                // Optional: fetch more detail (rarely reached, since PyErr_Print() clears the error)
                if (PyErr_Occurred()) {
                    PyObject *type, *value, *traceback;
                    PyErr_Fetch(&type, &value, &traceback);
                    std::cerr << "Details: "
                        << PyUnicode_AsUTF8(PyObject_Str(value)) << std::endl;
                    PyErr_Restore(type, value, traceback);
                }
                //Py_DECREF(pyCallback_.ptr());
            } catch (...) {
                std::cout << "[ERROR] callback execution failed" << std::endl;
                //Py_DECREF(pyCallback_.ptr());
                throw;
            }
            //Py_DECREF(pyCallback_.ptr());
        } else {
            std::cout << "[7] no Python callback registered" << std::endl;
        }
        // 8. Clean up
        std::cout << "[8] releasing resources..." << std::endl;
        std::cout << "=== audio processing finished ===" << std::endl;
    } catch (const std::exception& e) {
        std::cout << "[EXCEPTION] caught: " << e.what() << std::endl;
        std::cerr << "Audio process error: " << e.what() << std::endl;
    }
    // 9. Release the GIL exactly once per PyGILState_Ensure(); releasing it inside the
    // try block and again here would double-release the same gstate.
    std::cout << "[9] releasing GIL..." << std::endl;
#ifdef GIL
    PyGILState_Release(gstate);
#endif
}
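// Callback contract (as implemented above): the Python callable registered through
// setPyCallback() is invoked as callback(audioArray, data_size, dataCount, sampleRate,
// numChannels, channelIndex), where audioArray is a 1-D int16 numpy view over the
// engine's frame buffer (no copy). A callback that needs the samples after it returns
// should copy them first (e.g. with numpy's copy()).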
void RTCContext::onProducer(uint32_t msgId, mrtc::MRTCProducerInfo& info)
{
    std::cout << "-----------------------------------" << std::endl;
    std::cout << "RTCContext::onProducer()" << std::endl;
}
bool RTCContext::init(const char* selfUserId, const char* selfDisplayName, const char* selfRoomId)
{
    if (!numpyApi_ || !numpyApi_[93]) {  // 93 is the offset of PyArray_SimpleNew in the NumPy C-API table
        std::cout << "numpyApi_ is null in init" << std::endl;
    } else {
        std::cout << "numpyApi_ is not null in init, numpyApi_[93]:" << numpyApi_[93] << std::endl;
    }
    mrtc::IMRTCEngineFactory* rtcFactory = mrtc::getMRTCEngineFactory();
    if (!rtcFactory)
    {
        return false;
    }
    rtcEngine_ = rtcFactory->produceMRTCEngine();
    if (!rtcEngine_)
    {
        return false;
    }
    mrtc::MRTCEngineConfig engineConfig;
    strcpy(engineConfig.domain, domain);
    strcpy(engineConfig.applicationId, appid);
    strcpy(engineConfig.appSecrectKey, appSecrectKey);
    engineConfig.port = port;
    if (0 != rtcEngine_->init(engineConfig, this))
    {
        std::cout << "RTCContext::instance().init() failed" << std::endl;
        return false;
    }
    if (0 != rtcEngine_->setUserInfo(selfUserId, selfDisplayName, selfRoomId))
    {
        std::cout << "RTCContext::instance().setUserInfo() failed" << std::endl;
        return false;
    }
    mrtc::MRTCJoinAuthority authority;
    strcpy(authority.applicationId, appid);
    strcpy(authority.appSecretKey, appSecrectKey);
    mrtc::MRTCJoinConfig loginConfig;
    if (0 != rtcEngine_->joinRoom(authority, loginConfig))
    {
        std::cout << "RTCContext::instance().joinRoom() failed" << std::endl;
        return false;
    }
    if (0 != rtcEngine_->registerListener(mrtc::MRTCListenerType::TYPE_LISTENER_ROOM, this))
    {
        std::cout << "RTCContext::instance().registerListener(TYPE_LISTENER_ROOM) failed" << std::endl;
        return false;
    }
    if (0 != rtcEngine_->registerListener(mrtc::MRTCListenerType::TYPE_LISTENER_CONSUMER, this))
    {
        std::cout << "RTCContext::instance().registerListener(TYPE_LISTENER_CONSUMER) failed" << std::endl;
        return false;
    }

    namespace py = boost::python;
    namespace np = boost::python::numpy;
    Py_Initialize();   // initialize the embedded Python interpreter
    np::initialize();  // initialize boost::python::numpy (must follow Py_Initialize)
    return true;
}
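// Typical call order, inferred from the guards in this file: setPyCallback()/setNumpyApi()
// if frames are to be delivered to Python, then init(), then initSend() and/or initRecv();
// sendCustomAudioData() blocks until onRoom() (and, for a cross-room send, the multi-room
// join callback) has fired.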
bool RTCContext::initRecv(const char* destRoomId, const char* srcUserId, const int16_t destChannelIndex)
{
    isRecv_ = true;
    return true;
}
bool RTCContext::initSend(const char* srcRoomId, const char* destRoomId, const int16_t destChannelIndex, uint8_t channelNum)
{
    while (!isOnRoom_)
    {
        std::cout << "wait for OnRoom" << std::endl;
        sleep(3);
    }

    if (std::string(srcRoomId) != std::string(destRoomId)) {
        isMultiRoom_ = true;
        std::cout << "join multi room" << std::endl;
        int16_t ret1 = rtcEngine_->joinMultiRoom(destRoomId);
        if (ret1 != 0)
        {
            std::cout << "joinMultiRoom fail, ret:" << ret1 << std::endl;
            return false;
        }
    } else {
        isMultiRoom_ = false;
    }
    mrtc::MRTCAudioOption option;
    option.channel = channelNum;
    if (isMultiRoom_) {
        strcpy(option.dstRoomId, destRoomId);
    }
    option.channelIndex = destChannelIndex;
    std::cout << "startCustomAudio" << std::endl;
    int16_t ret2 = rtcEngine_->startCustomAudio(option);
    if (ret2 != 0)
    {
        std::cout << "startCustomAudio fail, ret:" << ret2 << std::endl;
        return false;
    }
    std::cout << "init send succ" << std::endl;
    return true;
}
bool RTCContext::initGIL() {
    isGIL_ = true;
    return true;
}
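// Note: initGIL() only records the request in isGIL_; in this file the actual GIL handling
// in onAudioProcess() is selected at compile time by the GIL macro defined above.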
void RTCContext::destorySend(const int16_t selfChannelIndex)
{
    rtcEngine_->stopCustomAudio(selfChannelIndex);
}
int16_t RTCContext::sendAudioData(uint8_t channelIndex, const void* pData, int32_t nSampleRate, uint64_t nNumberOfChannels,
        uint64_t dataLength)
{
    std::lock_guard<std::mutex> lock(mutex_);
    if (pData_)
    {
        return rtcEngine_->sendCustomAudioData(channelIndex, pData, nSampleRate, nNumberOfChannels, dataLength);
    }
    return 0;
}
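// sendAudioData() forwards only when setpData() has supplied a buffer and never blocks;
// sendCustomAudioData() below instead waits until the room (and, if needed, the multi-room
// join) is ready before sending.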
int16_t RTCContext::sendCustomAudioData(const int16_t channelIndex, void* customData, int32_t sampleRate,
        uint64_t channelNum, uint64_t dataLen)
{
    while (!isOnRoom_ || (isMultiRoom_ && !isJoinMultiRoom_)) {
        std::cout << "wait for room and multi room before send" << std::endl;
        sleep(3);
    }
    std::lock_guard<std::mutex> lock(mutex_);
    if (customData == nullptr) {
        std::cout << "customData is null" << std::endl;
        return -1;
    }
    std::cout << "customData addr is:" << customData << std::endl;
    return rtcEngine_->sendCustomAudioData(channelIndex, customData, sampleRate,
        channelNum, dataLen);
}
mrtc::IMRTCEngine* RTCContext::getRtcEngine() const
{
    std::lock_guard<std::mutex> lock(mutex_);
    return rtcEngine_;
}
void* RTCContext::getpData() const
{
    std::lock_guard<std::mutex> lock(mutex_);
    return pData_;
}
void RTCContext::setpData(void* pData)
{
    std::lock_guard<std::mutex> lock(mutex_);
    pData_ = pData;
}
void RTCContext::setPyCallback(boost::python::object callback) {
    std::lock_guard<std::mutex> lock(mutex_);
    pyCallback_ = callback;
}
void RTCContext::setNumpyApi(void** numpyApi) {
    std::lock_guard<std::mutex> lock(mutex_);
    numpyApi_ = numpyApi;
    if (numpyApi_) {
        std::cout << "setNumpyApi, numpyApi_[93]:" << numpyApi_[93] << std::endl;
    } else {
        std::cout << "setNumpyApi, numpyApi_ is null" << std::endl;
    }
}