diff --git a/test_time_r.py b/test_time_r.py
index a1087d5..7a25fff 100644
--- a/test_time_r.py
+++ b/test_time_r.py
@@ -19,12 +19,6 @@ def my_callback(shmName, dataSize, dataCount, sampleRate, numChannels, channelIn
     print(f"dataSize:{dataSize}, dataCount:{dataCount}, sampleRate:{sampleRate}, numChannels:{numChannels}, channelIndex:{channelIndex}")
     print(f"data:{shmName}")
-    #audioData = np.array([0, 1, -1, 0], dtype=np.int16)
-    #ret = rtc_plugins.sendCustomAudioData(srcChannelIndex, audioData, 48000, 1, len(audioData))
-    #if ret != 0:
-    #    print(f"resend fail, ret:{ret}")
-    #else:
-    #    print("resend succ")
 
 ret = rtc_plugins.init(destUserId, destDisplayName, destRoomId, my_callback)
 if ret != 0:
     print(f"init fail, ret:{ret}")
@@ -33,7 +27,7 @@ ret = rtc_plugins.initRecv(destRoomId, srcUserId, destChannelIndex)
 if ret != 0:
     print(f"initRecv fail, ret:{ret}")
     exit(1)
-ret = rtc_plugins.initSend(destRoomId, srcRoomId, srcChannelIndex)
+ret = rtc_plugins.initSend(destRoomId, srcRoomId, srcChannelIndex, 1)
 if ret != 0:
     print(f"initSend fail, ret:{ret}")
     exit(1)
diff --git a/test_time_s.py b/test_time_s.py
index 44c54b9..0b6aa26 100644
--- a/test_time_s.py
+++ b/test_time_s.py
@@ -18,7 +18,7 @@ ret = rtc_plugins.init(srcUserId, srcDisplayName, srcRoomId, my_callback)
 if ret != 0:
     print(f"init fail, ret:{ret}")
     exit(1)
-ret = rtc_plugins.initSend(srcRoomId, destRoomId, destChannelIndex)
+ret = rtc_plugins.initSend(srcRoomId, destRoomId, destChannelIndex, 1)
 if ret != 0:
     print(f"initSend fail, ret:{ret}")
     exit(1)
diff --git a/util/RTCContext.cpp b/util/RTCContext.cpp
index c1bee8f..c79e680 100644
--- a/util/RTCContext.cpp
+++ b/util/RTCContext.cpp
@@ -60,7 +60,6 @@ void RTCContext::onAudioProcess(const char* roomId, const char* peerId,
     mrtc::MRTCAudioFrame& audioFrame,
     mrtc::MRTCAudioSourceType audioSourceType)
 {
-    namespace py = boost::python;
     std::cout << "=== begin audio processing ===" << std::endl;
     std::cout << "audioFrame:" << audioFrame.dataCount << "," << audioFrame.sampleRate
         << "," << audioFrame.numChannels << "," << audioFrame.channelIndex << std::endl;
@@ -71,8 +70,9 @@ void RTCContext::onAudioProcess(const char* roomId, const char* peerId,
         std::cerr << "Python interpreter not initialized!" << std::endl;
         return;
     }
-    //PyGILState_STATE gstate = PyGILState_Ensure();
+    PyGILState_STATE gstate = PyGILState_Ensure();
 
+    namespace py = boost::python;
     try {
         // 2. Validate input parameters
         std::cout << "[2] checking input parameters..." << std::endl;
@@ -93,29 +93,31 @@ void RTCContext::onAudioProcess(const char* roomId, const char* peerId,
         std::cout << "step2" << std::endl;
         np::dtype dtype = np::dtype::get_builtin<int16_t>();
         std::cout << "step3" << std::endl;
-        np::ndarray audioArray = np::from_data(
-            audioFrame.data,                  // data pointer
-            dtype,                            // data type (int16)
-            py::make_tuple(shape[0]),         // shape (1D)
-            py::make_tuple(sizeof(int16_t)),  // stride
-            py::object()                      // owner (managed by Python)
-        );
-        std::cout << "    data copy complete" << std::endl;
 
         // 7. Invoke the callback
         if (!pyCallback_.is_none()) {
             std::cout << "[7] preparing to invoke the Python callback..." << std::endl;
             // bump the refcount to prevent premature release
-            Py_INCREF(pyCallback_.ptr());
+            //Py_INCREF(pyCallback_.ptr());
             try {
+                /*
                 std::cout << "    pyCallback_ type: " << Py_TYPE(pyCallback_.ptr())->tp_name << std::endl;
                 PyObject* repr = PyObject_Repr(pyCallback_.ptr());
                 if (repr) {
                     std::cout << "    pyCallback_ repr: " << PyUnicode_AsUTF8(repr) << std::endl;
                     Py_DECREF(repr);  // must be released manually
                 }
+                */
 
                 //PyGILState_STATE gstate = PyGILState_Ensure();
+                np::ndarray audioArray = np::from_data(
+                    audioFrame.data,                  // data pointer
+                    dtype,                            // data type (int16)
+                    py::make_tuple(shape[0]),         // shape (1D)
+                    py::make_tuple(sizeof(int16_t)),  // stride
+                    py::object()                      // owner (managed by Python)
+                );
+                std::cout << "    data copy complete" << std::endl;
                 pyCallback_(
                     audioArray,          // numpy array
                     data_size,           // data size
@@ -124,7 +126,7 @@ void RTCContext::onAudioProcess(const char* roomId, const char* peerId,
                     audioFrame.numChannels,
                     audioFrame.channelIndex
                 );
-                //PyGILState_Release(gstate);
+                PyGILState_Release(gstate);
                 std::cout << "    after callback" << std::endl;
                 if (PyErr_Occurred()) {
                     PyObject *type, *value, *traceback;
@@ -155,14 +157,14 @@ void RTCContext::onAudioProcess(const char* roomId, const char* peerId,
                         << PyUnicode_AsUTF8(PyObject_Str(value)) << std::endl;
                     PyErr_Restore(type, value, traceback);
                 }
-                Py_DECREF(pyCallback_.ptr());
+                //Py_DECREF(pyCallback_.ptr());
             } catch (...) {
                 std::cout << "[ERROR] callback execution failed" << std::endl;
-                Py_DECREF(pyCallback_.ptr());
+                //Py_DECREF(pyCallback_.ptr());
                 throw;
             }
 
-            Py_DECREF(pyCallback_.ptr());
+            //Py_DECREF(pyCallback_.ptr());
         } else {
             std::cout << "[7] no callback function set" << std::endl;
         }
@@ -171,12 +173,12 @@ void RTCContext::onAudioProcess(const char* roomId, const char* peerId,
         std::cout << "[8] releasing shared memory resources..." << std::endl;
 
         std::cout << "[9] releasing the GIL..." << std::endl;
-        //PyGILState_Release(gstate);
+        PyGILState_Release(gstate);
 
         std::cout << "=== audio processing finished ===" << std::endl;
     } catch (const std::exception& e) {
         std::cout << "[EXCEPTION] caught exception: " << e.what() << std::endl;
-        //PyGILState_Release(gstate);
+        PyGILState_Release(gstate);
         std::cerr << "Audio process error: " << e.what() << std::endl;
     }
 }
@@ -252,7 +254,7 @@ bool RTCContext::initRecv(const char* destRoomId, const char* srcUserId, const i
     return true;
 }
 
-bool RTCContext::initSend(const char* srcRoomId, const char* destRoomId, const int16_t destChannelIndex)
+bool RTCContext::initSend(const char* srcRoomId, const char* destRoomId, const int16_t destChannelIndex, uint8_t channelNum)
 {
     while (!isOnRoom_)
     {
@@ -273,6 +275,7 @@ bool RTCContext::initSend(const char* srcRoomId, const char* destRoomId, const i
     }
 
     mrtc::MRTCAudioOption option;
+    option.channel = channelNum;
     if (std::string(srcRoomId) != std::string(destRoomId)) {
         strcpy(option.dstRoomId, destRoomId);
     }
@@ -292,7 +295,8 @@ void RTCContext::destorySend(const int16_t selfChannelIndex)
 {
     rtcEngine_->stopCustomAudio(selfChannelIndex);
 }
-int16_t RTCContext::sendAudioData(uint8_t channelIndex, const void* pData, int32_t nSampleRate, uint64_t nNumberOfChannels, uint64_t dataLength)
+int16_t RTCContext::sendAudioData(uint8_t channelIndex, const void* pData, int32_t nSampleRate, uint64_t nNumberOfChannels,
+    uint64_t dataLength)
 {
     std::lock_guard<std::mutex> lock(mutex_);
     if (pData_)
@@ -314,7 +318,8 @@ int16_t RTCContext::sendCustomAudioData(const int16_t channelIndex, void* custom
         return -1;
     }
     std::cout << "customData addr is:" << customData << std::endl;
-    return rtcEngine_->sendCustomAudioData(channelIndex, customData, sampleRate, channelNum, dataLen);
+    return rtcEngine_->sendCustomAudioData(channelIndex, customData, sampleRate,
+        channelNum, dataLen);
 }
 mrtc::IMRTCEngine* RTCContext::getRtcEngine() const
 {
diff --git a/util/RTCContext.h b/util/RTCContext.h
index 6806daf..b80ded5 100644
--- a/util/RTCContext.h
+++ b/util/RTCContext.h
@@ -84,7 +84,7 @@ public:
     mrtc::IMRTCEngine* getRtcEngine() const;
     bool init(const char* selfUserId, const char* selfDisplayName, const char* selfRoomId);
     bool initRecv(const char* destRoomId, const char* srcUserId, const int16_t destChannelIndex);
-    bool initSend(const char* srcRoomId, const char* destRoomId, const int16_t destChannelIndex);
+    bool initSend(const char* srcRoomId, const char* destRoomId, const int16_t destChannelIndex, uint8_t channelNum);
     void* getpData() const;
     void setpData(void* pData);
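
Note: a minimal sketch of how the updated rtc_plugins.initSend binding is expected to be driven from Python, mirroring the call pattern in test_time_s.py. The identifier values, the channel count of 1, and the body of my_callback are illustrative assumptions, not part of the patch.

    import rtc_plugins

    # Hypothetical identifiers; real values depend on the deployment.
    srcUserId, srcDisplayName, srcRoomId = "userA", "User A", "roomA"
    destRoomId, destChannelIndex = "roomB", 0

    def my_callback(shmName, dataSize, dataCount, sampleRate, numChannels, channelIndex):
        # Receives the int16 numpy frame built in RTCContext::onAudioProcess.
        print(f"got {dataCount} samples @ {sampleRate} Hz, channels={numChannels}")

    ret = rtc_plugins.init(srcUserId, srcDisplayName, srcRoomId, my_callback)
    if ret != 0:
        raise SystemExit(f"init fail, ret:{ret}")

    # initSend now takes a trailing channel count, copied into MRTCAudioOption.channel.
    ret = rtc_plugins.initSend(srcRoomId, destRoomId, destChannelIndex, 1)
    if ret != 0:
        raise SystemExit(f"initSend fail, ret:{ret}")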