9***@qq.com
  • Posted: 2023-07-21 15:41
  • Updated: 2025-01-11 15:00
  • Views: 955

How can a uni-app native plugin pass values to the front end via broadcast-style events?

Category: uni-app
var globalEvent = uni.requireNativePlugin('globalEvent');
globalEvent.addEventListener('baScanViewEvent', function(e) {
    console.log('baScanViewEvent' + JSON.stringify(e));
});

With an event listener registered this way, a native plugin can pass values to the front end through it.
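
In other words, whatever the plugin broadcasts under the event name 'baScanViewEvent' arrives in this callback. On the native (Kotlin) side, the call that pairs with such a listener is fireGlobalEventCallback on the module's mUniSDKInstance, which the uni-app native plugin docs describe for pushing events to JS. A minimal sketch, with a made-up method name and payload key:

    // Inside a class extending io.dcloud.feature.uniapp.common.UniModule.
    // mUniSDKInstance is inherited from UniModule; fireGlobalEventCallback broadcasts
    // an event that globalEvent.addEventListener('baScanViewEvent', ...) will receive.
    fun notifyScanResult(text: String) {                           // hypothetical method name
        val params = mutableMapOf<String, Any>("result" to text)   // "result" is an illustrative key
        mUniSDKInstance.fireGlobalEventCallback("baScanViewEvent", params)
    }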

Does anyone know how to implement this? I've been scratching my head raw over it.

9***@qq.com (Author)

After getting put through the wringer for a while, I finally got it working.
JS code:

mounted() {
    let that = this

    // Listen for the 'callBackText' event broadcast by the native plugin
    plus.globalEvent.addEventListener('callBackText', function(e) {
        // uni.showModal({
        //     title: 'fcm notify',
        //     content: "fcmNotify收到:" + JSON.stringify(e)
        // });
        if (e.code == 0) {
            let text = "";
            text += "-----" + that.$DataUtil.formatNow("Y-M-D h:m:s") + "-----" + "\n";
            text += JSON.stringify(e.data);
            that.zhi = text;
        }
    });
},

Calling code:

openFun() {
    let that = this
    // 'plugin' is the native module instance (obtained elsewhere via uni.requireNativePlugin).
    // This callback only reports start/stop; the recognized text arrives via the 'callBackText' event.
    plugin.openAudio(function(res) {
        console.log("initDevice")
        console.log(res)
        uni.showModal({
            title: JSON.stringify(res)
        })
        // if (res.code == 0) {
        //     that.zhi += "\n" + "-----" + that.$DataUtil.formatNow("Y-M-D h:m:s") + "-----" + "\n";
        //     that.zhi += res.data;
        // }
    });
},

Android Kotlin code:

@UniJSMethod(uiThread = true)
fun openAudio(jsCallback: UniJSCallback?) {
    val result = JSONObject()
    if (!isRecording) {
        // Not recording yet: set up the microphone and start the capture loop
        val ret = initMicrophone()
        if (!ret) {
            UniLogUtils.e(TAG, "Failed to initialize microphone")
            if (jsCallback != null) {
                result.put("code", -1)
                result.put("data", null)
                result.put("msg", "Failed to initialize microphone")
                jsCallback.invoke(result)
            }
            return
        }
        UniLogUtils.i(TAG, "state: ${audioRecord?.state}")
        audioRecord!!.startRecording()
        isRecording = true
        model.reset()
        lastText = ""
        idx = 0

        // Pull samples on a background thread (kotlin.concurrent.thread)
        recordingThread = thread(true) {
            processSamples()
        }

        // mUniSDKInstance.runOnUiThread {
        //     processSamples()
        // }
        UniLogUtils.i(TAG, "Started recording")
        if (jsCallback != null) {
            result.put("code", 0)
            result.put("data", null)
            result.put("msg", "开启语音监听成功")   // "started voice listening successfully"
            jsCallback.invoke(result)
        }
    } else {
        // Already recording: treat the call as a toggle and stop
        isRecording = false
        audioRecord!!.stop()
        audioRecord!!.release()
        audioRecord = null
        UniLogUtils.i(TAG, "Stopped recording")

        if (jsCallback != null) {
            result.put("code", 0)
            result.put("data", null)
            result.put("msg", "停止语音监听成功")   // "stopped voice listening successfully"
            jsCallback.invoke(result)
        }
    }
}
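
openAudio() calls an initMicrophone() helper that isn't shown in the post. A minimal sketch of what it might look like, assuming sampleRateInHz and audioRecord are fields of the same module class and the RECORD_AUDIO runtime permission has already been granted:

    // Needs android.media.AudioRecord, android.media.AudioFormat, android.media.MediaRecorder.
    // A guess at the missing helper; the author's real implementation is not shown.
    private fun initMicrophone(): Boolean {
        val minBufferSize = AudioRecord.getMinBufferSize(
            sampleRateInHz,                     // e.g. 16000 Hz for typical ASR models
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT
        )
        if (minBufferSize <= 0) return false    // unsupported rate/format on this device

        // Constructing AudioRecord requires the RECORD_AUDIO permission
        audioRecord = AudioRecord(
            MediaRecorder.AudioSource.MIC,
            sampleRateInHz,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            minBufferSize * 2                   // some headroom over the minimum
        )
        return audioRecord?.state == AudioRecord.STATE_INITIALIZED
    }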

The loop that keeps passing values back to JS:
private fun processSamples() {
    UniLogUtils.i(TAG, "processing samples")
    val resultData = JSONObject()

    val interval = 0.1 // i.e., 100 ms
    val bufferSize = (interval * sampleRateInHz).toInt() // in samples
    val buffer = ShortArray(bufferSize)

    while (isRecording) {
        val ret = audioRecord?.read(buffer, 0, buffer.size)
        if (ret != null && ret > 0) {
            // Convert 16-bit PCM to floats in [-1, 1] and feed them to the recognizer
            val samples = FloatArray(ret) { buffer[it] / 32768.0f }
            model.acceptSamples(samples)
            while (model.isReady()) {
                model.decode()
            }

            UniLogUtils.i(TAG, "Handler is running: ${handler.looper.isCurrentThread}")

            val isEndpoint = model.isEndpoint()
            val text = model.text
            if (text.isNotBlank()) {
                if (lastText.isBlank()) {
                    resultData.put("text", "${idx}: ${text}")
                    resultData.put("isEndpoint", isEndpoint)
                } else {
                    resultData.put("text", "${lastText}\n${idx}: ${text}")
                    resultData.put("isEndpoint", isEndpoint)
                }
            }

            if (isEndpoint) {
                model.reset()
//                if (text.isNotBlank()) {
//                    lastText = "${lastText}\n${idx}: ${text}"
//                    idx += 1
//                }
            }
            // Broadcast the latest text to the JS layer
            sendEventToJS("callBackText", 0, resultData, "获取语音文字成功")   // "got speech text successfully"
        }
    }
}
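
sendEventToJS() is the author's own helper and isn't shown either. Presumably it wraps mUniSDKInstance.fireGlobalEventCallback and packages code/data/msg the same way openAudio() does, so that plus.globalEvent.addEventListener('callBackText', ...) receives an object with code, data, and msg. A possible sketch under that assumption:

    // Guess at the unshown helper: bundle code/data/msg and broadcast them to the JS layer.
    // mUniSDKInstance comes from the UniModule base class.
    private fun sendEventToJS(eventName: String, code: Int, data: JSONObject, msg: String) {
        val params = mutableMapOf<String, Any>(
            "code" to code,
            "data" to data,
            "msg" to msg
        )
        mUniSDKInstance.fireGlobalEventCallback(eventName, params)
    }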

With this in place, the speech recognized on the Android side is turned into text and pushed to the uni-app page in real time.

Hope the next person doesn't get put through the same pain. Nasty stuff.

I damn near scratched my head raw.

shanghai

Learned something.
