Update model/openlamm.py
model/openlamm.py (+2 -2)
@@ -11,7 +11,7 @@ from torch.nn.utils import rnn
 from types import SimpleNamespace
 from peft import LoraConfig, TaskType, get_peft_model
 from transformers import LlamaTokenizer, LlamaForCausalLM, LlamaConfig
-
+
 import numpy as np
 # from header import *
 
@@ -19,9 +19,9 @@ from transformers import StoppingCriteria, StoppingCriteriaList
 
 from .CLIP import load as load_clip
 from .PROCESS import data
-
 from .modeling_llama import LlamaForCausalLM
 from .utils.pcl_utils import MEAN_COLOR_RGB, RandomCuboid, random_sampling
+import .conversations
 
 ImageFile.LOAD_TRUNCATED_IMAGES = True
 
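One caveat on the second hunk: the added line `import .conversations` is not valid Python, because a leading dot is only permitted in the `from ... import ...` form, so the module would raise a SyntaxError as soon as it is imported. A minimal sketch of the relative import that was presumably intended, assuming a `conversations.py` module sits next to `openlamm.py` in the same package:

from . import conversations  # binds the sibling module as "conversations" (assumed location, mirroring the other relative imports above)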